From 6b576be4aee6c4528403fbb5636cacc3636fddd0 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=D0=9F=D0=B0=D0=B2=D0=B5=D0=BB=20=D0=98=D0=BB=D1=8C=D1=8E?=
=?utf8?q?=D1=82=D1=87=D0=B5=D0=BD=D0=BA=D0=BE/AI=20Tools=20Lab=20/SRR/Eng?=
=?utf8?q?ineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Fri, 30 Nov 2018 13:25:13 +0300
Subject: [PATCH] [nnc] Support Pad operation on soft backend (#2364)
* Add serialization on softbackend
* Add deserialization on softbackend
* Add cpp_pad snippet
Signed-off-by: Pavel Iliutchenko
---
contrib/nnc/include/core/modelIR/Scalar.h | 1 -
contrib/nnc/passes/soft_backend/CPPGenerator.cpp | 2 +
contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp | 3 +-
contrib/nnc/passes/soft_backend/SBSerializer.cpp | 18 +++-
.../code_snippets/cpp_common_funcs.def | 1 +
.../soft_backend/code_snippets/cpp_operations.def | 29 +++++++
.../passes/soft_backend/code_snippets/cpp_pad.def | 96 ++++++++++++++++++++++
.../passes/tflite_frontend/tflite_op_creator.cpp | 4 +-
contrib/nnc/unittests/core/TensorVariant.cpp | 2 -
.../nnc/unittests/soft_backend/CPPOperations.cpp | 27 ++++++
10 files changed, 176 insertions(+), 7 deletions(-)
create mode 100644 contrib/nnc/passes/soft_backend/code_snippets/cpp_pad.def
diff --git a/contrib/nnc/include/core/modelIR/Scalar.h b/contrib/nnc/include/core/modelIR/Scalar.h
index 7a5c59b..dba1e08 100644
--- a/contrib/nnc/include/core/modelIR/Scalar.h
+++ b/contrib/nnc/include/core/modelIR/Scalar.h
@@ -25,7 +25,6 @@
namespace nnc {
namespace mir {
-
/**
* @brief Scalar class
*/
diff --git a/contrib/nnc/passes/soft_backend/CPPGenerator.cpp b/contrib/nnc/passes/soft_backend/CPPGenerator.cpp
index 7b2ea80..30565ad 100644
--- a/contrib/nnc/passes/soft_backend/CPPGenerator.cpp
+++ b/contrib/nnc/passes/soft_backend/CPPGenerator.cpp
@@ -44,6 +44,7 @@ using namespace std;
#include "cpp_elu.generated.h"
#include "cpp_tanh.generated.h"
#include "cpp_elementwise.generated.h"
+#include "cpp_pad.generated.h"
namespace nnc
{
@@ -284,6 +285,7 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, co
out.write(cpp_elementwise, sizeof(cpp_elementwise));
out.write(cpp_elu, sizeof(cpp_elu));
out.write(cpp_tanh, sizeof(cpp_tanh));
+ out.write(cpp_pad, sizeof(cpp_pad));
out.write(cpp_conv_transpose, sizeof(cpp_conv_transpose));
out.write(cpp_operations, sizeof(cpp_operations));
out.write(cpp_scale, sizeof(cpp_scale));
diff --git a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
index 48aa237..d3e459e 100644
--- a/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
+++ b/contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
@@ -44,6 +44,7 @@
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/VariableOp.h"
#include "core/modelIR/operations/SqueezeOp.h"
+#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
using namespace std;
@@ -284,7 +285,7 @@ void ModelAnalyzer::visit(ops::SqueezeOp& op) {
}
void ModelAnalyzer::visit(mir::ops::PadOp& op) {
- assert(false && "Not implemented yet");
+ addOpDescr(&op, "pad");
}
void ModelAnalyzer::visit(mir::ops::ReduceFOp& op) {
diff --git a/contrib/nnc/passes/soft_backend/SBSerializer.cpp b/contrib/nnc/passes/soft_backend/SBSerializer.cpp
index c97a784..dba8621 100644
--- a/contrib/nnc/passes/soft_backend/SBSerializer.cpp
+++ b/contrib/nnc/passes/soft_backend/SBSerializer.cpp
@@ -40,6 +40,7 @@
#include "core/modelIR/operations/TanhOp.h"
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/SqueezeOp.h"
+#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
#include "pass/PassException.h"
@@ -325,7 +326,22 @@ void Serializer::visit(ops::SqueezeOp& op) {
}
void Serializer::visit(mir::ops::PadOp& op) {
- throw PassException("Not implemented yet");
+ _curOp->_paramStartOffset = _buffer.size();
+
+ // serialize paddings
+ int num_dims = op.getNumDim();
+
+ // serialize output shape
+ serializeShape(op.getOutputShape(0));
+
+ // serialize num dimensions
+ serializeT<int32_t>(op.getNumDim());
+
+ for(int i = 0; i < num_dims; i++) {
+ std::pair<int32_t, int32_t> pair = op.getPaddingForDim(num_dims - 1 - i);
+ serializeT<int32_t>(pair.first);
+ serializeT<int32_t>(pair.second);
+ }
}
void Serializer::visit(mir::ops::ResizeOp& op) {
diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_common_funcs.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_common_funcs.def
index 6be873a..2f4a271 100644
--- a/contrib/nnc/passes/soft_backend/code_snippets/cpp_common_funcs.def
+++ b/contrib/nnc/passes/soft_backend/code_snippets/cpp_common_funcs.def
@@ -19,6 +19,7 @@ limitations under the License.
#include
#include
#include
+#include <vector>
#ifndef TFLITE_DCHECK
#define TFLITE_DCHECK(condition) (condition) ? (void)0 : assert(false)
diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def
index 4c37889..e78dc97 100644
--- a/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def
+++ b/contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def
@@ -507,3 +507,32 @@ void reshape(Tensor &out, const char *params, const Tensor &in)
out.reShape(out_s);
out.fillData(in.getData());
}
+
+void pad(Tensor& out, const char* params, const Tensor& in) {
+ const float* input = in.getData();
+ const Dims<4> input_dims = shapeToDims(in.getShape());
+
+ // deserialize output shape
+ Shape output_shape = deserializeShape(params);
+
+ // deserialize number of dimensions
+ const int32_t num_dim = deserializeT<int32_t>(params);
+
+ // deserialize paddings
+ std::vector<int32_t> left_paddings, right_paddings;
+ for(int i = 0; i < num_dim; i++) {
+ left_paddings.push_back(deserializeT<int32_t>(params));
+ right_paddings.push_back(deserializeT<int32_t>(params));
+ }
+ for(int i = num_dim; i < 4; i++) {
+ left_paddings.push_back(0);
+ right_paddings.push_back(0);
+ }
+
+ out.reShape(output_shape);
+
+ float* output = out.getData();
+ const Dims<4> output_dims = shapeToDims(out.getShape());
+
+ Pad(input, input_dims, left_paddings, right_paddings, output, output_dims);
+}
diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_pad.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_pad.def
new file mode 100644
index 0000000..39dd60d
--- /dev/null
+++ b/contrib/nnc/passes/soft_backend/code_snippets/cpp_pad.def
@@ -0,0 +1,96 @@
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+inline void Pad(const float* input_data, const Dims<4>& input_dims,
+ const std::vector<int32_t>& left_paddings,
+ const std::vector<int32_t>& right_paddings, float* output_data,
+ const Dims<4>& output_dims) {
+
+ const int output_batch = ArraySize(output_dims, 3);
+ const int output_height = ArraySize(output_dims, 2);
+ const int output_width = ArraySize(output_dims, 1);
+ const int output_depth = ArraySize(output_dims, 0);
+
+ const int left_b_padding = left_paddings[3];
+ const int left_h_padding = left_paddings[2];
+ const int left_w_padding = left_paddings[1];
+ const int left_d_padding = left_paddings[0];
+
+ const int right_b_padding = right_paddings[3];
+ const int right_h_padding = right_paddings[2];
+ const int right_w_padding = right_paddings[1];
+ const int right_d_padding = right_paddings[0];
+
+ const int input_depth = ArraySize(input_dims, 0);
+
+ if (left_b_padding != 0) {
+ memset(output_data, 0,
+ left_b_padding * output_height * output_width * output_depth *
+ sizeof(float));
+ }
+ for (int out_b = left_b_padding; out_b < output_batch - right_b_padding;
+ ++out_b) {
+ if (left_h_padding != 0) {
+ memset(output_data + Offset(output_dims, 0, 0, 0, out_b), 0,
+ left_h_padding * output_width * output_depth * sizeof(float));
+ }
+ for (int out_h = left_h_padding; out_h < output_height - right_h_padding;
+ ++out_h) {
+ if (left_w_padding != 0) {
+ memset(output_data + Offset(output_dims, 0, 0, out_h, out_b), 0,
+ left_w_padding * output_depth * sizeof(float));
+ }
+ for (int out_w = left_w_padding; out_w < output_width - right_w_padding;
+ ++out_w) {
+ if (left_d_padding != 0) {
+ memset(output_data + Offset(output_dims, 0, out_w, out_h, out_b), 0,
+ left_d_padding * sizeof(float));
+ }
+
+ float* out = output_data +
+ Offset(output_dims, left_d_padding, out_w, out_h, out_b);
+ const float* in =
+ input_data + Offset(input_dims, 0, out_w - left_w_padding,
+ out_h - left_h_padding, out_b - left_b_padding);
+ memcpy(out, in, input_depth * sizeof(float));
+
+ if (right_d_padding != 0) {
+ memset(
+ output_data + Offset(output_dims, output_depth - right_d_padding,
+ out_w, out_h, out_b),
+ 0, right_d_padding * sizeof(float));
+ }
+ }
+ if (right_w_padding != 0) {
+ memset(
+ output_data + Offset(output_dims, 0, output_width - right_w_padding,
+ out_h, out_b),
+ 0, right_w_padding * output_depth * sizeof(float));
+ }
+ }
+ if (right_h_padding != 0) {
+ memset(output_data + Offset(output_dims, 0, 0,
+ output_height - right_h_padding, out_b),
+ 0, right_h_padding * output_width * output_depth * sizeof(float));
+ }
+ }
+ if (right_b_padding != 0) {
+ memset(output_data +
+ Offset(output_dims, 0, 0, 0, output_batch - right_b_padding),
+ 0,
+ right_b_padding * output_height * output_width * output_depth *
+ sizeof(float));
+ }
+}
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
index 4b7bfd7..a5b86ca 100644
--- a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
+++ b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
@@ -297,7 +297,7 @@ std::vector<mir::Operation*> TFLiteOpCreator::createPad(InputOps inputs, InputPa
paddings.reserve(static_cast<size_t>(num_dims));
// create strucuture with paddings
- for (int i = 0; i < num_dims; i++)
+ for (int i = 1; i < num_dims; i++)
paddings.emplace_back(paddings_tensor.at(Index({i, 0})), paddings_tensor.at(Index({i, 1})));
// create const value, it's float because we can't see input type
float const_value = 0.0; // not support different constant value
@@ -305,7 +305,7 @@ std::vector<mir::Operation*> TFLiteOpCreator::createPad(InputOps inputs, InputPa
Scalar constant_value(reinterpret_cast<const char*>(&const_value), DTYPE::FLOAT32, sizeof(float));
return createOp<ops::PadOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
- num_dims, paddings, constant_value);
+ num_dims - 1, paddings, constant_value);
}
std::vector
diff --git a/contrib/nnc/unittests/core/TensorVariant.cpp b/contrib/nnc/unittests/core/TensorVariant.cpp
index ed9d7b9..1bf2555 100644
--- a/contrib/nnc/unittests/core/TensorVariant.cpp
+++ b/contrib/nnc/unittests/core/TensorVariant.cpp
@@ -24,7 +24,6 @@ TEST(TensorVariant, BasicTest) {
Shape shape{2,2};
char* ptr = (char*)(new float[4]);
std::shared_ptr<char> mem(ptr, [](char* d){ delete[] (float*)d; } );
-
TensorVariant t(shape, mem, DTYPE::FLOAT32, sizeof(float));
ASSERT_EQ(t.getShape(), shape);
@@ -35,7 +34,6 @@ TEST(TensorVariant, ElementSizeDeductionTest) {
Shape shape{2, 2, 2};
std::shared_ptr<float> mem(new float[8], [](float* f){ delete[] f; });
-
TensorVariant t(shape, mem, DTYPE::FLOAT32);
ASSERT_EQ(t.getElementSize(), sizeof(float));
diff --git a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
index d871db1..31c30aa 100644
--- a/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
+++ b/contrib/nnc/unittests/soft_backend/CPPOperations.cpp
@@ -39,6 +39,7 @@
#include "code_snippets/cpp_elu.def"
#include "code_snippets/cpp_elementwise.def"
#include "code_snippets/cpp_tanh.def"
+#include "code_snippets/cpp_pad.def"
#include "CommonData.def"
#include "code_snippets/cpp_header_types.def"
@@ -67,12 +68,14 @@
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/Deconv2DOp.h"
#include "core/modelIR/operations/TanhOp.h"
+#include "core/modelIR/operations/PadOp.h"
// various headers
#include "core/modelIR/TensorVariant.h"
#include "core/modelIR/Tensor.h"
#include "core/modelIR/Graph.h"
#include "core/modelIR/ShapeRange.h"
+#include "core/modelIR/Scalar.h"
#include "passes/interpreter/Interpreter.h"
@@ -678,3 +681,27 @@ TEST(cpp_operations_test, reshape)
createAndRunTestGraph(opGenerator, reshape, inputNTensors, aInputTensor);
}
+
+TEST(cpp_operations_test, pad) {
+ // test on matrix 2x3
+ vector<int> input_shape{2,3};
+
+ Tensor a_input_tensor;
+ vector<unique_ptr<mir::TensorVariant>> input_n_tensor(1);
+ fillTensors(input_n_tensor[0], a_input_tensor, input_shape, 1.0f);
+ // PadOp params
+ int num_dims = input_shape.size();
+ vector<pair<int32_t, int32_t>> paddings;
+ paddings.emplace_back(1, 1);
+ paddings.emplace_back(2, 2);
+
+ float const_value = 0.0;
+
+ mir::Scalar constant_value(reinterpret_cast<const char*>(&const_value), mir::DTYPE::FLOAT32, sizeof(float));
+
+ auto op_generator = [num_dims, &paddings, &constant_value]
+ (mir::Graph& g, const std::vector<mir::Operation*>& inputs)
+ { return g.create<mir::ops::PadOp>("y", inputs[0], num_dims, paddings, constant_value); };
+
+ createAndRunTestGraph(op_generator, pad, input_n_tensor, a_input_tensor);
+}
--
2.7.4