namespace nnc {
namespace mir {
-
/**
* @brief Scalar class
*/
#include "cpp_elu.generated.h"
#include "cpp_tanh.generated.h"
#include "cpp_elementwise.generated.h"
+#include "cpp_pad.generated.h"
namespace nnc
{
out.write(cpp_elementwise, sizeof(cpp_elementwise));
out.write(cpp_elu, sizeof(cpp_elu));
out.write(cpp_tanh, sizeof(cpp_tanh));
+ out.write(cpp_pad, sizeof(cpp_pad));
out.write(cpp_conv_transpose, sizeof(cpp_conv_transpose));
out.write(cpp_operations, sizeof(cpp_operations));
out.write(cpp_scale, sizeof(cpp_scale));
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/VariableOp.h"
#include "core/modelIR/operations/SqueezeOp.h"
+#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
using namespace std;
}
/**
 * @brief Register a PadOp so code is generated for it.
 *
 * Resolved from an unmerged diff: the old `assert(false && "Not implemented
 * yet")` stub is dropped in favor of the implemented version, which records
 * the operation under the artifact name "pad".
 *
 * @param op pad operation being visited
 */
void ModelAnalyzer::visit(mir::ops::PadOp& op) {
  addOpDescr(&op, "pad");
}
void ModelAnalyzer::visit(mir::ops::ReduceFOp& op) {
#include "core/modelIR/operations/TanhOp.h"
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/SqueezeOp.h"
+#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
#include "pass/PassException.h"
}
/**
 * @brief Serialize PadOp parameters into the artifact parameter buffer.
 *
 * Layout written: output shape, then the rank (number of dimensions), then
 * one (left, right) padding pair per dimension. Pairs are emitted from the
 * innermost dimension outward (num_dims - 1 down to 0), which is the order
 * the generated `pad` kernel deserializes them in.
 *
 * @param op pad operation whose parameters are serialized
 */
void Serializer::visit(mir::ops::PadOp& op) {
  _curOp->_paramStartOffset = _buffer.size();

  // serialize output shape
  serializeShape(op.getOutputShape(0));

  // serialize number of dimensions (query it once; the old code called
  // getNumDim() twice)
  const int num_dims = op.getNumDim();
  serializeT<int32_t>(num_dims);

  // serialize padding pairs, innermost dimension first
  for (int i = 0; i < num_dims; ++i) {
    const std::pair<int32_t, int32_t> pads = op.getPaddingForDim(num_dims - 1 - i);
    serializeT<int32_t>(pads.first);
    serializeT<int32_t>(pads.second);
  }
}
void Serializer::visit(mir::ops::ResizeOp& op) {
#include <cassert>
#include <cstdint>
#include <cstdlib>
+#include <vector>
#ifndef TFLITE_DCHECK
#define TFLITE_DCHECK(condition) (condition) ? (void)0 : assert(false)
out.reShape(out_s);
out.fillData(in.getData());
}
+
+void pad(Tensor& out, const char* params, const Tensor& in) {
+ const float* input = in.getData();
+ const Dims<4> input_dims = shapeToDims(in.getShape());
+
+ // deserialize output shape
+ Shape output_shape = deserializeShape(params);
+
+ // deserialize number of dimensions
+ const int32_t num_dim = deserializeT<int32_t>(params);
+
+ // deserialize paddings
+ std::vector<int> left_paddings, right_paddings;
+ for(int i = 0; i < num_dim; i++) {
+ left_paddings.push_back(deserializeT<int32_t>(params));
+ right_paddings.push_back(deserializeT<int32_t>(params));
+ }
+ for(int i = num_dim; i < 4; i++) {
+ left_paddings.push_back(0);
+ right_paddings.push_back(0);
+ }
+
+ out.reShape(output_shape);
+
+ float* output = out.getData();
+ const Dims<4> output_dims = shapeToDims(out.getShape());
+
+ Pad(input, input_dims, left_paddings, right_paddings, output, output_dims);
+}
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

+inline void Pad(const float* input_data, const Dims<4>& input_dims,
+ const std::vector<int>& left_paddings,
+ const std::vector<int>& right_paddings, float* output_data,
+ const Dims<4>& output_dims) {
+
+ const int output_batch = ArraySize(output_dims, 3);
+ const int output_height = ArraySize(output_dims, 2);
+ const int output_width = ArraySize(output_dims, 1);
+ const int output_depth = ArraySize(output_dims, 0);
+
+ const int left_b_padding = left_paddings[3];
+ const int left_h_padding = left_paddings[2];
+ const int left_w_padding = left_paddings[1];
+ const int left_d_padding = left_paddings[0];
+
+ const int right_b_padding = right_paddings[3];
+ const int right_h_padding = right_paddings[2];
+ const int right_w_padding = right_paddings[1];
+ const int right_d_padding = right_paddings[0];
+
+ const int input_depth = ArraySize(input_dims, 0);
+
+ if (left_b_padding != 0) {
+ memset(output_data, 0,
+ left_b_padding * output_height * output_width * output_depth *
+ sizeof(float));
+ }
+ for (int out_b = left_b_padding; out_b < output_batch - right_b_padding;
+ ++out_b) {
+ if (left_h_padding != 0) {
+ memset(output_data + Offset(output_dims, 0, 0, 0, out_b), 0,
+ left_h_padding * output_width * output_depth * sizeof(float));
+ }
+ for (int out_h = left_h_padding; out_h < output_height - right_h_padding;
+ ++out_h) {
+ if (left_w_padding != 0) {
+ memset(output_data + Offset(output_dims, 0, 0, out_h, out_b), 0,
+ left_w_padding * output_depth * sizeof(float));
+ }
+ for (int out_w = left_w_padding; out_w < output_width - right_w_padding;
+ ++out_w) {
+ if (left_d_padding != 0) {
+ memset(output_data + Offset(output_dims, 0, out_w, out_h, out_b), 0,
+ left_d_padding * sizeof(float));
+ }
+
+ float* out = output_data +
+ Offset(output_dims, left_d_padding, out_w, out_h, out_b);
+ const float* in =
+ input_data + Offset(input_dims, 0, out_w - left_w_padding,
+ out_h - left_h_padding, out_b - left_b_padding);
+ memcpy(out, in, input_depth * sizeof(float));
+
+ if (right_d_padding != 0) {
+ memset(
+ output_data + Offset(output_dims, output_depth - right_d_padding,
+ out_w, out_h, out_b),
+ 0, right_d_padding * sizeof(float));
+ }
+ }
+ if (right_w_padding != 0) {
+ memset(
+ output_data + Offset(output_dims, 0, output_width - right_w_padding,
+ out_h, out_b),
+ 0, right_w_padding * output_depth * sizeof(float));
+ }
+ }
+ if (right_h_padding != 0) {
+ memset(output_data + Offset(output_dims, 0, 0,
+ output_height - right_h_padding, out_b),
+ 0, right_h_padding * output_width * output_depth * sizeof(float));
+ }
+ }
+ if (right_b_padding != 0) {
+ memset(output_data +
+ Offset(output_dims, 0, 0, 0, output_batch - right_b_padding),
+ 0,
+ right_b_padding * output_height * output_width * output_depth *
+ sizeof(float));
+ }
+}
paddings.reserve(static_cast<size_t>(num_dims));
// create structure with paddings
- for (int i = 0; i < num_dims; i++)
+ for (int i = 1; i < num_dims; i++)
paddings.emplace_back(paddings_tensor.at(Index({i, 0})), paddings_tensor.at(Index({i, 1})));
// create const value, it's float because we can't see input type
float const_value = 0.0; // not support different constant value
Scalar constant_value(reinterpret_cast<char*>(&const_value), DTYPE::FLOAT32, sizeof(float));
return createOp<ops::PadOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0),
- num_dims, paddings, constant_value);
+ num_dims - 1, paddings, constant_value);
}
std::vector<mir::Operation*>
Shape shape{2,2};
char* ptr = (char*)(new float[4]);
std::shared_ptr<char> mem(ptr, [](char* d){ delete[] (float*)d; } );
-
TensorVariant t(shape, mem, DTYPE::FLOAT32, sizeof(float));
ASSERT_EQ(t.getShape(), shape);
Shape shape{2, 2, 2};
std::shared_ptr<float> mem(new float[8], [](float* f){ delete[] f; });
-
TensorVariant t(shape, mem, DTYPE::FLOAT32);
ASSERT_EQ(t.getElementSize(), sizeof(float));
#include "code_snippets/cpp_elu.def"
#include "code_snippets/cpp_elementwise.def"
#include "code_snippets/cpp_tanh.def"
+#include "code_snippets/cpp_pad.def"
#include "CommonData.def"
#include "code_snippets/cpp_header_types.def"
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/Deconv2DOp.h"
#include "core/modelIR/operations/TanhOp.h"
+#include "core/modelIR/operations/PadOp.h"
// various headers
#include "core/modelIR/TensorVariant.h"
#include "core/modelIR/Tensor.h"
#include "core/modelIR/Graph.h"
#include "core/modelIR/ShapeRange.h"
+#include "core/modelIR/Scalar.h"
#include "passes/interpreter/Interpreter.h"
createAndRunTestGraph(opGenerator, reshape, inputNTensors, aInputTensor);
}
+
+TEST(cpp_operations_test, pad) {
+ // test on matrix 2x3
+ vector<int> input_shape{2,3};
+
+ Tensor a_input_tensor;
+ vector<unique_ptr<mir::TensorVariant>> input_n_tensor(1);
+ fillTensors(input_n_tensor[0], a_input_tensor, input_shape, 1.0f);
+ // PadOp params
+ int num_dims = input_shape.size();
+ vector<pair<int32_t, int32_t>> paddings;
+ paddings.emplace_back(1, 1);
+ paddings.emplace_back(2, 2);
+
+ float const_value = 0.0;
+
+ mir::Scalar constant_value(reinterpret_cast<char*>(&const_value), mir::DTYPE::FLOAT32, sizeof(float));
+
+ auto op_generator = [num_dims, &paddings, &constant_value]
+ (mir::Graph& g, const std::vector<mir::IODescriptor>& inputs)
+ { return g.create<mir::ops::PadOp>("y", inputs[0], num_dims, paddings, constant_value); };
+
+ createAndRunTestGraph(op_generator, pad, input_n_tensor, a_input_tensor);
+}