#include "ConstantInitializer.h"
+#include "kernel/OperationUtils.h"
+
+namespace
+{
+
+// Copies a rank-4 constant kernel operand into its backend tensor while
+// permuting the filter layout from OHWI (frontend order) to HWOI (srcn order).
+// T is the element type of the operand's payload.
+template <typename T>
+static void
+PermuteKernel(const neurun::model::Operand &model_obj, neurun::backend::operand::IObject &obj,
+ const neurun::model::Layout frontend_layout = neurun::model::Layout::UNKNOWN)
+{
+ const auto shape = model_obj.shape();
+ auto base = reinterpret_cast<const T *>(model_obj.data().base());
+
+ // Only rank-4 filter operands are supported.
+ assert(shape.rank() == 4);
+
+ // TODO Support frontend layout
+ UNUSED_RELEASE(frontend_layout);
+
+ obj.access([&](::neurun::backend::operand::ITensor &tensor) {
+ // NOTE The srcn takes a HWOI layout as kernel filter even though image layout is NHWC.
+ // This policy is the same with the tensorflow policy.
+ // So using srcn library, we need to change kernel layout to HWOI from OHWI.
+ const int32_t outch = shape.dim(0);
+ const int32_t height = shape.dim(1);
+ const int32_t width = shape.dim(2);
+ const int32_t inch = shape.dim(3);
+ // Destination (HWOI) dimension sizes, used to linearize offsets below.
+ const auto to_dim = ::neurun::backend::srcn::kernel::convertCoordinates(
+ {outch, height, width, inch}, ::neurun::backend::srcn::kernel::FilterLayout::OHWI,
+ ::neurun::backend::srcn::kernel::FilterLayout::HWOI);
+ for (auto i = 0; i < outch; ++i)
+ {
+ for (auto j = 0; j < height; ++j)
+ {
+ for (auto k = 0; k < width; ++k)
+ {
+ for (auto l = 0; l < inch; ++l)
+ {
+ // Map the OHWI source coordinate (i, j, k, l) to its HWOI position.
+ const auto coords = ::neurun::backend::srcn::kernel::convertCoordinates(
+ {i, j, k, l}, ::neurun::backend::srcn::kernel::FilterLayout::OHWI,
+ ::neurun::backend::srcn::kernel::FilterLayout::HWOI);
+ // Row-major flattening of the permuted coordinate over the permuted dims.
+ const size_t offset = coords[0] * to_dim[1] * to_dim[2] * to_dim[3] +
+ coords[1] * to_dim[2] * to_dim[3] + coords[2] * to_dim[3] +
+ coords[3];
+ T *into = reinterpret_cast<T *>(tensor.buffer() + offset * sizeof(T));
+ // Source is read in plain row-major OHWI order.
+ T value = *(base + i * height * width * inch + j * width * inch + k * inch + l);
+ *into = value;
+ }
+ }
+ }
+ }
+ });
+}
+}
+
namespace neurun
{
namespace backend
_init_map.clear();
}
+// Registers a PermuteKernel initializer for a constant kernel operand so its
+// data is relayouted (OHWI -> HWOI) when copied into the backend tensor.
+// Non-constant operands are ignored.
+void ConstantInitializer::registerPermuteKernelInitializer(const model::OperandIndex &index,
+ const model::Operand &obj)
+{
+ // For only CONSTANTS
+ if (!obj.isConstant())
+ return;
+
+ VERBOSE(FillOperandData) << "[SRCN] Fill data for operand " << index.value() << std::endl;
+
+ const auto type = obj.typeInfo().type();
+ using neurun::model::DataType;
+ using namespace std::placeholders;
+
+ // Bind the element-typed permute kernel; the current subgraph layout is
+ // captured now and forwarded when the initializer actually runs.
+ switch (type)
+ {
+ case DataType::FLOAT32:
+ _init_map[index] = std::bind(PermuteKernel<float>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::INT32:
+ _init_map[index] = std::bind(PermuteKernel<int32_t>, _1, _2, _current_subg_layout);
+ break;
+ case DataType::UINT32:
+ _init_map[index] = std::bind(PermuteKernel<uint32_t>, _1, _2, _current_subg_layout);
+ break;
+ // BOOL8 and QUANT8_ASYMM are both byte-sized, so they share the uint8_t kernel.
+ case DataType::BOOL8:
+ case DataType::QUANT8_ASYMM:
+ _init_map[index] = std::bind(PermuteKernel<uint8_t>, _1, _2, _current_subg_layout);
+ break;
+ default:
+ throw std::runtime_error("Not supported, yet");
+ break;
+ }
+}
+
+// Registers the relayouting initializer for TransposeConv's constant kernel input.
+void ConstantInitializer::visit(const model::operation::TransposeConvNode &node)
+{
+ const auto &kernel_index = node.getInputs().at(model::operation::TransposeConvNode::KERNEL);
+ const auto &kernel_obj = _operands.at(kernel_index);
+ registerPermuteKernelInitializer(kernel_index, kernel_obj);
+}
+
} // namespace srcn
} // namespace backend
} // namespace neurun
public:
void run() override;
+public:
+ // Registers an initializer that relayouts a constant kernel operand
+ // (OHWI -> HWOI) when copying it into the backend tensor.
+ void registerPermuteKernelInitializer(const model::OperandIndex &index,
+ const model::Operand &obj);
+
+public:
+ void visit(const model::operation::TransposeConvNode &) override;
+
private:
const model::Operands &_operands;
std::shared_ptr<TensorBuilder> _tensor_builder;
#include "cpp14/memory.h"
#include "util/Padding.h"
+#include "kernel/TransposeConvLayer.h"
#include <backend/Backend.h>
#include <backend/IConfig.h>
}
}
+// Builds a srcn TransposeConvLayer function for this node (buffers, shapes,
+// padding, strides) and appends it to the execution builder.
+void KernelGenerator::visit(const model::operation::TransposeConvNode &node)
+{
+ using model::operation::TransposeConvNode;
+
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto ifm_index{node.getInputs().at(TransposeConvNode::Input::INPUT)};
+ const auto ker_index{node.getInputs().at(TransposeConvNode::Input::KERNEL)};
+ // NOTE(review): output_shape_index is fetched but unused here - the output
+ // size is taken from the ofm operand's shape instead. Confirm this is intended.
+ const auto output_shape_index{node.getInputs().at(TransposeConvNode::Input::OUTPUT_SHAPE)};
+
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_subg_layout);
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_subg_layout);
+ // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
+ const auto &ker_shape = _ctx.at(ker_index).shape();
+ const auto ker_height = ker_shape.dim(1);
+ const auto ker_width = ker_shape.dim(2);
+ const auto stride = node.param().stride;
+ // padding_type is an int flag: 1 when SAME padding, 0 otherwise.
+ const int padding_type = (node.param().padding.type == model::PaddingType::SAME);
+ const auto padding = neurun::util::calculatePadding(node.param().padding, ofm_shape, ifm_shape,
+ stride, ker_width, ker_height);
+
+ const auto ofm_backend_shape =
+ ::neurun::backend::srcn::kernel::getShape(_ctx.at(ofm_index), _current_subg_layout);
+ const auto ifm_backend_shape =
+ ::neurun::backend::srcn::kernel::getShape(_ctx.at(ifm_index), _current_subg_layout);
+ // The kernel shape is taken without a frontend-layout conversion (UNKNOWN) -
+ // presumably because ConstantInitializer already relayouted the kernel data.
+ const auto ker_backend_shape =
+ ::neurun::backend::srcn::kernel::getShape(_ctx.at(ker_index), model::Layout::UNKNOWN);
+
+ auto ofm_alloc = _tensor_builder->at(ofm_index);
+ auto ifm_alloc = _tensor_builder->at(ifm_index);
+ auto ker_alloc = _tensor_builder->at(ker_index);
+
+ auto fn = nnfw::cpp14::make_unique<::neurun::backend::srcn::kernel::TransposeConvLayer>();
+
+ fn->configure(ifm_alloc->buffer(), ifm_backend_shape, ker_alloc->buffer(), ker_backend_shape,
+ padding_type, padding.left, padding.right, padding.top, padding.bottom,
+ stride.horizontal, stride.vertical, ofm_alloc->buffer(), ofm_backend_shape);
+
+ _execution_builder->append(std::move(fn));
+}
+
} // namespace srcn
} // namespace backend
} // namespace neurun
using IKernelGenerator::visit;
void visit(const model::Subgraph &) override;
+ void visit(const model::operation::TransposeConvNode &) override;
private:
const neurun::model::Operands &_ctx;
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperationUtils.h"
+
+#include <cmath>
+#include <algorithm>
+#include <cassert>
+
+#include "util/Utils.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+namespace kernel
+{
+
+// Returns shape1.dimensions[index1], asserting (debug builds only) that it
+// matches shape2.dimensions[index2]. In release builds the second pair is
+// only referenced to silence unused-parameter warnings.
+uint32_t MatchingDim(const Shape &shape1, int index1, const Shape &shape2, int index2)
+{
+ UNUSED_RELEASE(shape2);
+ UNUSED_RELEASE(index2);
+ assert(shape1.dimensions[index1] == shape2.dimensions[index2]);
+ return shape1.dimensions[index1];
+}
+
+// Converts a rank-4 filter coordinate (or dimension vector) between layouts.
+// @param from_coordinates  Rank-4 coordinates in from_layout order
+// @param from_layout       Layout the input coordinates are expressed in
+// @param to_layout         Layout to convert to
+// @return Coordinates reordered into to_layout
+// @throws std::runtime_error for layout pairs not implemented yet
+Coordinates convertCoordinates(const Coordinates &from_coordinates, FilterLayout from_layout,
+                               FilterLayout to_layout)
+{
+  assert(from_coordinates.size() == 4);
+  Coordinates to{from_coordinates};
+  if (from_layout == to_layout)
+  {
+    // Identity conversion - nothing to permute. (Previously this fell into
+    // the NYI branch and threw even though the answer is trivial.)
+    return to;
+  }
+  if (from_layout == FilterLayout::OHWI && to_layout == FilterLayout::HWOI)
+  {
+    // OHWI (o, h, w, i) -> HWOI (h, w, o, i)
+    to.set(0, from_coordinates[1]);
+    to.set(1, from_coordinates[2]);
+    to.set(2, from_coordinates[0]);
+    to.set(3, from_coordinates[3]);
+  }
+  else
+  {
+    throw std::runtime_error{"NYI"};
+  }
+
+  return to;
+}
+
+// Builds a backend kernel::Shape from an operand. For a rank-4 operand with an
+// NCHW frontend layout the dimensions are permuted to NHWC; otherwise the
+// operand's dimensions are taken as-is.
+Shape getShape(const ::neurun::model::Operand &o, ::neurun::model::Layout frontend_layout)
+{
+ Shape shape;
+
+ auto dims = o.shape().dims();
+ if (frontend_layout == ::neurun::model::Layout::NCHW && o.shape().rank() == 4)
+ {
+ // NCHW -> NHWC
+ uint32_t permutation[4] = {0, 2, 3, 1};
+ for (int i = 0; i < o.shape().rank(); ++i)
+ {
+ dims.at(i) = o.shape().dim(permutation[i]);
+ }
+ }
+ shape.dimensions = std::vector<uint32_t>(dims.begin(), dims.end());
+ // OperandType is an alias of model::DataType (see OperationUtils.h), so this
+ // cast is effectively an identity round-trip.
+ shape.type = static_cast<OperandType>(static_cast<int32_t>(o.typeInfo().type()));
+ shape.scale = o.typeInfo().scale();
+ shape.offset = o.typeInfo().offset();
+
+ // CPU backend assume that neurun internal shape's rank is always same or less than 4
+ assert(shape.dimensions.size() <= 4);
+
+ return shape;
+}
+
+} // namespace kernel
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_OPERATION_UTILS_H__
+#define __NEURUN_BACKEND_SRCN_OPERATION_UTILS_H__
+
+#include <iostream>
+#include <limits>
+#include <vector>
+
+#include "model/Operand.h"
+#include "model/DataType.h"
+#include <model/InternalType.h>
+
+using OperandType = neurun::model::DataType;
+using neurun::util::Coordinates;
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+namespace kernel
+{
+
+// Backend-local tensor descriptor: element type, dimension sizes (NHWC order
+// for rank-4 feature tensors - see getShape), and quantization parameters.
+struct Shape
+{
+ OperandType type;
+ std::vector<uint32_t> dimensions;
+ float scale;
+ int32_t offset;
+};
+
+// Type-punned view over an untyped tensor buffer.
+union DataPtr {
+ uint8_t *u8;
+ int8_t *i8;
+ int32_t *i32;
+ float *f;
+ void *v;
+};
+
+// Kernel (filter) dimension orderings understood by this backend.
+enum FilterLayout
+{
+ OHWI = 0, // TfLite Kernel Layout when using NHWC image layout
+ HWOI, // SRCN Transpose Conv Kernel Layout when using NHWC image layout
+ OIHW, // SRCN Transpose Conv Kernel Layout when using NCHW image layout
+};
+
+// Returns shape1.dimensions[index1]; asserts it equals shape2.dimensions[index2].
+uint32_t MatchingDim(const Shape &shape1, int index1, const Shape &shape2, int index2);
+
+// Permutes rank-4 coordinates between filter layouts; unsupported pairs throw.
+Coordinates convertCoordinates(const Coordinates &from_coordinates, FilterLayout from_layout,
+ FilterLayout to_layout);
+
+// Builds a Shape from an operand, permuting NCHW rank-4 dims to NHWC.
+Shape getShape(const ::neurun::model::Operand &o, ::neurun::model::Layout frontend_layout);
+
+} // namespace kernel
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_OPERATION_UTILS_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TransposeConvLayer.h"
+
+#include "OperationUtils.h"
+#include "srcn/srcn_conv.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+namespace kernel
+{
+
+// Zero-initializes all members; real parameters are supplied later via configure().
+TransposeConvLayer::TransposeConvLayer()
+ : _inputData(), _kernelData(), _outputData(), _inputShape(), _kernelShape(), _outputShape(),
+ _paddingType(0), _paddingLeft(0), _paddingTop(0), _paddingRight(0), _paddingBottom(0),
+ _strideWidth(0), _strideHeight(0), _inputType(OperandType::FLOAT32)
+{
+ // DO NOTHING
+}
+
+// Runs the float32 transposed convolution: packs the configured buffers and
+// shapes into srcn matrix/parameter descriptors and calls srcn_deconvolution2D.
+void TransposeConvLayer::convFloat32()
+{
+ nnfw::srcn::convMat_t in_mat, out_mat, kernel_mat;
+ nnfw::srcn::convParams_t in_param;
+
+ // Dimension indices below assume NHWC-ordered Shape::dimensions.
+ const int batches = MatchingDim(_inputShape, 0, _outputShape, 0);
+ const int input_height = _inputShape.dimensions[1];
+ const int input_width = _inputShape.dimensions[2];
+ const int input_depth = MatchingDim(_inputShape, 3, _kernelShape, 3);
+ in_mat.c = input_depth;
+ in_mat.w = input_width;
+ in_mat.h = input_height;
+ in_mat.n = batches;
+ in_mat.data = _inputData.f;
+
+ const int output_height = _outputShape.dimensions[1];
+ const int output_width = _outputShape.dimensions[2];
+ const int output_depth = MatchingDim(_kernelShape, 0, _outputShape, 3);
+ out_mat.c = output_depth;
+ out_mat.w = output_width;
+ out_mat.h = output_height;
+ out_mat.n = batches;
+ out_mat.data = _outputData.f;
+
+ // NOTE(review): kernel_mat maps c to output depth and n to input depth -
+ // confirm this matches srcn's expected kernel descriptor for deconvolution.
+ const int ker_height = _kernelShape.dimensions[1];
+ const int ker_width = _kernelShape.dimensions[2];
+ kernel_mat.c = output_depth;
+ kernel_mat.w = ker_width;
+ kernel_mat.h = ker_height;
+ kernel_mat.n = input_depth;
+ kernel_mat.data = _kernelData.f;
+
+ in_param.kernel_w = ker_width;
+ in_param.kernel_h = ker_height;
+ in_param.stride_w = _strideWidth;
+ in_param.stride_h = _strideHeight;
+ in_param.padding = _paddingType;
+ // NOTE(review): only left/top padding reach srcn; verify that right/bottom
+ // padding are implied by the padding type on the srcn side.
+ in_param.pad_w = _paddingLeft;
+ in_param.pad_h = _paddingTop;
+ in_param.dilation_w = 1;
+ in_param.dilation_h = 1;
+
+ // NOTE(review): the literal 4 appears to be a thread/core count - confirm
+ // against the srcn_deconvolution2D signature.
+ nnfw::srcn::srcn_deconvolution2D(in_mat, kernel_mat, out_mat, in_param, 4, nnfw::srcn::col_major);
+}
+
+// Records buffers, shapes, padding and stride parameters for a later run().
+// Raw buffer pointers are stored, not copied, and no validation happens here.
+void TransposeConvLayer::configure(uint8_t *inputData, const Shape inputShape, uint8_t *kernelData,
+ const Shape kernelShape, const uint32_t paddingType,
+ const uint32_t paddingLeft, const uint32_t paddingRight,
+ const uint32_t paddingTop, const uint32_t paddingBottom,
+ const uint32_t strideWidth, const uint32_t strideHeight,
+ uint8_t *outputData, const Shape outputShape)
+{
+ _inputData.u8 = inputData;
+ _inputShape = inputShape;
+ // The element type used by run() to dispatch is taken from the input shape.
+ _inputType = inputShape.type;
+ _kernelData.u8 = kernelData;
+ _kernelShape = kernelShape;
+ _paddingType = paddingType;
+ _paddingLeft = paddingLeft;
+ _paddingRight = paddingRight;
+ _paddingTop = paddingTop;
+ _paddingBottom = paddingBottom;
+ _strideWidth = strideWidth;
+ _strideHeight = strideHeight;
+ _outputData.u8 = outputData;
+ _outputShape = outputShape;
+}
+
+// Dispatches to the implementation matching the configured input type.
+// @throws std::runtime_error for input types with no implementation
+void TransposeConvLayer::run()
+{
+  if (_inputType == OperandType::FLOAT32)
+  {
+    convFloat32();
+  }
+  else if (_inputType == OperandType::QUANT8_ASYMM)
+  {
+    throw std::runtime_error("NYI");
+  }
+  else
+  {
+    // Previously any other type was silently ignored; fail loudly instead so
+    // a misconfigured layer cannot produce garbage output.
+    throw std::runtime_error("TransposeConvLayer: unsupported input type");
+  }
+}
+
+} // namespace kernel
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_BACKEND_SRCN_KERNEL_TRANSPOSECONV_LAYER_H__
+#define __NEURUN_BACKEND_SRCN_KERNEL_TRANSPOSECONV_LAYER_H__
+
+#include <exec/IFunction.h>
+
+#include "OperationUtils.h"
+
+namespace neurun
+{
+namespace backend
+{
+namespace srcn
+{
+namespace kernel
+{
+
+// IFunction implementation that runs a 2-D transposed convolution
+// (deconvolution) through the srcn library. Parameters are injected via
+// configure(); only FLOAT32 input is currently implemented.
+class TransposeConvLayer : public ::neurun::exec::IFunction
+{
+public:
+  TransposeConvLayer();
+
+public:
+  // Float32 implementation backed by srcn_deconvolution2D.
+  void convFloat32();
+  // Stores buffers, shapes, padding and stride parameters; no computation
+  // happens here. Raw buffer pointers are kept, not copied.
+  void configure(uint8_t *inputData, const Shape inputShape, uint8_t *kernelData,
+                 const Shape kernelShape, const uint32_t paddingType, const uint32_t paddingLeft,
+                 const uint32_t paddingRight, const uint32_t paddingTop,
+                 const uint32_t paddingBottom, const uint32_t strideW, const uint32_t strideH,
+                 uint8_t *outputData, const Shape outputShape);
+
+  // 'override' added so the compiler checks these against exec::IFunction.
+  void run() override;
+  // Synchronous hook used for profiling; this backend runs synchronously,
+  // so it simply forwards to run().
+  void runSync() override
+  {
+    run();
+  }
+
+private:
+  DataPtr _inputData;  // non-owning input buffer
+  DataPtr _kernelData; // non-owning kernel buffer
+  DataPtr _outputData; // non-owning output buffer
+
+  Shape _inputShape;
+  Shape _kernelShape;
+  Shape _outputShape;
+
+  uint32_t _paddingType;
+  uint32_t _paddingLeft;
+  uint32_t _paddingTop;
+  uint32_t _paddingRight;
+  uint32_t _paddingBottom;
+
+  uint32_t _strideWidth;
+  uint32_t _strideHeight;
+
+  OperandType _inputType; // dispatch key for run()
+};
+
+} // namespace kernel
+} // namespace srcn
+} // namespace backend
+} // namespace neurun
+
+#endif // __NEURUN_BACKEND_SRCN_KERNEL_TRANSPOSECONV_LAYER_H__