* [moco/tf] Introduce TFDepthwiseConv2D IR.
This commit introduces the TFDepthwiseConv2D IR.
Signed-off-by: seongwoo <sw4670.chae@samsung.com>
* Apply the 'Native' keyword to the op name.
#include "IR/TFBiasAdd.h"
#include "IR/TFConst.h"
#include "IR/TFConv2D.h"
+#include "IR/TFDepthwiseConv2dNative.h"
#include "IR/TFFusedBatchNorm.h"
#include "IR/TFIdentity.h"
#include "IR/TFMul.h"
TENSORFLOW_NODE(BiasAdd, TFBiasAdd)
TENSORFLOW_NODE(Const, TFConst)
TENSORFLOW_NODE(Conv2D, TFConv2D)
+TENSORFLOW_NODE(DepthwiseConv2dNative, TFDepthwiseConv2dNative)
TENSORFLOW_NODE(FusedBatchNorm, TFFusedBatchNorm)
TENSORFLOW_NODE(Identity, TFIdentity)
TENSORFLOW_NODE(Mul, TFMul)
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOCO_TF_IR_TFDEPTHWISECONV2DNATIVE_H__
+#define __MOCO_TF_IR_TFDEPTHWISECONV2DNATIVE_H__
+
+#include "Dialect/TFNodeDecl.h"
+
+#include "Convert.h"
+
+#include <loco/IR/Stride.h>
+#include <loco/IR/Pad.h>
+
+#include <string>
+
+namespace moco
+{
+namespace tf
+{
+
+/// @brief IR node corresponding to the TensorFlow "DepthwiseConv2dNative" operation
+///
+/// Fixed arity of 2: input feature map (slot 0) and kernel (slot 1).
+/// Padding, data layout and strides are kept as plain attribute values
+/// copied from the GraphDef node.
+class TFDepthwiseConv2dNative final
+    : public loco::FixedArityNode<2, TFNodeImpl<TFOpcode::DepthwiseConv2dNative>>
+{
+public:
+  /// @brief Input feature map (arity slot 0); nullptr until connected
+  loco::Node *ifm(void) const { return at(0)->node(); }
+  void ifm(Node *node) { at(0)->node(node); }
+
+  /// @brief Kernel tensor (arity slot 1); nullptr until connected
+  loco::Node *ker(void) const { return at(1)->node(); }
+  void ker(Node *node) { at(1)->node(node); }
+
+public:
+  /// @brief Padding scheme string (e.g. "SAME" or "VALID"); empty by default
+  const TFPadding &padding(void) const { return _padding; }
+  void padding(const TFPadding &padding) { _padding = padding; }
+
+  /// @brief Data layout string (e.g. "NHWC"); empty by default
+  const TFDataLayout &data_layout(void) const { return _data_layout; }
+  void data_layout(const TFDataLayout &data_layout) { _data_layout = data_layout; }
+
+  /// @brief Per-dimension strides as given in the GraphDef attribute
+  const std::vector<int64_t> &strides(void) const { return _strides; }
+  void strides(const std::vector<int64_t> &strides) { _strides = strides; }
+
+private:
+  TFPadding _padding;
+  TFDataLayout _data_layout;
+  std::vector<int64_t> _strides;
+  // TODO Support "Dilation"
+};
+
+} // namespace tf
+} // namespace moco
+
+#endif // __MOCO_TF_IR_TFDEPTHWISECONV2DNATIVE_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IR/TFDepthwiseConv2dNative.h"
+
+#include "Dialect/TFDialect.h"
+
+#include <gtest/gtest.h>
+
+// Verify the default state of a freshly constructed TFDepthwiseConv2dNative node
+TEST(TFDepthwiseConv2dNativeTest, constructor)
+{
+  moco::tf::TFDepthwiseConv2dNative node;
+
+  // Node identity: dialect and opcode
+  ASSERT_EQ(node.dialect(), moco::tf::TFDialect::get());
+  ASSERT_EQ(node.opcode(), moco::tf::TFOpcode::DepthwiseConv2dNative);
+
+  // Inputs start unconnected; attributes start empty
+  ASSERT_EQ(node.ifm(), nullptr);
+  ASSERT_EQ(node.ker(), nullptr);
+  ASSERT_EQ(node.padding(), "");
+  ASSERT_EQ(node.data_layout(), "");
+  ASSERT_EQ(node.strides().size(), 0);
+}
return true;
}
+/**
+ * @brief Convert the "SAME"/"VALID" padding attribute of a TFDepthwiseConv2dNative
+ *        node into explicit top/left/bottom/right values (PadData annotation)
+ *
+ * Requires ShapeInferenceData on the node itself (output shape), on its ifm and
+ * on its ker, plus a StrideData annotation.
+ *
+ * @return true when a PadData annotation has been attached; false when the
+ *         conversion was already done or a prerequisite annotation is missing
+ */
+bool fix_padding(moco::tf::TFDepthwiseConv2dNative *node)
+{
+  LOGGER(l);
+
+  auto pad_data_c = node->annot<PadData>();
+  if (pad_data_c != nullptr)
+  {
+    // padding conversion is already done
+    return false;
+  }
+
+  auto ofm_shapedata = node->annot<ShapeInferenceData>();
+  if (ofm_shapedata == nullptr)
+  {
+    // need output shape to calculate padding values
+    return false;
+  }
+
+  auto ifm = node->ifm();
+  assert(ifm != nullptr);
+  auto ifm_shapedata = ifm->annot<ShapeInferenceData>();
+  if (ifm_shapedata == nullptr)
+  {
+    // need input shape to calculate padding values
+    return false;
+  }
+
+  auto ker = node->ker();
+  assert(ker != nullptr);
+  auto ker_shapedata = ker->annot<ShapeInferenceData>();
+  if (ker_shapedata == nullptr)
+  {
+    // need kernel shape to calculate padding values
+    return false;
+  }
+
+  auto stride_data = node->annot<StrideData>();
+  if (stride_data == nullptr)
+  {
+    // need stride data but not ready yet
+    return false;
+  }
+
+  auto padding = node->padding();
+  assert(padding == "VALID" || padding == "SAME");
+
+  // only NHWC input layout is handled here
+  auto data_layout = node->data_layout();
+  assert(data_layout == "NHWC");
+
+  auto ifm_tensor_shape = ifm_shapedata->tensor_shape(); // in NHWC
+  auto ker_tensor_shape = ker_shapedata->tensor_shape(); // in HWCM
+  auto ofm_tensor_shape = ofm_shapedata->tensor_shape(); // in NHWC
+  assert(ifm_tensor_shape.rank() == 4);
+  assert(ker_tensor_shape.rank() == 4);
+  assert(ofm_tensor_shape.rank() == 4);
+
+  uint32_t input_height = ifm_tensor_shape.dim(1).value();
+  uint32_t input_width = ifm_tensor_shape.dim(2).value();
+  uint32_t stride_height = stride_data->stride()->vertical();
+  uint32_t stride_width = stride_data->stride()->horizontal();
+  uint32_t ker_height = ker_tensor_shape.dim(0).value();
+  uint32_t ker_width = ker_tensor_shape.dim(1).value();
+  uint32_t output_height = ofm_tensor_shape.dim(1).value();
+  uint32_t output_width = ofm_tensor_shape.dim(2).value();
+
+  uint32_t dilation_height = 1; // TODO Consider dilation
+  uint32_t dilation_width = 1;
+  uint32_t effective_ker_height = dilation_height * (ker_height - 1) + 1;
+  uint32_t effective_ker_width = dilation_width * (ker_width - 1) + 1;
+  // calculate padding height, width
+  // total pad = (out - 1) * stride + effective_kernel - in, clamped at zero
+  int32_t i_height = (output_height - 1) * stride_height + effective_ker_height - input_height;
+  int32_t i_width = (output_width - 1) * stride_width + effective_ker_width - input_width;
+  uint32_t height = i_height >= 0 ? i_height : 0U;
+  uint32_t width = i_width >= 0 ? i_width : 0U;
+
+  // annotation of pad data
+  auto pad_data = stdex::make_unique<PadData>();
+
+  // split total padding as evenly as possible; the extra pixel (odd total)
+  // goes to the bottom/right side
+  pad_data->pad()->top(height / 2);
+  pad_data->pad()->bottom(height - pad_data->pad()->top());
+  pad_data->pad()->left(width / 2);
+  pad_data->pad()->right(width - pad_data->pad()->left());
+
+  node->annot(std::move(pad_data));
+
+  {
+    auto pad_data = node->annot<PadData>();
+    assert(pad_data != nullptr);
+
+    // clang-format off
+    INFO(l) << "Fix TFDepthwiseConv2dNative pad "
+            << "= T " << pad_data->pad()->top()
+            << ", L " << pad_data->pad()->left()
+            << ", B " << pad_data->pad()->bottom()
+            << ", R " << pad_data->pad()->right() << std::endl;
+    // clang-format on
+  }
+
+  return true;
+}
+
// FusedBatchNorm has no padding attribute, so there is nothing to convert
bool fix_padding(moco::tf::TFFusedBatchNorm *node)
{
  // Nothing to do with padding
  return true;
}
+/**
+ * @brief Infer and annotate the output shape of a TFDepthwiseConv2dNative node
+ *
+ * Copies the node's stride attribute into a StrideData annotation (honoring the
+ * NHWC/NCHW data layout), then computes the NHWC output shape from the input
+ * (NHWC) and kernel (HWCM) shapes following the TF "SAME"/"VALID" formulas.
+ * The output depth is C * M (channels times depthwise multiplier).
+ *
+ * @return true when a ShapeInferenceData annotation has been attached; false
+ *         when inference was already done or an input shape is not ready yet
+ */
+bool fix_shape(moco::tf::TFDepthwiseConv2dNative *node)
+{
+  LOGGER(l);
+
+  auto shapedata = node->annot<ShapeInferenceData>();
+  if (shapedata != nullptr)
+  {
+    // shape inference is already done
+    return false;
+  }
+
+  auto ifm = node->ifm();
+  auto ifm_shapedata = ifm->annot<ShapeInferenceData>();
+  if (ifm_shapedata == nullptr)
+  {
+    // input node shape inference is not ready
+    return false;
+  }
+
+  auto ker = node->ker();
+  auto ker_shapedata = ker->annot<ShapeInferenceData>();
+  if (ker_shapedata == nullptr)
+  {
+    // kernel node shape inference is not ready
+    return false;
+  }
+
+  // stride annotation must not exist yet; it is attached exactly once below
+  auto stride_data = node->annot<StrideData>();
+  assert(stride_data == nullptr);
+
+  auto stride_copy = stdex::make_unique<StrideData>();
+  const auto &strides = node->strides();
+  auto data_layout = as_DataLayout(node->data_layout());
+  if (data_layout == DataLayout::NHWC)
+  {
+    stride_copy->stride()->vertical(strides[1]);
+    stride_copy->stride()->horizontal(strides[2]);
+  }
+  else if (data_layout == DataLayout::NCHW)
+  {
+    stride_copy->stride()->vertical(strides[2]);
+    stride_copy->stride()->horizontal(strides[3]);
+  }
+  else
+  {
+    throw std::runtime_error{"Not supported for other data layout"};
+  }
+  node->annot(std::move(stride_copy));
+
+  // NOTE stride_copy was moved into the node above; re-query the annotation
+  //      here (the pointer obtained before attaching was null)
+  stride_data = node->annot<StrideData>();
+  assert(stride_data != nullptr);
+
+  INFO(l) << "FixShape TFDepthwiseConv2dNative strides = "
+          << stride_data->stride()->vertical() << ", "
+          << stride_data->stride()->horizontal();
+
+  auto ifm_tensor_shape = ifm_shapedata->tensor_shape(); // in NHWC
+  auto ker_tensor_shape = ker_shapedata->tensor_shape(); // in HWCM
+  assert(ifm_tensor_shape.rank() == 4);
+  assert(ker_tensor_shape.rank() == 4);
+
+  uint32_t input_height = ifm_tensor_shape.dim(1).value();
+  uint32_t input_width = ifm_tensor_shape.dim(2).value();
+  uint32_t stride_height = stride_data->stride()->vertical();
+  uint32_t stride_width = stride_data->stride()->horizontal();
+  uint32_t ker_height = ker_tensor_shape.dim(0).value();
+  uint32_t ker_width = ker_tensor_shape.dim(1).value();
+  uint32_t dilation_height = 1; // TODO Consider dilation
+  uint32_t dilation_width = 1;
+  uint32_t effective_ker_height = dilation_height * (ker_height - 1) + 1;
+  uint32_t effective_ker_width = dilation_width * (ker_width - 1) + 1;
+  uint32_t output_height;
+  uint32_t output_width;
+
+  auto padding = node->padding();
+  assert(padding == "VALID" || padding == "SAME");
+
+  if (padding == "VALID")
+  {
+    // out = ceil((in - effective_ker + 1) / stride)
+    output_height = (input_height + stride_height - effective_ker_height) / stride_height;
+    output_width = (input_width + stride_width - effective_ker_width) / stride_width;
+  }
+  else // padding == "SAME"
+  {
+    // out = ceil(in / stride)
+    output_height = (input_height + stride_height - 1) / stride_height;
+    output_width = (input_width + stride_width - 1) / stride_width;
+  }
+
+  loco::TensorShape ofm_tensor_shape;
+  ofm_tensor_shape.rank(4);
+  ofm_tensor_shape.dim(0) = ifm_tensor_shape.dim(0);
+  ofm_tensor_shape.dim(1) = output_height;
+  ofm_tensor_shape.dim(2) = output_width;
+  // depthwise output depth = in_channels (dim 2) * depth_multiplier (dim 3)
+  ofm_tensor_shape.dim(3) =
+      loco::Dimension(ker_tensor_shape.dim(2).value() * ker_tensor_shape.dim(3).value());
+
+  auto shape_data = stdex::make_unique<ShapeInferenceData>();
+  shape_data->tensor_shape(ofm_tensor_shape);
+  node->annot(std::move(shape_data));
+
+  INFO(l) << "Fix TFDepthwiseConv2dNative shape = ifm" << ifm_tensor_shape
+          << " ker" << ker_tensor_shape << " --> ofm" << ofm_tensor_shape;
+
+  return true;
+}
+
bool fix_shape(moco::tf::TFFusedBatchNorm *node)
{
// Output shape is same as the input