#define __MOCO_TF_DIALECT_TFNODES_H__
#include "IR/TFAdd.h"
+#include "IR/TFConv2D.h"
#include "IR/TFFusedBatchNorm.h"
#include "IR/TFMul.h"
// TENSORFLOW_NODE(OPCODE, CLASS)
TENSORFLOW_NODE(Add, TFAdd)
+TENSORFLOW_NODE(Conv2D, TFConv2D)
TENSORFLOW_NODE(FusedBatchNorm, TFFusedBatchNorm)
TENSORFLOW_NODE(Mul, TFMul)
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOCO_TF_IR_TFCONV2D_H__
+#define __MOCO_TF_IR_TFCONV2D_H__
+
+#include "Dialect/TFNodeDecl.h"
+
+#include "Convert.h"
+
+#include <loco/IR/Stride.h>
+#include <loco/IR/Pad.h>
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace moco
+{
+namespace tf
+{
+
+/// @note These may be introduced as separate class
+using TFPadding = std::string;
+using TFDataLayout = std::string;
+
+/// @brief IR node that mirrors TensorFlow's Conv2D operation
+///
+/// Fixed-arity node with two inputs:
+///   - at(0): ifm (input feature map)
+///   - at(1): ker (kernel / filter)
+class TFConv2D final : public loco::FixedArityNode<2, TFNodeImpl<TFOpcode::Conv2D>>
+{
+public:
+  loco::Node *ifm(void) const { return at(0)->node(); }
+  // NOTE qualified as loco::Node for consistency with the getter (the
+  //      unqualified spelling only resolved via base-class name injection)
+  void ifm(loco::Node *node) { at(0)->node(node); }
+
+  loco::Node *ker(void) const { return at(1)->node(); }
+  void ker(loco::Node *node) { at(1)->node(node); }
+
+public:
+  /// @brief TensorFlow padding scheme string ("VALID" or "SAME")
+  const TFPadding &padding(void) const { return _padding; }
+  void padding(const TFPadding &padding) { _padding = padding; }
+
+  /// @brief TensorFlow data layout string (e.g. "NHWC" or "NCHW")
+  const TFDataLayout &data_layout(void) const { return _data_layout; }
+  void data_layout(const TFDataLayout &data_layout) { _data_layout = data_layout; }
+
+  /// @brief Per-dimension strides as taken from the TF "strides" attribute
+  ///        (TF convention is one entry per input dimension, i.e. 4 entries)
+  const std::vector<int64_t> &strides(void) const { return _strides; }
+  void strides(const std::vector<int64_t> &strides) { _strides = strides; }
+
+private:
+  TFPadding _padding;
+  TFDataLayout _data_layout;
+  std::vector<int64_t> _strides;
+  // TODO Support "Dilation"
+};
+
+} // namespace tf
+} // namespace moco
+
+#endif // __MOCO_TF_IR_TFCONV2D_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "IR/TFConv2D.h"
+
+#include "Dialect/TFDialect.h"
+
+#include <gtest/gtest.h>
+
+// Checks that a default-constructed TFConv2D identifies itself correctly
+// (TF dialect, Conv2D opcode) and starts with unset inputs and
+// default-initialized (empty) attributes.
+TEST(TFConv2DTest, constructor)
+{
+  moco::tf::TFConv2D conv2d_node;
+
+  // Dialect/opcode identity
+  ASSERT_EQ(conv2d_node.dialect(), moco::tf::TFDialect::get());
+  ASSERT_EQ(conv2d_node.opcode(), moco::tf::TFOpcode::Conv2D);
+
+  // Both inputs unset; string attributes empty; no strides recorded yet
+  ASSERT_EQ(conv2d_node.ifm(), nullptr);
+  ASSERT_EQ(conv2d_node.ker(), nullptr);
+  ASSERT_EQ(conv2d_node.padding(), "");
+  ASSERT_EQ(conv2d_node.data_layout(), "");
+  ASSERT_EQ(conv2d_node.strides().size(), 0);
+}
return false;
}
+// TFConv2D carries its padding as a plain attribute; there is nothing to
+// normalize here, so report "no change" to the fixer loop.
+bool fix_padding(moco::tf::TFConv2D *node)
+{
+  // Nothing to do with padding
+  return false;
+}
+
bool fix_padding(moco::tf::TFFusedBatchNorm *node)
{
// Nothing to do with padding
return copy_shapedata(x, node);
}
+/**
+ * @brief Infer and annotate the output shape of a TFConv2D node
+ *
+ * Returns false (no change) when the node is already annotated or when either
+ * input (ifm, ker) has no shape data yet; returns true after attaching a
+ * fresh ShapeInferenceData annotation with the computed NHWC output shape.
+ */
+bool fix_shape(moco::tf::TFConv2D *node)
+{
+  LOGGER(l);
+
+  auto shapedata = node->annot<ShapeInferenceData>();
+  if (shapedata != nullptr)
+  {
+    // shape inference is already done
+    return false;
+  }
+  auto ifm = node->ifm();
+  auto ifm_shapedata = ifm->annot<ShapeInferenceData>();
+  if (ifm_shapedata == nullptr)
+  {
+    // input node shape inference is not ready
+    return false;
+  }
+
+  auto ker = node->ker();
+  auto ker_shapedata = ker->annot<ShapeInferenceData>();
+  if (ker_shapedata == nullptr)
+  {
+    // kernel node shape inference is not ready
+    return false;
+  }
+
+  auto padding = node->padding();
+
+  // TODO move this to some new Transformation...
+  // Take by const-ref: strides() returns a reference, so copying the vector
+  // here was pure overhead.
+  const auto &strides = node->strides();
+  auto data_layout = as_DataLayout(node->data_layout());
+  assert(strides.size() == 4 && "TFConv2D strides should have one entry per dimension");
+  loco::Stride<2> stride;
+  if (data_layout == DataLayout::NHWC)
+  {
+    stride.vertical(strides[1]);
+    stride.horizontal(strides[2]);
+  }
+  else if (data_layout == DataLayout::NCHW)
+  {
+    stride.vertical(strides[2]);
+    stride.horizontal(strides[3]);
+  }
+  else
+  {
+    // Previously this fell through and used a default-initialized stride
+    assert(false && "Unknown data layout in fix_shape for TFConv2D");
+  }
+
+  auto ifm_tensor_shape = ifm_shapedata->tensor_shape(); // in NHWC
+  auto ker_tensor_shape = ker_shapedata->tensor_shape(); // in HWIO
+  assert(ifm_tensor_shape.rank() == 4);
+  assert(ker_tensor_shape.rank() == 4);
+
+  // NOTE(review): ifm dims are read as NHWC (dim 1 = H, dim 2 = W) regardless
+  // of data_layout — confirm NCHW inputs are converted before this pass
+  uint32_t input_height = ifm_tensor_shape.dim(1).value();
+  uint32_t input_width = ifm_tensor_shape.dim(2).value();
+  uint32_t stride_height = stride.vertical();
+  uint32_t stride_width = stride.horizontal();
+  uint32_t ker_height = ker_tensor_shape.dim(0).value();
+  uint32_t ker_width = ker_tensor_shape.dim(1).value();
+  uint32_t dilation_height = 1; // TODO Consider dilation
+  uint32_t dilation_width = 1;
+  uint32_t effective_ker_height = dilation_height * (ker_height - 1) + 1;
+  uint32_t effective_ker_width = dilation_width * (ker_width - 1) + 1;
+  // Zero-initialize: the "unknown padding" branch below only asserts, so in
+  // NDEBUG builds these would otherwise be read uninitialized (UB).
+  uint32_t output_height = 0;
+  uint32_t output_width = 0;
+
+  if (padding == "VALID")
+  {
+    // TF "VALID": no implicit padding; ceil((in - eff_ker + 1) / stride)
+    output_height = (input_height + stride_height - effective_ker_height) / stride_height;
+    output_width = (input_width + stride_width - effective_ker_width) / stride_width;
+  }
+  else if (padding == "SAME")
+  {
+    // TF "SAME": output covers the input; ceil(in / stride)
+    output_height = (input_height + stride_height - 1) / stride_height;
+    output_width = (input_width + stride_width - 1) / stride_width;
+  }
+  else
+  {
+    assert(false && "Unknown padding in fix_shape for TFConv2D");
+  }
+
+  // Output is N (from ifm), computed H/W, and O (output channels, from ker)
+  loco::TensorShape ofm_tensor_shape;
+  ofm_tensor_shape.rank(4);
+  ofm_tensor_shape.dim(0) = ifm_tensor_shape.dim(0);
+  ofm_tensor_shape.dim(1) = output_height;
+  ofm_tensor_shape.dim(2) = output_width;
+  ofm_tensor_shape.dim(3) = ker_tensor_shape.dim(3);
+
+  auto shape_data = stdex::make_unique<ShapeInferenceData>();
+  shape_data->tensor_shape(ofm_tensor_shape);
+  node->annot(std::move(shape_data));
+
+  INFO(l) << "Fix TFConv2D shape = ifm(" << ifm_tensor_shape.dim(0).value() << ","
+          << ifm_tensor_shape.dim(1).value() << "," << ifm_tensor_shape.dim(2).value() << ","
+          << ifm_tensor_shape.dim(3).value() << "), "
+          << "ker(" << ker_tensor_shape.dim(0).value() << "," << ker_tensor_shape.dim(1).value()
+          << "," << ker_tensor_shape.dim(2).value() << "," << ker_tensor_shape.dim(3).value()
+          << ") "
+          << "--> ofm(" << ofm_tensor_shape.dim(0).value() << "," << ofm_tensor_shape.dim(1).value()
+          << "," << ofm_tensor_shape.dim(2).value() << "," << ofm_tensor_shape.dim(3).value() << ")"
+          << std::endl;
+  return true;
+}
+
bool fix_shape(moco::tf::TFFusedBatchNorm *node)
{
// Output shape is same as the input