From 83eb8348e225883b1af2d254ca4853268204fa4c Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EB=B0=95=EC=84=B8=ED=9D=AC/On-Device=20Lab=28SR=29/Princip?=
 =?utf8?q?al=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Wed, 23 Oct 2019 14:35:06 +0900
Subject: [PATCH] [moco] Introduce Support (#8407)

This will introduce moco-support with TFShapeInferenceHelper and related

Signed-off-by: SaeHie Park <saehie.park@samsung.com>
---
 compiler/moco/CMakeLists.txt                       |   1 +
 compiler/moco/support/CMakeLists.txt               |   6 +
 compiler/moco/support/README.md                    |   3 +
 .../support/include/moco/Support/TFDataLayout.h    |  29 ++
 .../moco/support/include/moco/Support/TFPadding.h  |  29 ++
 .../include/moco/Support/TFShapeInferenceHelper.h  | 221 ++++++++++++++
 .../moco/support/src/TFShapeInferenceHelper.cpp    | 338 +++++++++++++++++++++
 7 files changed, 627 insertions(+)
 create mode 100644 compiler/moco/support/CMakeLists.txt
 create mode 100644 compiler/moco/support/README.md
 create mode 100644 compiler/moco/support/include/moco/Support/TFDataLayout.h
 create mode 100644 compiler/moco/support/include/moco/Support/TFPadding.h
 create mode 100644 compiler/moco/support/include/moco/Support/TFShapeInferenceHelper.h
 create mode 100644 compiler/moco/support/src/TFShapeInferenceHelper.cpp

diff --git a/compiler/moco/CMakeLists.txt b/compiler/moco/CMakeLists.txt
index 96fea6f..60496a6 100644
--- a/compiler/moco/CMakeLists.txt
+++ b/compiler/moco/CMakeLists.txt
@@ -1,2 +1,3 @@
+add_subdirectory(support)
 add_subdirectory(lang)
 add_subdirectory(import)
diff --git a/compiler/moco/support/CMakeLists.txt b/compiler/moco/support/CMakeLists.txt
new file mode 100644
index 0000000..d30b104
--- /dev/null
+++ b/compiler/moco/support/CMakeLists.txt
@@ -0,0 +1,6 @@
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+
+add_library(moco_support SHARED ${SOURCES})
+target_include_directories(moco_support PRIVATE src)
+target_include_directories(moco_support PUBLIC include)
+target_link_libraries(moco_support PUBLIC loco)
diff --git a/compiler/moco/support/README.md b/compiler/moco/support/README.md
new file mode 100644
index 0000000..081f65d
--- /dev/null
+++ b/compiler/moco/support/README.md
@@ -0,0 +1,3 @@
+# support
+
+_support_ provides _moco_ support libraries
diff --git a/compiler/moco/support/include/moco/Support/TFDataLayout.h b/compiler/moco/support/include/moco/Support/TFDataLayout.h
new file mode 100644
index 0000000..02da557
--- /dev/null
+++ b/compiler/moco/support/include/moco/Support/TFDataLayout.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOCO_SUPPORT_TFDATALAYOUT_H__
+#define __MOCO_SUPPORT_TFDATALAYOUT_H__
+
+#include <string>
+
+namespace moco
+{
+
+using TFDataLayout = std::string;
+
+} // namespace moco
+
+#endif // __MOCO_SUPPORT_TFDATALAYOUT_H__
diff --git a/compiler/moco/support/include/moco/Support/TFPadding.h b/compiler/moco/support/include/moco/Support/TFPadding.h
new file mode 100644
index 0000000..bbded2c
--- /dev/null
+++ b/compiler/moco/support/include/moco/Support/TFPadding.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOCO_SUPPORT_TFPADDING_H__
+#define __MOCO_SUPPORT_TFPADDING_H__
+
+#include <string>
+
+namespace moco
+{
+
+using TFPadding = std::string;
+
+} // namespace moco
+
+#endif // __MOCO_SUPPORT_TFPADDING_H__
diff --git a/compiler/moco/support/include/moco/Support/TFShapeInferenceHelper.h b/compiler/moco/support/include/moco/Support/TFShapeInferenceHelper.h
new file mode 100644
index 0000000..3d574a5
--- /dev/null
+++ b/compiler/moco/support/include/moco/Support/TFShapeInferenceHelper.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MOCO_SUPPORT_SHAPE_INFERENCE_HELPER_H__
+#define __MOCO_SUPPORT_SHAPE_INFERENCE_HELPER_H__
+
+#include "moco/Support/TFDataLayout.h"
+#include "moco/Support/TFPadding.h"
+
+#include <loco.h>
+#include <loco/IR/NodeShape.h>
+#include <loco/IR/Padding2D.h>
+#include <loco/IR/Stride.h>
+#include <loco/IR/Window.h>
+
+#include <cassert>
+
+namespace moco
+{
+
+/**
+ * @note  Helper that returns the broadcasted shape for binary operators whose
+ *        inputs x and y have different shapes
+ */
+loco::TensorShape broadcast_shape(const loco::TensorShape &x, const loco::TensorShape &y);
+
+} // namespace moco
+
+namespace moco
+{
+
+/**
+ * @brief Return true if the node has shape inference data, i.e. shape
+ *        inference for the node is done
+ *
+ * @note  Will be deprecated in the near future
+ */
+bool shape_inference_done(const loco::Node *node);
+
+/**
+ * @note  While in shape inference, a node may be in the Canonical, TF or other dialects.
+ *        These helpers provide the shape as a common loco::NodeShape.
+ */
+loco::NodeShape node_shape(const loco::Node *node);
+bool node_shape(const loco::Node *node, loco::NodeShape &nodeshape);
+
+loco::TensorShape as_tensor_shape(const loco::FeatureShape &feature_shape,
+                                  const TFDataLayout &data_layout);
+
+loco::FeatureShape as_feature_shape(const loco::NodeShape &nodeshape,
+                                    const TFDataLayout &data_layout);
+
+} // namespace moco
+
+namespace moco
+{
+
+struct PlaneShape
+{
+  loco::Dimension height;
+  loco::Dimension width;
+};
+
+class FeatureShapeUpdater final
+{
+public:
+  FeatureShapeUpdater(loco::FeatureShape *ptr) : _feature_shape_ptr{ptr}
+  {
+    // DO NOTHING
+  }
+
+public:
+  void with(const PlaneShape &plane_shape) const
+  {
+    _feature_shape_ptr->height() = plane_shape.height;
+    _feature_shape_ptr->width() = plane_shape.width;
+  }
+
+private:
+  loco::FeatureShape *_feature_shape_ptr;
+};
+
+PlaneShape make_plane_shape(const loco::FeatureShape &feature_shape);
+
+FeatureShapeUpdater update(loco::FeatureShape &feature_shape);
+
+class PlaneInference
+{
+protected:
+  struct Parameters
+  {
+    PlaneShape input;
+    PlaneShape stride;
+    PlaneShape window;
+    PlaneShape dilation;
+    PlaneShape effective_window;
+    PlaneShape output;
+  };
+
+  void fill(Parameters &p, const PlaneShape &in)
+  {
+    p.input.height = in.height;
+    p.input.width = in.width;
+
+    p.stride.height = _stride.vertical();
+    p.stride.width = _stride.horizontal();
+
+    p.window.height = _window.vertical();
+    p.window.width = _window.horizontal();
+
+    // TODO support dilation
+    p.dilation.height = 1;
+    p.dilation.width = 1;
+
+    p.effective_window.height = p.dilation.height.value() * (p.window.height.value() - 1) + 1;
+    p.effective_window.width = p.dilation.width.value() * (p.window.width.value() - 1) + 1;
+  }
+
+  PlaneShape infer(const Parameters &p, const PlaneShape &)
+  {
+    PlaneShape res;
+
+    if (_padding == "VALID")
+    {
+      res.height =
+          (p.input.height.value() + p.stride.height.value() - p.effective_window.height.value()) /
+          p.stride.height.value();
+      res.width =
+          (p.input.width.value() + p.stride.width.value() - p.effective_window.width.value()) /
+          p.stride.width.value();
+    }
+    else if (_padding == "SAME")
+    {
+      res.height = (p.input.height.value() + p.stride.height.value() - 1) / p.stride.height.value();
+      res.width = (p.input.width.value() + p.stride.width.value() - 1) / p.stride.width.value();
+    }
+    else
+      assert(false);
+
+    return res;
+  }
+
+public:
+  PlaneShape operator()(const PlaneShape &in)
+  {
+    Parameters p;
+
+    fill(p, in);
+
+    return infer(p, in);
+  }
+
+public:
+  void padding(const TFPadding &value) { _padding = value; }
+  void window(const loco::Window<2> value) { _window = value; }
+  void stride(const loco::Stride<2> value) { _stride = value; }
+
+private:
+  TFPadding _padding;
+  loco::Window<2> _window;
+  loco::Stride<2> _stride;
+};
+
+class Padding2DInference final : public PlaneInference
+{
+public:
+  loco::Padding2D operator()(const PlaneShape &in)
+  {
+    Parameters p;
+
+    fill(p, in);
+
+    auto output = infer(p, in);
+
+    int64_t i_height = (int64_t)(output.height.value() - 1) * (int64_t)p.stride.height.value() +
+                       (int64_t)p.effective_window.height.value() - (int64_t)p.input.height.value();
+    int64_t i_width = (int64_t)(output.width.value() - 1) * (int64_t)p.stride.width.value() +
+                      (int64_t)p.effective_window.width.value() - (int64_t)p.input.width.value();
+
+    uint32_t pad_height = i_height >= 0 ? (uint32_t)i_height : 0U;
+    uint32_t pad_width = i_width >= 0 ? (uint32_t)i_width : 0U;
+
+    loco::Padding2D padding2d;
+
+    padding2d.top(pad_height / 2);
+    padding2d.bottom(pad_height - padding2d.top());
+    padding2d.left(pad_width / 2);
+    padding2d.right(pad_width - padding2d.left());
+
+    return padding2d;
+  }
+};
+
+} // namespace moco
+
+namespace moco
+{
+
+using TFStrides = std::vector<int64_t>;
+using TFKSize = std::vector<int64_t>;
+
+loco::Stride<2> stride_of(const TFStrides &strides, const TFDataLayout &datalayout);
+loco::Window<2> window_of(const TFKSize &ksize, const TFDataLayout &datalayout);
+loco::Window<2> window_of(const loco::TensorShape &shape, const TFDataLayout &datalayout);
+
+} // namespace moco
+
+#endif // __MOCO_SUPPORT_SHAPE_INFERENCE_HELPER_H__
diff --git a/compiler/moco/support/src/TFShapeInferenceHelper.cpp b/compiler/moco/support/src/TFShapeInferenceHelper.cpp
new file mode 100644
index 0000000..0adbb5f
--- /dev/null
+++ b/compiler/moco/support/src/TFShapeInferenceHelper.cpp
@@ -0,0 +1,338 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "moco/Support/TFShapeInferenceHelper.h"
+
+#include <loco/Service/ShapeInference.h>
+
+#include <cassert>
+
+namespace
+{
+
+// TODO Use code in loco and remove the duplicated broadcast_shape() and related helpers
+/**
+ * @brief Create a higher-rank TensorShape following NumPy broadcasting semantics
+ *
+ * HOW TO USE:
+ *
+ *   auto expanded_tensor_shape = expand(tensor_shape).to(N);
+ */
+class TensorShapeExpander
+{
+public:
+  TensorShapeExpander(const loco::TensorShape &shape) : _shape{shape}
+  {
+    // DO NOTHING
+  }
+
+public:
+  loco::TensorShape to(uint32_t output_rank)
+  {
+    auto const &input_shape = _shape;
+    uint32_t const input_rank = input_shape.rank();
+
+    assert(input_rank <= output_rank && "Cannot shrink rank");
+    uint32_t const axis_shift = output_rank - input_rank;
+
+    loco::TensorShape output_shape;
+
+    output_shape.rank(output_rank);
+    for (uint32_t axis = 0; axis < output_rank; ++axis)
+    {
+      output_shape.dim(axis) = (axis < axis_shift) ? 1 : input_shape.dim(axis - axis_shift);
+    }
+
+    return output_shape;
+  }
+
+private:
+  const loco::TensorShape _shape;
+};
+
+/**
+ * @brief Expand shapes x and y to the same rank, aligning right and filling with 1
+ */
+void expand_rank(loco::TensorShape &x, loco::TensorShape &y)
+{
+  auto x_rank = x.rank();
+  auto y_rank = y.rank();
+
+  if (x_rank == y_rank)
+    return;
+
+  TensorShapeExpander x_exp(x);
+  TensorShapeExpander y_exp(y);
+
+  auto xy_rank = std::max(x_rank, y_rank);
+
+  x = x_rank > y_rank ? x : x_exp.to(xy_rank);
+  y = y_rank > x_rank ? y : y_exp.to(xy_rank);
+}
+
+/**
+ * @brief Return the element-wise broadcasted shape of inputs x and y having the same rank
+ */
+loco::TensorShape expand_dimension(const loco::TensorShape &x, const loco::TensorShape &y)
+{
+  assert(x.rank() == y.rank());
+
+  auto rank = x.rank();
+
+  loco::TensorShape output_shape;
+
+  output_shape.rank(rank);
+  for (uint32_t axis = 0; axis < rank; ++axis)
+  {
+    assert(x.dim(axis).known() && y.dim(axis).known());
+
+    auto x_dim = x.dim(axis).value();
+    auto y_dim = y.dim(axis).value();
+
+    // each dimension of x and y should be the same, or one of them must be 1
+    if (!((x_dim == y_dim) || (x_dim == 1 || y_dim == 1)))
+      throw std::runtime_error("Cannot produce expand_dimension of two shapes");
+
+    output_shape.dim(axis) = std::max(x_dim, y_dim);
+  }
+
+  return output_shape;
+}
+
+} // namespace
+
+namespace moco
+{
+
+loco::TensorShape broadcast_shape(const loco::TensorShape &x, const loco::TensorShape &y)
+{
+  auto x_match = x;
+  auto y_match = y;
+
+  expand_rank(x_match, y_match);
+
+  auto output_shape = expand_dimension(x_match, y_match);
+
+  return output_shape;
+}
+
+} // namespace moco
+
+namespace moco
+{
+
+loco::NodeShape node_shape(const loco::Node *node)
+{
+  loco::NodeShape nodeshape; // default domain is Unknown
+
+  if (loco::shape_known(node))
+  {
+    nodeshape = loco::shape_get(node);
+  }
+
+  return nodeshape;
+}
+
+bool node_shape(const loco::Node *node, loco::NodeShape &nodeshape)
+{
+  nodeshape = node_shape(node);
+  return (nodeshape.domain() != loco::Domain::Unknown);
+}
+
+loco::TensorShape as_tensor_shape(const loco::FeatureShape &feature_shape,
+                                  const TFDataLayout &data_layout)
+{
+  loco::TensorShape tensor_shape;
+
+  tensor_shape.rank(4);
+  if (data_layout == "NHWC")
+  {
+    tensor_shape.dim(0) = feature_shape.count();
+    tensor_shape.dim(1) = feature_shape.height();
+    tensor_shape.dim(2) = feature_shape.width();
+    tensor_shape.dim(3) = feature_shape.depth();
+  }
+  else if (data_layout == "NCHW")
+  {
+    tensor_shape.dim(0) = feature_shape.count();
+    tensor_shape.dim(1) = feature_shape.depth();
+    tensor_shape.dim(2) = feature_shape.height();
+    tensor_shape.dim(3) = feature_shape.width();
+  }
+  else
+  {
+    // TODO support for other data_layout if needed
+    throw std::runtime_error("as_tensor_shape: only supports NHWC or NCHW");
+  }
+
+  return tensor_shape;
+}
+
+loco::FeatureShape as_feature_shape(const loco::NodeShape &nodeshape,
+                                    const TFDataLayout &data_layout)
+{
+  if (nodeshape.domain() == loco::Domain::Feature)
+    return nodeshape.as<loco::FeatureShape>();
+
+  loco::FeatureShape feature_shape;
+
+  // only convert from tensor to feature
+  if (nodeshape.domain() != loco::Domain::Tensor)
+  {
+    throw std::runtime_error("as_feature_shape: domain is not tensor");
+  }
+
+  loco::TensorShape tensor_shape = nodeshape.as<loco::TensorShape>();
+
+  if (tensor_shape.rank() != 4)
+  {
+    throw std::runtime_error("as_feature_shape: rank is not 4");
+  }
+
+  if (data_layout == "NHWC")
+  {
+    feature_shape.count() = tensor_shape.dim(0);
+    feature_shape.height() = tensor_shape.dim(1);
+    feature_shape.width() = tensor_shape.dim(2);
+    feature_shape.depth() = tensor_shape.dim(3);
+  }
+  else if (data_layout == "NCHW")
+  {
+    feature_shape.count() = tensor_shape.dim(0);
+    feature_shape.depth() = tensor_shape.dim(1);
+    feature_shape.height() = tensor_shape.dim(2);
+    feature_shape.width() = tensor_shape.dim(3);
+  }
+  else
+  {
+    // TODO support for other data_layout if needed
+    throw std::runtime_error("as_feature_shape: only supports NHWC or NCHW");
+  }
+
+  return feature_shape;
+}
+
+} // namespace moco
+
+namespace moco
+{
+
+PlaneShape make_plane_shape(const loco::FeatureShape &feature_shape)
+{
+  PlaneShape plane_shape;
+
+  plane_shape.height = feature_shape.height();
+  plane_shape.width = feature_shape.width();
+
+  return plane_shape;
+}
+
+FeatureShapeUpdater update(loco::FeatureShape &feature_shape)
+{
+  return FeatureShapeUpdater{&feature_shape};
+}
+
+} // namespace moco
+
+namespace
+{
+
+/**
+ * @brief Class to represent TensorFlow "data_format" attr.
+ */
+enum class DataLayout
+{
+  NHWC,
+  NCHW,
+};
+
+DataLayout as_data_layout(const std::string &tf_layout_str)
+{
+  if (tf_layout_str == "NHWC")
+    return DataLayout::NHWC;
+  else if (tf_layout_str == "NCHW")
+    return DataLayout::NCHW;
+  else
+    throw std::runtime_error("unknown data layout");
+}
+
+} // namespace
+
+namespace moco
+{
+
+loco::Stride<2> stride_of(const TFStrides &strides, const TFDataLayout &datalayout)
+{
+  loco::Stride<2> stride;
+
+  auto data_layout = as_data_layout(datalayout);
+  if (data_layout == DataLayout::NHWC)
+  {
+    stride.vertical(strides[1]);
+    stride.horizontal(strides[2]);
+  }
+  else if (data_layout == DataLayout::NCHW)
+  {
+    stride.vertical(strides[2]);
+    stride.horizontal(strides[3]);
+  }
+
+  return stride;
+}
+
+loco::Window<2> window_of(const TFKSize &ksize, const TFDataLayout &datalayout)
+{
+  loco::Window<2> window;
+
+  auto data_layout = as_data_layout(datalayout);
+  if (data_layout == DataLayout::NHWC)
+  {
+    window.vertical(ksize[1]);
+    window.horizontal(ksize[2]);
+  }
+  else if (data_layout == DataLayout::NCHW)
+  {
+    window.vertical(ksize[2]);
+    window.horizontal(ksize[3]);
+  }
+
+  return window;
+}
+
+loco::Window<2> window_of(const loco::TensorShape &shape, const TFDataLayout &datalayout)
+{
+  loco::Window<2> window;
+
+  if (datalayout == "HWIO")
+  {
+    window.vertical(shape.dim(0).value());
+    window.horizontal(shape.dim(1).value());
+  }
+  else if (datalayout == "HWCM")
+  {
+    window.vertical(shape.dim(0).value());
+    window.horizontal(shape.dim(1).value());
+  }
+  else
+  {
+    // TODO add more datalayout supports if needed
+    assert(false);
+  }
+
+  return window;
+}
+
+} // namespace moco
-- 
2.7.4
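
Note for reviewers: PlaneInference and Padding2DInference above follow TensorFlow's SAME/VALID convention. Below is a minimal standalone sketch of the same arithmetic using plain integers instead of the loco types; the helper names and example values are illustrative only and are not part of this patch.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>

// Output extent for one spatial axis, following TensorFlow's SAME/VALID rules.
// effective_window = dilation * (window - 1) + 1; the patch fixes dilation to 1 for now.
uint32_t output_extent(const std::string &padding, uint32_t input, uint32_t window,
                       uint32_t stride, uint32_t dilation = 1)
{
  uint32_t effective_window = dilation * (window - 1) + 1;
  if (padding == "VALID")
    return (input + stride - effective_window) / stride; // ceil((input - effective_window + 1) / stride)
  assert(padding == "SAME");
  return (input + stride - 1) / stride; // ceil(input / stride)
}

// Total padding for one axis, split front/back the same way Padding2DInference does.
std::pair<uint32_t, uint32_t> pad_of(uint32_t input, uint32_t output, uint32_t window,
                                     uint32_t stride, uint32_t dilation = 1)
{
  uint32_t effective_window = dilation * (window - 1) + 1;
  int64_t total = (int64_t)(output - 1) * (int64_t)stride + (int64_t)effective_window - (int64_t)input;
  uint32_t pad = total >= 0 ? (uint32_t)total : 0U;
  return {pad / 2, pad - pad / 2}; // front (top/left) gets the smaller half
}

int main()
{
  // Example: 224x224 input, 3x3 window, stride 2, SAME padding
  uint32_t out_h = output_extent("SAME", 224, 3, 2); // 112
  auto pad_h = pad_of(224, out_h, 3, 2);             // {0, 1}
  std::cout << out_h << " " << pad_h.first << " " << pad_h.second << "\n";
  return 0;
}

This mirrors TensorFlow's convention that SAME splits any odd total padding with the extra cell on the bottom/right.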