#include "model/OperationVisitor.h"
#include "model/Operands.h"
#include "model/Index.h"
+#include "model/Layout.h"
namespace neurun
{
-namespace graph
+namespace shape_inference
{
-/**
- * @brief Class for inferring tensor shapes
- */
-class ShapeInference
-{
-public:
- using OperandsShapes = std::unordered_map<model::OperandIndex, model::Shape>;
+using Shapes = std::vector<model::Shape>;
- explicit ShapeInference(const neurun::model::Operands &ctx) : _ctx(ctx) {}
- OperandsShapes inferShapes(const model::operation::AddNode &);
- OperandsShapes inferShapes(const model::operation::AvgPool2DNode &);
+Shapes inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape);
-private:
- const neurun::model::Operands &_ctx;
-};
+Shapes inferAvgPoolShape(const model::Shape &in_shape,
+ const model::operation::AvgPool2DNode::Param &param,
+ model::Layout layout = model::Layout::NHWC);
-} // namespace graph
+} // namespace shape_inference
} // namespace neurun
#endif // __NEURUN_GRAPH_SHAPE_INFERENCE_H__
* limitations under the License.
*/
-#include "util/ShapeInference.h"
#include "util/Utils.h"
#include "model/InternalType.h"
#include "model/Shape.h"
+#include "model/operation/AvgPool2DNode.h"
+#include "util/ShapeInference.h"
namespace neurun
{
-namespace graph
+namespace shape_inference
{
//
return (dividend + divisor - 1) / divisor;
}
-// Calculate output shape of binary elementwise operation
-model::Shape calcEltwiseOutShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape)
+// Calculate the shape that results from broadcasting two shapes together
+model::Shape broadcastShapes(const model::Shape &lhs_shape, const model::Shape &rhs_shape)
{
model::Shape out_shape;
auto max_rank = std::max(lhs_shape.rank(), rhs_shape.rank());
// Shape inference
//
-ShapeInference::OperandsShapes ShapeInference::inferShapes(const model::operation::AddNode &node)
+Shapes inferEltwiseShape(const model::Shape &lhs_shape, const model::Shape &rhs_shape)
{
- const auto lhs_index{node.getInputs().at(model::operation::AddNode::Input::LHS)};
- const auto rhs_index{node.getInputs().at(model::operation::AddNode::Input::RHS)};
- const auto lhs_shape{_ctx.at(lhs_index).shape()};
- const auto rhs_shape{_ctx.at(rhs_index).shape()};
- const auto out_index{node.getOutputs().at(0)};
- return {{out_index, calcEltwiseOutShape(lhs_shape, rhs_shape)}};
+ return {broadcastShapes(lhs_shape, rhs_shape)};
}
-ShapeInference::OperandsShapes
-ShapeInference::inferShapes(const model::operation::AvgPool2DNode &node)
+Shapes inferAvgPoolShape(const model::Shape &in_shape,
+ const model::operation::AvgPool2DNode::Param &param,
+ const model::Layout layout)
{
- const auto ofm_index{node.getOutputs().at(0)};
- const auto ifm_index{node.getInputs().at(model::operation::AvgPool2DNode::Input::INPUT)};
-
- const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
-
- const auto out_h_w =
- calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, node.param().kh, node.param().kw,
- node.param().padding, node.param().stride);
-
- assert(_ctx.at(ifm_index).layout() == model::Layout::NHWC);
- // Average pool don't change number of channels and batch size
- return {{ofm_index, {ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}}};
+ assert(layout == model::Layout::NHWC);
+ auto ifm_shape = in_shape.asFeature(layout);
+ const auto out_h_w = calcConvLikeHeightAndWidth(ifm_shape.H, ifm_shape.W, param.kh, param.kw,
+ param.padding, param.stride);
+ // Pooling doesn't change the number of channels or the batch size
+ return {model::Shape{ifm_shape.N, out_h_w.first, out_h_w.second, ifm_shape.C}};
}
-} // namespace graph
+} // namespace shape_inference
} // namespace neurun
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "model/Model.h"
-#include "model/InternalType.h"
-#include "model/TypeInfo.h"
-#include "model/DataType.h"
-#include "model/operation/AddNode.h"
-#include "util/ShapeInference.h"
-
-using namespace neurun::model;
-
-TEST(ShapeInference, AddNode)
-{
- Model model;
- TypeInfo type(DataType::FLOAT32);
- auto lhs_index = model.operands.emplace(Shape{1, 299, 299, 3}, type);
- auto rhs_index = model.operands.emplace(Shape{3}, type);
- auto out_index = model.operands.emplace(Shape{0}, type);
-
- operation::AddNode::Param param{Activation::NONE};
- operation::AddNode add_op({lhs_index.value(), rhs_index.value()}, {out_index.value()}, param);
-
- neurun::graph::ShapeInference shape_inference(model.operands);
- auto infered_shapes = shape_inference.inferShapes(add_op);
- auto infered_out_shape = infered_shapes.at(out_index);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.dim(0), 1);
- ASSERT_EQ(infered_out_shape.dim(1), 299);
- ASSERT_EQ(infered_out_shape.dim(2), 299);
- ASSERT_EQ(infered_out_shape.dim(3), 3);
-}
-
-TEST(ShapeInference, IncorrectAddNode)
-{
- Model model;
- TypeInfo type(DataType::FLOAT32);
- auto lhs_index = model.operands.emplace(Shape{1, 299, 299, 3}, type);
- auto rhs_index = model.operands.emplace(Shape{5}, type);
- auto out_index = model.operands.emplace(Shape{0}, type);
-
- operation::AddNode::Param param{Activation::NONE};
- operation::AddNode addOp({lhs_index.value(), rhs_index.value()}, {out_index.value()}, param);
-
- neurun::graph::ShapeInference shape_inference(model.operands);
- ASSERT_THROW(shape_inference.inferShapes(addOp), std::runtime_error);
-}
-
-TEST(ShapeInference, AvgPool2DNodeSame)
-{
- Model model;
- TypeInfo type(DataType::FLOAT32);
- auto input_index = model.operands.emplace(Shape{10, 6, 12, 20}, type);
- auto out_index = model.operands.emplace(Shape{0}, type);
-
- Stride stride{3, 7};
- Padding padding{PaddingType::SAME};
- operation::AvgPool2DNode::Param param{3, 6, stride, padding, Activation::NONE};
- operation::AvgPool2DNode avg_pool_op({input_index.value()}, {out_index.value()}, param);
-
- neurun::graph::ShapeInference shape_inference(model.operands);
- auto infered_shapes = shape_inference.inferShapes(avg_pool_op);
- auto infered_out_shape = infered_shapes.at(out_index);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature().N, 10);
- ASSERT_EQ(infered_out_shape.asFeature().H, 2);
- ASSERT_EQ(infered_out_shape.asFeature().W, 2);
- ASSERT_EQ(infered_out_shape.asFeature().C, 20);
-}
-
-TEST(ShapeInference, AvgPool2DNodeValid)
-{
- Model model;
- TypeInfo type(DataType::FLOAT32);
- auto input_index = model.operands.emplace(Shape{10, 6, 12, 20}, type);
- auto out_index = model.operands.emplace(Shape{0}, type);
-
- Stride stride{3, 7};
- Padding padding{PaddingType::VALID};
- operation::AvgPool2DNode::Param param{3, 6, stride, padding, Activation::NONE};
- operation::AvgPool2DNode avg_pool_op({input_index.value()}, {out_index.value()}, param);
-
- neurun::graph::ShapeInference shape_inference(model.operands);
- auto infered_shapes = shape_inference.inferShapes(avg_pool_op);
- auto infered_out_shape = infered_shapes.at(out_index);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature().N, 10);
- ASSERT_EQ(infered_out_shape.asFeature().H, 2);
- ASSERT_EQ(infered_out_shape.asFeature().W, 1);
- ASSERT_EQ(infered_out_shape.asFeature().C, 20);
-}
-
-TEST(ShapeInference, AvgPool2DNodeExplicit)
-{
- Model model;
- TypeInfo type(DataType::FLOAT32);
- auto input_index = model.operands.emplace(Shape{10, 3, 5, 20}, type);
- auto out_index = model.operands.emplace(Shape{0}, type);
-
- Stride stride{3, 7};
- Padding padding{PaddingType::EXPLICIT, {4, 3, 2, 1}};
- operation::AvgPool2DNode::Param param{3, 6, stride, padding, Activation::NONE};
- operation::AvgPool2DNode avg_pool_op({input_index.value()}, {out_index.value()}, param);
-
- neurun::graph::ShapeInference shape_inference(model.operands);
- auto infered_shapes = shape_inference.inferShapes(avg_pool_op);
- auto infered_out_shape = infered_shapes.at(out_index);
-
- ASSERT_EQ(infered_out_shape.rank(), 4);
- ASSERT_EQ(infered_out_shape.asFeature().N, 10);
- ASSERT_EQ(infered_out_shape.asFeature().H, 2);
- ASSERT_EQ(infered_out_shape.asFeature().W, 1);
- ASSERT_EQ(infered_out_shape.asFeature().C, 20);
-}
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "model/Layout.h"
+#include "util/ShapeInference.h"
+
+using namespace neurun::model;
+
+TEST(ShapeInference, AddNode)
+{
+ Shape lhs_shape{1, 299, 299, 3};
+ Shape rhs_shape{3};
+ auto infered_shapes = neurun::shape_inference::inferEltwiseShape(lhs_shape, rhs_shape);
+ auto infered_out_shape = infered_shapes[0];
+
+ ASSERT_EQ(infered_out_shape.rank(), 4);
+ ASSERT_EQ(infered_out_shape.dim(0), 1);
+ ASSERT_EQ(infered_out_shape.dim(1), 299);
+ ASSERT_EQ(infered_out_shape.dim(2), 299);
+ ASSERT_EQ(infered_out_shape.dim(3), 3);
+}
+
+TEST(ShapeInference, IncorrectAddNode)
+{
+ Shape lhs_shape{1, 299, 299, 3};
+ Shape rhs_shape{5, 3};
+ ASSERT_THROW(neurun::shape_inference::inferEltwiseShape(lhs_shape, rhs_shape),
+ std::runtime_error);
+}
+
+TEST(ShapeInference, AvgPool2DNodeSame)
+{
+ Shape in_shape{10, 6, 12, 20};
+ Stride stride{3, 7};
+ Padding padding{PaddingType::SAME};
+
+ operation::AvgPool2DNode::Param param{3, 6, stride, padding, Activation::NONE};
+ auto infered_shapes = neurun::shape_inference::inferAvgPoolShape(in_shape, param, Layout::NHWC);
+ auto infered_out_shape = infered_shapes[0];
+
+ ASSERT_EQ(infered_out_shape.rank(), 4);
+ ASSERT_EQ(infered_out_shape.asFeature().N, 10);
+ ASSERT_EQ(infered_out_shape.asFeature().H, 2);
+ ASSERT_EQ(infered_out_shape.asFeature().W, 2);
+ ASSERT_EQ(infered_out_shape.asFeature().C, 20);
+}
+
+TEST(ShapeInference, AvgPool2DNodeValid)
+{
+ Shape in_shape{10, 6, 12, 20};
+ Stride stride{3, 7};
+ Padding padding{PaddingType::VALID};
+
+ operation::AvgPool2DNode::Param param{3, 6, stride, padding, Activation::NONE};
+ auto infered_shapes = neurun::shape_inference::inferAvgPoolShape(in_shape, param, Layout::NHWC);
+ auto infered_out_shape = infered_shapes[0];
+
+ ASSERT_EQ(infered_out_shape.rank(), 4);
+ ASSERT_EQ(infered_out_shape.asFeature().N, 10);
+ ASSERT_EQ(infered_out_shape.asFeature().H, 2);
+ ASSERT_EQ(infered_out_shape.asFeature().W, 1);
+ ASSERT_EQ(infered_out_shape.asFeature().C, 20);
+}
+
+TEST(ShapeInference, AvgPool2DNodeExplicit)
+{
+ Shape in_shape{10, 3, 5, 20};
+
+ Stride stride{3, 7};
+ Padding padding{PaddingType::EXPLICIT, {4, 3, 2, 1}};
+
+ operation::AvgPool2DNode::Param param{3, 6, stride, padding, Activation::NONE};
+ auto infered_shapes = neurun::shape_inference::inferAvgPoolShape(in_shape, param, Layout::NHWC);
+ auto infered_out_shape = infered_shapes[0];
+
+ ASSERT_EQ(infered_out_shape.rank(), 4);
+ ASSERT_EQ(infered_out_shape.asFeature().N, 10);
+ ASSERT_EQ(infered_out_shape.asFeature().H, 2);
+ ASSERT_EQ(infered_out_shape.asFeature().W, 1);
+ ASSERT_EQ(infered_out_shape.asFeature().C, 20);
+}