Release 18.08
[platform/upstream/armnn.git] / src/armnn/layers/Pooling2dLayer.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#include "Pooling2dLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
#include <backends/WorkloadData.hpp>
#include <backends/WorkloadFactory.hpp>

#include <cmath>

namespace armnn
{

Pooling2dLayer::Pooling2dLayer(const Pooling2dDescriptor& param, const char* name)
    : LayerWithParameters(1, 1, LayerType::Pooling2d, param, name)
{
}

std::unique_ptr<IWorkload> Pooling2dLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
{
    Pooling2dQueueDescriptor descriptor;
    return factory.CreatePooling2d(descriptor, PrepInfoAndDesc(descriptor, graph));
}
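
// Example usage (a minimal sketch, assuming the reference backend's RefWorkloadFactory
// and a Graph that already owns this layer; poolingLayer and graph are hypothetical
// handles, not names used elsewhere in this file):
//
//     RefWorkloadFactory factory;
//     std::unique_ptr<IWorkload> workload = poolingLayer->CreateWorkload(graph, factory);
//     workload->Execute();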

Pooling2dLayer* Pooling2dLayer::Clone(Graph& graph) const
{
    return CloneBase<Pooling2dLayer>(graph, m_Param, GetName());
}

std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    BOOST_ASSERT(inputShapes.size() == 1);
    const TensorShape& inputShape = inputShapes[0];

    // If we support multiple batch dimensions in the future, then this assert will need to change.
    BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");

    unsigned int inWidth = inputShape[3];
    unsigned int inHeight = inputShape[2];
    unsigned int inChannels = inputShape[1];
    unsigned int inBatchSize = inputShape[0];

    bool isGlobalPooling = (m_Param.m_StrideX == 0 && m_Param.m_StrideY == 0);
    unsigned int outWidth = 1;
    unsigned int outHeight = 1;
    if (!isGlobalPooling)
    {
        BOOST_ASSERT_MSG(m_Param.m_StrideX != 0 && m_Param.m_StrideY != 0,
                         "Stride can only be zero when performing global pooling");

        auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto padMethod,
                           auto outputShapeRounding)
            {
                unsigned int readSize = inSize + lowPad + highPad - poolSize;
                float div = static_cast<float>(readSize) / static_cast<float>(stride);

                unsigned int size = 0;
                switch (outputShapeRounding)
                {
                    case OutputShapeRounding::Ceiling:
                        size = static_cast<unsigned int>(std::ceil(div)) + 1;
                        break;
                    case OutputShapeRounding::Floor:
                        size = static_cast<unsigned int>(std::floor(div)) + 1;
                        break;
                    default:
                        BOOST_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
                }

                // Makes sure that border operations will start from inside the input and not the padded area.
                // This is what both Caffe and CL do...
                if ((size - 1) * stride >= inSize + lowPad)
                {
                    --size;
                }

                return size;
            };
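
        // Worked example (assumed values, for illustration only): inSize = 7, lowPad = highPad = 0,
        // poolSize = 3, stride = 2 gives readSize = 4 and div = 2.0, so Floor and Ceiling both
        // produce size = 3, i.e. the familiar (in + padLow + padHigh - pool) / stride + 1.
        // With inSize = 4, lowPad = 0, highPad = 2, poolSize = 2, stride = 2 and Ceiling rounding,
        // size is initially 3, but (3 - 1) * 2 = 4 >= inSize + lowPad = 4, so the final check
        // trims it to 2 and the last window is not read purely from padding.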

        outWidth = CalcSize(inWidth, m_Param.m_PadLeft, m_Param.m_PadRight, m_Param.m_PoolWidth, m_Param.m_StrideX,
                            m_Param.m_PaddingMethod, m_Param.m_OutputShapeRounding);
        outHeight = CalcSize(inHeight, m_Param.m_PadTop, m_Param.m_PadBottom, m_Param.m_PoolHeight, m_Param.m_StrideY,
                             m_Param.m_PaddingMethod, m_Param.m_OutputShapeRounding);
    }
    unsigned int outChannels = inChannels;
    unsigned int outBatchSize = inBatchSize;

    return std::vector<TensorShape>({ TensorShape({outBatchSize, outChannels, outHeight, outWidth}) });
}
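
// Example (assumed values, for illustration only): for an NCHW input of { 1, 16, 7, 7 }
// with a 3x3 pool, stride 2 in both dimensions, no padding and Floor rounding, the
// calculation above yields outWidth = outHeight = (7 - 3) / 2 + 1 = 3, so this function
// returns { TensorShape({ 1, 16, 3, 3 }) }.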

void Pooling2dLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    BOOST_ASSERT(inferredShapes.size() == 1);

    ConditionalThrowIfNotEqual<LayerValidationException>(
        "Pooling2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
        GetOutputSlot(0).GetTensorInfo().GetShape(),
        inferredShapes[0]);
}
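
// Example (assumed values, for illustration only): if the output slot's TensorInfo had been
// given the shape { 1, 16, 4, 4 } while the shape inferred from the input is { 1, 16, 3, 3 },
// the check above throws a LayerValidationException with the message shown.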

} // namespace armnn