IVGCVSW-1946: Remove armnn/src from the include paths
src/armnn/layers/Pooling2dLayer.cpp (platform/upstream/armnn.git)
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "Pooling2dLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>
#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

namespace armnn
{

Pooling2dLayer::Pooling2dLayer(const Pooling2dDescriptor& param, const char* name)
    : LayerWithParameters(1, 1, LayerType::Pooling2d, param, name)
{
}

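// Builds the backend workload for this layer. PrepInfoAndDesc (inherited from LayerWithParameters)
// copies m_Param into the queue descriptor and gathers the input/output tensor information the
// factory needs to create the Pooling2d workload.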
std::unique_ptr<IWorkload> Pooling2dLayer::CreateWorkload(const Graph& graph, const IWorkloadFactory& factory) const
{
    Pooling2dQueueDescriptor descriptor;
    return factory.CreatePooling2d(descriptor, PrepInfoAndDesc(descriptor, graph));
}

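// Copies this layer (same parameters and name) into the target graph.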
Pooling2dLayer* Pooling2dLayer::Clone(Graph& graph) const
{
    return CloneBase<Pooling2dLayer>(graph, m_Param, GetName());
}

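// Computes the output shape from the input shape and the descriptor's pool size, padding, stride,
// rounding mode and data layout.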
std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    BOOST_ASSERT(inputShapes.size() == 1);
    const TensorShape& inputShape = inputShapes[0];

    // If we support multiple batch dimensions in the future, then this assert will need to change.
    BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");

    unsigned int inWidth = inputShape[m_Param.m_DataLayout.GetWidthIndex()];
    unsigned int inHeight = inputShape[m_Param.m_DataLayout.GetHeightIndex()];
    unsigned int inChannels = inputShape[m_Param.m_DataLayout.GetChannelsIndex()];
    unsigned int inBatchSize = inputShape[0];

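    // A stride of zero in both dimensions denotes global pooling: a single window covers the whole
    // spatial extent, so the output is 1x1 per channel.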
    bool isGlobalPooling = (m_Param.m_StrideX == 0 && m_Param.m_StrideY == 0);
    unsigned int outWidth = 1;
    unsigned int outHeight = 1;
    if (!isGlobalPooling)
    {
        BOOST_ASSERT_MSG(m_Param.m_StrideX != 0 && m_Param.m_StrideY != 0,
                         "Stride can only be zero when performing global pooling");

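        // The number of pooling windows along one dimension is
        //     round((inSize + lowPad + highPad - poolSize) / stride) + 1,
        // where the rounding mode comes from m_OutputShapeRounding. The result is reduced by one if
        // the last window would start entirely inside the high padding.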
        auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto padMethod,
                           auto outputShapeRounding)
            {
                unsigned int readSize = inSize + lowPad + highPad - poolSize;
                float div = static_cast<float>(readSize) / static_cast<float>(stride);

                unsigned int size = 0;
                switch (outputShapeRounding)
                {
                    case OutputShapeRounding::Ceiling:
                        size = static_cast<unsigned int>(ceil(div)) + 1;
                        break;
                    case OutputShapeRounding::Floor:
                        size = static_cast<unsigned int>(floor(div)) + 1;
                        break;
                    default:
                        BOOST_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
                }

                // Make sure that border operations will start from inside the input and not the padded area.
                // This is what both Caffe and CL do...
                if ((size - 1) * stride >= inSize + lowPad)
                {
                    --size;
                }

                return size;
            };

        outWidth = CalcSize(inWidth, m_Param.m_PadLeft, m_Param.m_PadRight, m_Param.m_PoolWidth, m_Param.m_StrideX,
                            m_Param.m_PaddingMethod, m_Param.m_OutputShapeRounding);
        outHeight = CalcSize(inHeight, m_Param.m_PadTop, m_Param.m_PadBottom, m_Param.m_PoolHeight, m_Param.m_StrideY,
                             m_Param.m_PaddingMethod, m_Param.m_OutputShapeRounding);
    }
    unsigned int outChannels = inChannels;
    unsigned int outBatchSize = inBatchSize;

    TensorShape tensorShape = m_Param.m_DataLayout == armnn::DataLayout::NHWC ?
        TensorShape( { outBatchSize, outHeight, outWidth, outChannels } ) :
        TensorShape( { outBatchSize, outChannels, outHeight, outWidth } );

    return std::vector<TensorShape>({ tensorShape });
}

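// Checks that the shape already set on the output slot matches the shape inferred from the connected input.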
void Pooling2dLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    BOOST_ASSERT(inferredShapes.size() == 1);

    ConditionalThrowIfNotEqual<LayerValidationException>(
        "Pooling2dLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
        GetOutputSlot(0).GetTensorInfo().GetShape(),
        inferredShapes[0]);
}

} // namespace armnn
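
For reference, here is a minimal standalone sketch of the same output-size arithmetic used by the CalcSize lambda above. It is not part of the armnn sources; the function name CalcPoolOutputSize, the Rounding enum and the 112x112 example values are illustrative assumptions only.

#include <cmath>
#include <iostream>

enum class Rounding { Floor, Ceiling };

// Mirrors the CalcSize lambda: number of pooling windows along one dimension.
unsigned int CalcPoolOutputSize(unsigned int inSize, unsigned int lowPad, unsigned int highPad,
                                unsigned int poolSize, unsigned int stride, Rounding rounding)
{
    unsigned int readSize = inSize + lowPad + highPad - poolSize;
    float div = static_cast<float>(readSize) / static_cast<float>(stride);

    unsigned int size = (rounding == Rounding::Ceiling)
        ? static_cast<unsigned int>(std::ceil(div)) + 1
        : static_cast<unsigned int>(std::floor(div)) + 1;

    // Drop the last window if it would start past the end of the (low-padded) input.
    if ((size - 1) * stride >= inSize + lowPad)
    {
        --size;
    }
    return size;
}

int main()
{
    // 3x3 pooling, stride 2, no padding, floor rounding over a 112x112 input -> 55x55 output.
    std::cout << CalcPoolOutputSize(112, 0, 0, 3, 2, Rounding::Floor) << std::endl;
}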