IVGCVSW-1946: Remove armnn/src from the include paths
[platform/upstream/armnn.git] / src/armnn/layers/SpaceToBatchNdLayer.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "SpaceToBatchNdLayer.hpp"

#include "LayerCloneBase.hpp"

#include <armnn/TypesUtils.hpp>

#include <backendsCommon/WorkloadData.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <numeric>

namespace armnn
{

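// Constructs a SpaceToBatchNd layer with one input slot and one output slot.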
SpaceToBatchNdLayer::SpaceToBatchNdLayer(const SpaceToBatchNdDescriptor param, const char* name)
    : LayerWithParameters(1, 1, LayerType::SpaceToBatchNd, param, name)
{}

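// Copies the layer's block shape and padding parameters into a queue descriptor and asks the
// backend workload factory to create the SpaceToBatchNd workload.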
std::unique_ptr<IWorkload> SpaceToBatchNdLayer::CreateWorkload(const Graph& graph,
                                                               const IWorkloadFactory& factory) const
{
    SpaceToBatchNdQueueDescriptor descriptor;
    descriptor.m_Parameters.m_BlockShape = m_Param.m_BlockShape;
    descriptor.m_Parameters.m_PadList = m_Param.m_PadList;

    return factory.CreateSpaceToBatchNd(descriptor, PrepInfoAndDesc(descriptor, graph));
}

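// Creates a copy of this layer in the given graph, with the same parameters and name.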
SpaceToBatchNdLayer* SpaceToBatchNdLayer::Clone(Graph& graph) const
{
    return CloneBase<SpaceToBatchNdLayer>(graph, m_Param, GetName());
}

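// Infers the output shape from the input shape: the batch dimension grows by the product of the
// block shape, while each padded spatial dimension shrinks by its block size.
// For example, an NHWC input of [1,4,4,1] with block shape {2,2} and no padding yields [4,2,2,1].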
std::vector<TensorShape> SpaceToBatchNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
{
    BOOST_ASSERT(inputShapes.size() == 1);

    TensorShape inputShape = inputShapes[0];
    TensorShape outputShape(inputShape);

    outputShape[0] = inputShape[0] * std::accumulate(m_Param.m_BlockShape.begin(),
                                                     m_Param.m_BlockShape.end(),
                                                     1U,
                                                     std::multiplies<>());

    unsigned int heightIndex = m_Param.m_DataLayout.GetHeightIndex();
    unsigned int widthIndex = m_Param.m_DataLayout.GetWidthIndex();

    std::pair<unsigned int, unsigned int> heightPad = m_Param.m_PadList[0];
    std::pair<unsigned int, unsigned int> widthPad = m_Param.m_PadList[1];

    outputShape[heightIndex] =
        (inputShape[heightIndex] + heightPad.first + heightPad.second) / m_Param.m_BlockShape[0];
    outputShape[widthIndex] =
        (inputShape[widthIndex] + widthPad.first + widthPad.second) / m_Param.m_BlockShape[1];

    return std::vector<TensorShape>({ outputShape });
}

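// Checks that the shape set on the output slot matches the shape inferred from the connected input.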
void SpaceToBatchNdLayer::ValidateTensorShapesFromInputs()
{
    VerifyLayerConnections(1, CHECK_LOCATION());

    std::vector<TensorShape> inferredShapes = InferOutputShapes({
        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });

    BOOST_ASSERT(inferredShapes.size() == 1);

    ConditionalThrowIfNotEqual<LayerValidationException>(
        "SpaceToBatchNdLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
        GetOutputSlot(0).GetTensorInfo().GetShape(),
        inferredShapes[0]);
}

} // namespace