tests/validation/fixtures/ConvolutionLayerFixture.h (arm_compute v17.10)
/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE
#define ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE

#include "arm_compute/core/TensorShape.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/NEON/NEScheduler.h"
#include "tests/AssetsLibrary.h"
#include "tests/Globals.h"
#include "tests/IAccessor.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Fixture.h"
#include "tests/validation/CPP/ConvolutionLayer.h"
#include "tests/validation/CPP/Utils.h"
#include "tests/validation/Helpers.h"

#include <random>

namespace arm_compute
{
class NEConvolutionLayer;

namespace test
{
namespace validation
{
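/** Fixture that validates a convolution layer function against the reference implementation,
 *  optionally using fixed-point data types with a configurable number of fractional bits.
 */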
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ConvolutionValidationFixedPointFixture : public framework::Fixture
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights, DataType data_type, int fractional_bits)
    {
        _fractional_bits = fractional_bits;
        _data_type       = data_type;

        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, fractional_bits);
        _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, data_type, fractional_bits);
    }

protected:
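    // Fill a tensor with reproducible random values; the seed offset i keeps src, weights and bias independent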
    template <typename U>
    void fill(U &&tensor, int i)
    {
        switch(tensor.data_type())
        {
            case DataType::F16:
            case DataType::F32:
            {
                std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
                library->fill(tensor, distribution, i);
                break;
            }
            default:
                library->fill_tensor_uniform(tensor, i);
        }
    }

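    // Run the convolution on the target backend. When reshape_weights is false, the weights tensor is
    // created (and filled) directly in the already-reshaped layout that the function expects.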
    TensorType compute_target(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                              bool reshape_weights, DataType data_type, int fixed_point_position)
    {
        WeightsInfo weights_info(!reshape_weights, weights_shape.x(), weights_shape.y(), weights_shape[3]);
        TensorShape reshaped_weights_shape(weights_shape);

        if(!reshape_weights)
        {
            // Check if it's a "fully connected" convolution
            const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
            bool       is_optimised                   = false;
#if defined(__arm__)
            is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && data_type == DataType::F32;
#elif defined(__aarch64__)
            is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && data_type == DataType::F32;
#endif /* defined(__arm__) || defined(__aarch64__) */

            reshaped_weights_shape.collapse(3);

            if(bias_shape.total_size() > 0)
            {
                reshaped_weights_shape.set(0, reshaped_weights_shape.x() + 1);
            }

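            // For the fully-connected and optimised NEON paths the flattened weights are simply stored
            // transposed; otherwise they use the interleaved-transposed layout, packing (16 / element size)
            // values of each row together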
            if(is_fully_connected_convolution || is_optimised)
            {
                const size_t shape_x = reshaped_weights_shape.x();
                reshaped_weights_shape.set(0, reshaped_weights_shape.y());
                reshaped_weights_shape.set(1, shape_x);
            }
            else
            {
                const int interleave_width = 16 / data_size_from_type(data_type);
                reshaped_weights_shape.set(0, reshaped_weights_shape.x() * interleave_width);
                reshaped_weights_shape.set(1, static_cast<unsigned int>(std::ceil(reshaped_weights_shape.y() / static_cast<float>(interleave_width))));
            }
        }

        // Create tensors
        TensorType src     = create_tensor<TensorType>(input_shape, data_type, 1, fixed_point_position);
        TensorType weights = create_tensor<TensorType>(reshaped_weights_shape, data_type, 1, fixed_point_position);
        TensorType bias    = create_tensor<TensorType>(bias_shape, data_type, 1, fixed_point_position);
        TensorType dst     = create_tensor<TensorType>(output_shape, data_type, 1, fixed_point_position);

        // Create and configure function
        FunctionType conv;
        conv.configure(&src, &weights, &bias, &dst, info, weights_info);

        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Allocate tensors
        src.allocator()->allocate();
        weights.allocator()->allocate();
        bias.allocator()->allocate();
        dst.allocator()->allocate();

        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!weights.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!bias.info()->is_resizable(), framework::LogLevel::ERRORS);
        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);

        // Fill tensors
        fill(AccessorType(src), 0);

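        // The function will not reshape the weights itself, so generate weights and biases in their
        // original shapes, reshape them on the host and copy the result into the target weights tensor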
        if(!reshape_weights)
        {
            const bool is_fully_connected_convolution = (output_shape.x() == 1 && output_shape.y() == 1);
            bool       is_optimised                   = false;
#if defined(__arm__)
            is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU == CPUTarget::ARMV7 && data_type == DataType::F32;
#elif defined(__aarch64__)
            is_optimised = std::is_same<FunctionType, NEConvolutionLayer>::value && NEScheduler::get().cpu_info().CPU >= CPUTarget::ARMV8 && data_type == DataType::F32;
#endif /* defined(__arm__) || defined(__aarch64__) */

            TensorShape     tmp_weights_shape(weights_shape);
            SimpleTensor<T> tmp_weights(tmp_weights_shape, data_type, 1, fixed_point_position);
            SimpleTensor<T> tmp_bias(bias_shape, data_type, 1, fixed_point_position);

            // Fill with original shape
            fill(tmp_weights, 1);
            fill(tmp_bias, 2);

            tmp_weights = linearise_weights(tmp_weights, &tmp_bias);

            if(!is_fully_connected_convolution && !is_optimised)
            {
                // Transpose with interleave
                const int interleave_size = 16 / tmp_weights.element_size();
                tmp_weights               = transpose(std::move(tmp_weights), interleave_size);
            }

            AccessorType weights_accessor(weights);

            for(int i = 0; i < tmp_weights.num_elements(); ++i)
            {
                Coordinates coord = index2coord(tmp_weights.shape(), i);
                std::copy_n(static_cast<const T *>(tmp_weights(coord)), 1, static_cast<T *>(weights_accessor(coord)));
            }
        }
        else
        {
            fill(AccessorType(weights), 1);
            fill(AccessorType(bias), 2);
        }

        // Compute convolution layer function
        conv.run();

        return dst;
    }

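    // Compute the reference result with the plain C++ implementation, using the same fills as the target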
    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, const PadStrideInfo &info,
                                      DataType data_type, int fixed_point_position)
    {
        // Create reference
        SimpleTensor<T> src{ input_shape, data_type, 1, fixed_point_position };
        SimpleTensor<T> weights{ weights_shape, data_type, 1, fixed_point_position };
        SimpleTensor<T> bias{ bias_shape, data_type, 1, fixed_point_position };

        // Fill reference
        fill(src, 0);
        fill(weights, 1);
        fill(bias, 2);

        return reference::convolution_layer<T>(src, weights, bias, output_shape, info);
    }

    TensorType      _target{};
    SimpleTensor<T> _reference{};
    int             _fractional_bits{};
    DataType        _data_type{};

private:
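    // Reshape the weights into a 2D matrix in which each column holds one flattened kernel,
    // with the biases (if any) appended as the last row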
    template <typename U>
    SimpleTensor<U> linearise_weights(const SimpleTensor<U> &weights, const SimpleTensor<U> *biases = nullptr)
    {
        TensorShape dst_shape(weights.shape());
        dst_shape.collapse(3);

        if(biases != nullptr)
        {
            dst_shape.set(0, dst_shape.x() + 1);
        }

        const size_t shape_x = dst_shape.x();
        dst_shape.set(0, dst_shape.y());
        dst_shape.set(1, shape_x);

        SimpleTensor<U> dst(dst_shape, weights.data_type());

        // Don't iterate over biases yet
        for(int weights_idx = 0; weights_idx < weights.num_elements(); ++weights_idx)
        {
            Coordinates weights_coord = index2coord(weights.shape(), weights_idx);
            const int   dst_row       = weights_idx % weights.shape().total_size_lower(3);
            Coordinates dst_coord{ weights_coord[3], dst_row, weights_coord[4] };
            const int   dst_idx = coord2index(dst.shape(), dst_coord);

            dst[dst_idx] = weights[weights_idx];
        }

        if(biases != nullptr)
        {
            // Fill last row with biases
            for(int bias_idx = 0; bias_idx < biases->num_elements(); ++bias_idx)
            {
                Coordinates bias_coord = index2coord(biases->shape(), bias_idx);
                Coordinates dst_coord{ bias_coord.x(), static_cast<int>(dst.shape().y()) - 1, bias_coord.y() };
                int         dst_idx = coord2index(dst.shape(), dst_coord);

                dst[dst_idx] = (*biases)[bias_idx];
            }
        }

        return dst;
    }
};

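/** Convenience fixture for non-fixed-point data types; forwards to the fixed-point fixture with zero fractional bits. */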
template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
class ConvolutionValidationFixture : public ConvolutionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
{
public:
    template <typename...>
    void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, bool reshape_weights, DataType data_type)
    {
        ConvolutionValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(input_shape, weights_shape, bias_shape, output_shape, info, reshape_weights, data_type, 0);
    }
};
} // namespace validation
} // namespace test
} // namespace arm_compute
#endif /* ARM_COMPUTE_TEST_CONVOLUTION_LAYER_FIXTURE */