Imported Upstream version 1.18.0
[platform/core/ml/nnfw.git] / compiler / luci-interpreter / src / kernels / FullyConnected.test.cpp
1 /*
2  * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *    http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include "kernels/FullyConnected.h"
18 #include "kernels/TestUtils.h"
19 #include "luci_interpreter/TestMemoryManager.h"
20
21 namespace luci_interpreter
22 {
23 namespace kernels
24 {
25 namespace
26 {
27
28 using namespace testing;
29
30 template <typename T>
31 void Check(std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> weights_shape,
32            std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape,
33            std::initializer_list<float> input_data, std::initializer_list<float> weights_data,
34            std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
35 {
36   std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
37   Tensor input_tensor =
38     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
39   Tensor weights_tensor =
40     makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
41   Tensor bias_tensor =
42     makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
43   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
44
45   FullyConnectedParams params{};
46   params.activation = Activation::RELU;
47
48   FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
49   kernel.configure();
50   memory_manager->allocate_memory(output_tensor);
51   kernel.execute();
52
53   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
54   EXPECT_THAT(extractTensorData<T>(output_tensor), FloatArrayNear(output_data));
55 }
56
57 template <>
58 void Check<int8_t>(std::initializer_list<int32_t> input_shape,
59                    std::initializer_list<int32_t> weights_shape,
60                    std::initializer_list<int32_t> bias_shape,
61                    std::initializer_list<int32_t> output_shape,
62                    std::initializer_list<float> input_data,
63                    std::initializer_list<float> weights_data,
64                    std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
65 {
66   std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
67   const float quantized_tolerance = getTolerance(-127, 128, 255);
68   std::pair<float, int32_t> input_quant_param = quantizationParams<int8_t>(-63.5, 64);
69   std::pair<float, int32_t> output_quant_param = quantizationParams<int8_t>(-127, 128);
70   Tensor input_tensor =
71     makeInputTensor<DataType::S8>(input_shape, input_quant_param.first, input_quant_param.second,
72                                   input_data, memory_manager.get());
73   Tensor weights_tensor =
74     makeInputTensor<DataType::S8>(weights_shape, input_quant_param.first, input_quant_param.second,
75                                   weights_data, memory_manager.get());
76   Tensor bias_tensor =
77     makeInputTensor<DataType::S32>(bias_shape, input_quant_param.first * input_quant_param.first, 0,
78                                    bias_data, memory_manager.get());
79   Tensor output_tensor =
80     makeOutputTensor(DataType::S8, output_quant_param.first, output_quant_param.second);
81
82   FullyConnectedParams params{};
83   params.activation = Activation::RELU;
84
85   FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
86   kernel.configure();
87   memory_manager->allocate_memory(output_tensor);
88   kernel.execute();
89
90   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
91   EXPECT_THAT(dequantizeTensorData(output_tensor),
92               FloatArrayNear(output_data, quantized_tolerance));
93 }
94
95 template <>
96 void Check<uint8_t>(
97   std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> weights_shape,
98   std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape,
99   std::initializer_list<float> input_data, std::initializer_list<float> weights_data,
100   std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
101 {
102   std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
103   const float quantized_tolerance = getTolerance(-127, 128, 255);
104   std::pair<float, int32_t> input_quant_param = quantizationParams<uint8_t>(-63.5, 64);
105   std::pair<float, int32_t> output_quant_param = quantizationParams<uint8_t>(-127, 128);
106   Tensor input_tensor =
107     makeInputTensor<DataType::U8>(input_shape, input_quant_param.first, input_quant_param.second,
108                                   input_data, memory_manager.get());
109   Tensor weights_tensor =
110     makeInputTensor<DataType::U8>(weights_shape, input_quant_param.first, input_quant_param.second,
111                                   weights_data, memory_manager.get());
112   Tensor bias_tensor =
113     makeInputTensor<DataType::S32>(bias_shape, input_quant_param.first * input_quant_param.first, 0,
114                                    bias_data, memory_manager.get());
115   Tensor output_tensor =
116     makeOutputTensor(DataType::U8, output_quant_param.first, output_quant_param.second);
117
118   FullyConnectedParams params{};
119   params.activation = Activation::RELU;
120
121   FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
122   kernel.configure();
123   memory_manager->allocate_memory(output_tensor);
124   kernel.execute();
125
126   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
127   EXPECT_THAT(dequantizeTensorData(output_tensor),
128               FloatArrayNear(output_data, quantized_tolerance));
129 }
130
// Typed test fixture: each TYPED_TEST below is instantiated once per element
// of DataTypes, dispatching to the matching Check<T> overload above.
template <typename T> class FullyConnectedTest : public ::testing::Test
{
};

using DataTypes = ::testing::Types<float, uint8_t, int8_t>;
// NOTE(review): TYPED_TEST_CASE was renamed TYPED_TEST_SUITE in newer
// GoogleTest releases; keep as-is unless the pinned gtest version supports
// the new macro — TODO confirm.
TYPED_TEST_CASE(FullyConnectedTest, DataTypes);
137
TYPED_TEST(FullyConnectedTest, Simple)
{
  // The 4-D input {3, 2, 2, 1} holds 12 elements; with weights {units=3,
  // input_size=6} it is flattened to 2 batches of 6 features, giving an
  // output of shape {2, 3}. Expected values are dot(input, unit) + bias,
  // clamped by RELU (e.g. batch 0 / unit 2: 9-35+20+36+0+10 - 8 = 32).
  Check<TypeParam>({3, 2, 2, 1}, {3, 6}, {3}, {2, 3},
                   {
                     -3, -5, 5, 4, 9, -2,  // batch = 0
                     -3, -2, -4, 9, -8, 1, // batch = 1
                   },
                   {
                     -3, -7, 4, -4, -6, 4, // unit = 0
                     3, 5, 2, 3, -3, -8,   // unit = 1
                     -3, 7, 4, 9, 0, -5,   // unit = 2
                   },
                   {-1, -5, -8},
                   {
                     0, 0, 32,   // batch = 0 (first two results are negative pre-RELU)
                     22, 11, 47, // batch = 1
                   });
}
156
// Negative test: an FP32 input/weights pair combined with an S32 bias is an
// invalid operand combination, so configure() must throw.
TEST(FullyConnectedTest, InvalidBiasType_NEG)
{
  Shape input_shape{3, 2, 2, 1};
  std::vector<float> input_data{
    -3, -5, 5,  4, 9,  -2, // batch = 0
    -3, -2, -4, 9, -8, 1,  // batch = 1
  };
  Shape weights_shape{3, 6};
  std::vector<float> weights_data{
    -3, -7, 4, -4, -6, 4,  // unit = 0
    3,  5,  2, 3,  -3, -8, // unit = 1
    -3, 7,  4, 9,  0,  -5, // unit = 2
  };
  Shape bias_shape{3};
  // Deliberately S32 — the mismatch with the FLOAT32 operands is the defect
  // under test.
  std::vector<int32_t> bias_data{-1, -5, -8};

  std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();

  Tensor input_tensor =
    makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
  Tensor weights_tensor =
    makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
  Tensor bias_tensor = makeInputTensor<DataType::S32>(bias_shape, bias_data, memory_manager.get());
  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

  FullyConnectedParams params{};
  params.activation = Activation::RELU;

  FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
  EXPECT_ANY_THROW(kernel.configure());
}
188
// Negative test: FullyConnected requires a rank-2 weights tensor
// {units, input_size}; a rank-3 weights shape must make configure() throw.
TEST(FullyConnectedTest, InvalidWeightShapeDim_NEG)
{
  Shape input_shape{3, 2, 2, 1};
  std::vector<float> input_data{
    -3, -5, 5,  4, 9,  -2, // batch = 0
    -3, -2, -4, 9, -8, 1,  // batch = 1
  };
  // Deliberately rank-3 — the extra leading dimension is the defect under test.
  Shape weights_shape{1, 3, 6};
  std::vector<float> weights_data{
    -3, -7, 4, -4, -6, 4,  // unit = 0
    3,  5,  2, 3,  -3, -8, // unit = 1
    -3, 7,  4, 9,  0,  -5, // unit = 2
  };
  Shape bias_shape{3};
  std::vector<float> bias_data{-1, -5, -8};

  std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();

  Tensor input_tensor =
    makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
  Tensor weights_tensor =
    makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
  Tensor bias_tensor =
    makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

  FullyConnectedParams params{};
  params.activation = Activation::RELU;

  FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
  EXPECT_ANY_THROW(kernel.configure());
}
221
// Negative test: the bias element count must equal the number of output units
// (weights dim 0). Here weights are {6, 3} (6 units) while the bias has only
// 3 elements, so configure() must throw.
TEST(FullyConnectedTest, BiasElementNumWeightDimMismatch_NEG)
{
  Shape input_shape{3, 2, 2, 1};
  std::vector<float> input_data{
    -3, -5, 5,  4, 9,  -2, // batch = 0
    -3, -2, -4, 9, -8, 1,  // batch = 1
  };
  // 6 units x 3 inputs — transposed relative to the valid {3, 6} layout used
  // by the positive tests, yielding 6 units against a 3-element bias.
  Shape weights_shape{6, 3};
  std::vector<float> weights_data{
    -3, -7, 4,  // unit = 0
    -4, -6, 4,  // unit = 1
    3,  5,  2,  // unit = 2
    3,  -3, -8, // unit = 3
    -3, 7,  4,  // unit = 4
    9,  0,  -5, // unit = 5
  };
  Shape bias_shape{3};
  std::vector<float> bias_data{-1, -5, -8};

  std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();

  Tensor input_tensor =
    makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
  Tensor weights_tensor =
    makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
  Tensor bias_tensor =
    makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
  Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

  FullyConnectedParams params{};
  params.activation = Activation::RELU;

  FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
  EXPECT_ANY_THROW(kernel.configure());
}
257
258 } // namespace
259 } // namespace kernels
260 } // namespace luci_interpreter