feda0d83d8a281e8db42ab1d824d6f4870c4c418
[platform/core/ml/nnfw.git] / onert-micro / luci-interpreter / src / kernels / FullyConnected.test.cpp
1 /*
2  * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3  * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  *    http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 // TODO enable it
18 #if 0
19 #include "kernels/FullyConnected.h"
20 #include "kernels/TestUtils.h"
21 #include "luci_interpreter/TestMemoryManager.h"
22
23 namespace luci_interpreter
24 {
25 namespace kernels
26 {
27 namespace
28 {
29
30 using namespace testing;
31
32 template <typename T>
33 void Check(std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> weights_shape,
34            std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape,
35            std::initializer_list<float> input_data, std::initializer_list<float> weights_data,
36            std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
37 {
38   std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
39   Tensor input_tensor =
40     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
41   Tensor weights_tensor =
42     makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
43   Tensor bias_tensor =
44     makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
45   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
46
47   FullyConnectedParams params{};
48   params.activation = Activation::RELU;
49
50   FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
51   kernel.configure();
52   memory_manager->allocate_memory(output_tensor);
53   kernel.execute();
54
55   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
56   EXPECT_THAT(extractTensorData<T>(output_tensor), FloatArrayNear(output_data));
57 }
58
59 template <>
60 void Check<int8_t>(std::initializer_list<int32_t> input_shape,
61                    std::initializer_list<int32_t> weights_shape,
62                    std::initializer_list<int32_t> bias_shape,
63                    std::initializer_list<int32_t> output_shape,
64                    std::initializer_list<float> input_data,
65                    std::initializer_list<float> weights_data,
66                    std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
67 {
68   std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
69   const float quantized_tolerance = getTolerance(-127, 128, 255);
70   std::pair<float, int32_t> input_quant_param = quantizationParams<int8_t>(-63.5, 64);
71   std::pair<float, int32_t> output_quant_param = quantizationParams<int8_t>(-127, 128);
72   Tensor input_tensor =
73     makeInputTensor<DataType::S8>(input_shape, input_quant_param.first, input_quant_param.second,
74                                   input_data, memory_manager.get());
75   Tensor weights_tensor =
76     makeInputTensor<DataType::S8>(weights_shape, input_quant_param.first, input_quant_param.second,
77                                   weights_data, memory_manager.get());
78   Tensor bias_tensor =
79     makeInputTensor<DataType::S32>(bias_shape, input_quant_param.first * input_quant_param.first, 0,
80                                    bias_data, memory_manager.get());
81   Tensor output_tensor =
82     makeOutputTensor(DataType::S8, output_quant_param.first, output_quant_param.second);
83
84   FullyConnectedParams params{};
85   params.activation = Activation::RELU;
86
87   FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
88   kernel.configure();
89   memory_manager->allocate_memory(output_tensor);
90   kernel.execute();
91
92   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
93   EXPECT_THAT(dequantizeTensorData(output_tensor),
94               FloatArrayNear(output_data, quantized_tolerance));
95 }
96
97 template <>
98 void Check<uint8_t>(
99   std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> weights_shape,
100   std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape,
101   std::initializer_list<float> input_data, std::initializer_list<float> weights_data,
102   std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
103 {
104   std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
105   const float quantized_tolerance = getTolerance(-127, 128, 255);
106   std::pair<float, int32_t> input_quant_param = quantizationParams<uint8_t>(-63.5, 64);
107   std::pair<float, int32_t> output_quant_param = quantizationParams<uint8_t>(-127, 128);
108   Tensor input_tensor =
109     makeInputTensor<DataType::U8>(input_shape, input_quant_param.first, input_quant_param.second,
110                                   input_data, memory_manager.get());
111   Tensor weights_tensor =
112     makeInputTensor<DataType::U8>(weights_shape, input_quant_param.first, input_quant_param.second,
113                                   weights_data, memory_manager.get());
114   Tensor bias_tensor =
115     makeInputTensor<DataType::S32>(bias_shape, input_quant_param.first * input_quant_param.first, 0,
116                                    bias_data, memory_manager.get());
117   Tensor output_tensor =
118     makeOutputTensor(DataType::U8, output_quant_param.first, output_quant_param.second);
119
120   FullyConnectedParams params{};
121   params.activation = Activation::RELU;
122
123   FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
124   kernel.configure();
125   memory_manager->allocate_memory(output_tensor);
126   kernel.execute();
127
128   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
129   EXPECT_THAT(dequantizeTensorData(output_tensor),
130               FloatArrayNear(output_data, quantized_tolerance));
131 }
132
// Typed test fixture: instantiated once per element type listed in DataTypes.
template <typename T> class FullyConnectedTest : public ::testing::Test
{
};

// Every TYPED_TEST below runs for the float, U8 and S8 Check<> paths.
using DataTypes = ::testing::Types<float, uint8_t, int8_t>;
TYPED_TEST_SUITE(FullyConnectedTest, DataTypes);
139
// Happy path: 12-element input split into 2 batches of depth 6, multiplied by
// a {3, 6} weight matrix with a 3-element bias, RELU applied to the result.
// Dispatches to the Check<> overload matching TypeParam (float, U8 or S8).
TYPED_TEST(FullyConnectedTest, Simple)
{
  Check<TypeParam>({3, 2, 2, 1}, {3, 6}, {3}, {2, 3},
                   {
                     -3, -5, 5, 4, 9, -2,  // batch = 0
                     -3, -2, -4, 9, -8, 1, // batch = 1
                   },
                   {
                     -3, -7, 4, -4, -6, 4, // unit = 0
                     3, 5, 2, 3, -3, -8,   // unit = 1
                     -3, 7, 4, 9, 0, -5,   // unit = 2
                   },
                   {-1, -5, -8},
                   {
                     0, 0, 32,   // batch = 0
                     22, 11, 47, // batch = 1
                   });
}
158
159 TEST(FullyConnectedTest, InvalidBiasType_NEG)
160 {
161   Shape input_shape{3, 2, 2, 1};
162   std::vector<float> input_data{
163     -3, -5, 5,  4, 9,  -2, // batch = 0
164     -3, -2, -4, 9, -8, 1,  // batch = 1
165   };
166   Shape weights_shape{3, 6};
167   std::vector<float> weights_data{
168     -3, -7, 4, -4, -6, 4,  // unit = 0
169     3,  5,  2, 3,  -3, -8, // unit = 1
170     -3, 7,  4, 9,  0,  -5, // unit = 2
171   };
172   Shape bias_shape{3};
173   std::vector<int32_t> bias_data{-1, -5, -8};
174
175   std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
176
177   Tensor input_tensor =
178     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
179   Tensor weights_tensor =
180     makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
181   Tensor bias_tensor = makeInputTensor<DataType::S32>(bias_shape, bias_data, memory_manager.get());
182   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
183
184   FullyConnectedParams params{};
185   params.activation = Activation::RELU;
186
187   FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
188   EXPECT_ANY_THROW(kernel.configure());
189 }
190
191 TEST(FullyConnectedTest, InvalidWeightShapeDim_NEG)
192 {
193   Shape input_shape{3, 2, 2, 1};
194   std::vector<float> input_data{
195     -3, -5, 5,  4, 9,  -2, // batch = 0
196     -3, -2, -4, 9, -8, 1,  // batch = 1
197   };
198   Shape weights_shape{1, 3, 6};
199   std::vector<float> weights_data{
200     -3, -7, 4, -4, -6, 4,  // unit = 0
201     3,  5,  2, 3,  -3, -8, // unit = 1
202     -3, 7,  4, 9,  0,  -5, // unit = 2
203   };
204   Shape bias_shape{3};
205   std::vector<float> bias_data{-1, -5, -8};
206
207   std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
208
209   Tensor input_tensor =
210     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
211   Tensor weights_tensor =
212     makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
213   Tensor bias_tensor =
214     makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
215   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
216
217   FullyConnectedParams params{};
218   params.activation = Activation::RELU;
219
220   FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
221   EXPECT_ANY_THROW(kernel.configure());
222 }
223
224 TEST(FullyConnectedTest, BiasElementNumWeightDimMismatch_NEG)
225 {
226   Shape input_shape{3, 2, 2, 1};
227   std::vector<float> input_data{
228     -3, -5, 5,  4, 9,  -2, // batch = 0
229     -3, -2, -4, 9, -8, 1,  // batch = 1
230   };
231   Shape weights_shape{6, 3};
232   std::vector<float> weights_data{
233     -3, -7, 4,  // unit = 0
234     -4, -6, 4,  // unit = 1
235     3,  5,  2,  // unit = 2
236     3,  -3, -8, // unit = 3
237     -3, 7,  4,  // unit = 4
238     9,  0,  -5, // unit = 5
239   };
240   Shape bias_shape{3};
241   std::vector<float> bias_data{-1, -5, -8};
242
243   std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
244
245   Tensor input_tensor =
246     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
247   Tensor weights_tensor =
248     makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
249   Tensor bias_tensor =
250     makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
251   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
252
253   FullyConnectedParams params{};
254   params.activation = Activation::RELU;
255
256   FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
257   EXPECT_ANY_THROW(kernel.configure());
258 }
259
260 } // namespace
261 } // namespace kernels
262 } // namespace luci_interpreter
263 #endif