/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "kernels/FullyConnected.h"
#include "kernels/TestUtils.h"
#include "luci_interpreter/TestMemoryManager.h"
namespace luci_interpreter
{
namespace kernels
{
namespace
{

using namespace testing;
31 void Check(std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> weights_shape,
32 std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape,
33 std::initializer_list<float> input_data, std::initializer_list<float> weights_data,
34 std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
36 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
38 makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
39 Tensor weights_tensor =
40 makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
42 makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
43 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
45 FullyConnectedParams params{};
46 params.activation = Activation::RELU;
48 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
50 memory_manager->allocate_memory(output_tensor);
53 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
54 EXPECT_THAT(extractTensorData<T>(output_tensor), FloatArrayNear(output_data));
58 void Check<int8_t>(std::initializer_list<int32_t> input_shape,
59 std::initializer_list<int32_t> weights_shape,
60 std::initializer_list<int32_t> bias_shape,
61 std::initializer_list<int32_t> output_shape,
62 std::initializer_list<float> input_data,
63 std::initializer_list<float> weights_data,
64 std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
66 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
67 const float quantized_tolerance = getTolerance(-127, 128, 255);
68 std::pair<float, int32_t> input_quant_param = quantizationParams<int8_t>(-63.5, 64);
69 std::pair<float, int32_t> output_quant_param = quantizationParams<int8_t>(-127, 128);
71 makeInputTensor<DataType::S8>(input_shape, input_quant_param.first, input_quant_param.second,
72 input_data, memory_manager.get());
73 Tensor weights_tensor =
74 makeInputTensor<DataType::S8>(weights_shape, input_quant_param.first, input_quant_param.second,
75 weights_data, memory_manager.get());
77 makeInputTensor<DataType::S32>(bias_shape, input_quant_param.first * input_quant_param.first, 0,
78 bias_data, memory_manager.get());
79 Tensor output_tensor =
80 makeOutputTensor(DataType::S8, output_quant_param.first, output_quant_param.second);
82 FullyConnectedParams params{};
83 params.activation = Activation::RELU;
85 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
87 memory_manager->allocate_memory(output_tensor);
90 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
91 EXPECT_THAT(dequantizeTensorData(output_tensor),
92 FloatArrayNear(output_data, quantized_tolerance));
97 std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> weights_shape,
98 std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape,
99 std::initializer_list<float> input_data, std::initializer_list<float> weights_data,
100 std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
102 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
103 const float quantized_tolerance = getTolerance(-127, 128, 255);
104 std::pair<float, int32_t> input_quant_param = quantizationParams<uint8_t>(-63.5, 64);
105 std::pair<float, int32_t> output_quant_param = quantizationParams<uint8_t>(-127, 128);
106 Tensor input_tensor =
107 makeInputTensor<DataType::U8>(input_shape, input_quant_param.first, input_quant_param.second,
108 input_data, memory_manager.get());
109 Tensor weights_tensor =
110 makeInputTensor<DataType::U8>(weights_shape, input_quant_param.first, input_quant_param.second,
111 weights_data, memory_manager.get());
113 makeInputTensor<DataType::S32>(bias_shape, input_quant_param.first * input_quant_param.first, 0,
114 bias_data, memory_manager.get());
115 Tensor output_tensor =
116 makeOutputTensor(DataType::U8, output_quant_param.first, output_quant_param.second);
118 FullyConnectedParams params{};
119 params.activation = Activation::RELU;
121 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
123 memory_manager->allocate_memory(output_tensor);
126 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
127 EXPECT_THAT(dequantizeTensorData(output_tensor),
128 FloatArrayNear(output_data, quantized_tolerance));
131 template <typename T> class FullyConnectedTest : public ::testing::Test
135 using DataTypes = ::testing::Types<float, uint8_t, int8_t>;
136 TYPED_TEST_CASE(FullyConnectedTest, DataTypes);
138 TYPED_TEST(FullyConnectedTest, Simple)
140 Check<TypeParam>({3, 2, 2, 1}, {3, 6}, {3}, {2, 3},
142 -3, -5, 5, 4, 9, -2, // batch = 0
143 -3, -2, -4, 9, -8, 1, // batch = 1
146 -3, -7, 4, -4, -6, 4, // unit = 0
147 3, 5, 2, 3, -3, -8, // unit = 1
148 -3, 7, 4, 9, 0, -5, // unit = 2
152 0, 0, 32, // batch = 0
153 22, 11, 47, // batch = 1
157 TEST(FullyConnectedTest, InvalidBiasType_NEG)
159 Shape input_shape{3, 2, 2, 1};
160 std::vector<float> input_data{
161 -3, -5, 5, 4, 9, -2, // batch = 0
162 -3, -2, -4, 9, -8, 1, // batch = 1
164 Shape weights_shape{3, 6};
165 std::vector<float> weights_data{
166 -3, -7, 4, -4, -6, 4, // unit = 0
167 3, 5, 2, 3, -3, -8, // unit = 1
168 -3, 7, 4, 9, 0, -5, // unit = 2
171 std::vector<int32_t> bias_data{-1, -5, -8};
173 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
175 Tensor input_tensor =
176 makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
177 Tensor weights_tensor =
178 makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
179 Tensor bias_tensor = makeInputTensor<DataType::S32>(bias_shape, bias_data, memory_manager.get());
180 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
182 FullyConnectedParams params{};
183 params.activation = Activation::RELU;
185 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
186 EXPECT_ANY_THROW(kernel.configure());
189 TEST(FullyConnectedTest, InvalidWeightShapeDim_NEG)
191 Shape input_shape{3, 2, 2, 1};
192 std::vector<float> input_data{
193 -3, -5, 5, 4, 9, -2, // batch = 0
194 -3, -2, -4, 9, -8, 1, // batch = 1
196 Shape weights_shape{1, 3, 6};
197 std::vector<float> weights_data{
198 -3, -7, 4, -4, -6, 4, // unit = 0
199 3, 5, 2, 3, -3, -8, // unit = 1
200 -3, 7, 4, 9, 0, -5, // unit = 2
203 std::vector<float> bias_data{-1, -5, -8};
205 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
207 Tensor input_tensor =
208 makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
209 Tensor weights_tensor =
210 makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
212 makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
213 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
215 FullyConnectedParams params{};
216 params.activation = Activation::RELU;
218 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
219 EXPECT_ANY_THROW(kernel.configure());
222 TEST(FullyConnectedTest, BiasElementNumWeightDimMismatch_NEG)
224 Shape input_shape{3, 2, 2, 1};
225 std::vector<float> input_data{
226 -3, -5, 5, 4, 9, -2, // batch = 0
227 -3, -2, -4, 9, -8, 1, // batch = 1
229 Shape weights_shape{6, 3};
230 std::vector<float> weights_data{
231 -3, -7, 4, // unit = 0
232 -4, -6, 4, // unit = 1
234 3, -3, -8, // unit = 3
235 -3, 7, 4, // unit = 4
236 9, 0, -5, // unit = 5
239 std::vector<float> bias_data{-1, -5, -8};
241 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
243 Tensor input_tensor =
244 makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
245 Tensor weights_tensor =
246 makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
248 makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
249 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
251 FullyConnectedParams params{};
252 params.activation = Activation::RELU;
254 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
255 EXPECT_ANY_THROW(kernel.configure());
} // namespace
} // namespace kernels
} // namespace luci_interpreter