/* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19 #include "kernels/FullyConnected.h"
20 #include "kernels/TestUtils.h"
21 #include "luci_interpreter/TestMemoryManager.h"
23 namespace luci_interpreter
30 using namespace testing;
33 void Check(std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> weights_shape,
34 std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape,
35 std::initializer_list<float> input_data, std::initializer_list<float> weights_data,
36 std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
38 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
40 makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
41 Tensor weights_tensor =
42 makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
44 makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
45 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
47 FullyConnectedParams params{};
48 params.activation = Activation::RELU;
50 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
52 memory_manager->allocate_memory(output_tensor);
55 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
56 EXPECT_THAT(extractTensorData<T>(output_tensor), FloatArrayNear(output_data));
60 void Check<int8_t>(std::initializer_list<int32_t> input_shape,
61 std::initializer_list<int32_t> weights_shape,
62 std::initializer_list<int32_t> bias_shape,
63 std::initializer_list<int32_t> output_shape,
64 std::initializer_list<float> input_data,
65 std::initializer_list<float> weights_data,
66 std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
68 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
69 const float quantized_tolerance = getTolerance(-127, 128, 255);
70 std::pair<float, int32_t> input_quant_param = quantizationParams<int8_t>(-63.5, 64);
71 std::pair<float, int32_t> output_quant_param = quantizationParams<int8_t>(-127, 128);
73 makeInputTensor<DataType::S8>(input_shape, input_quant_param.first, input_quant_param.second,
74 input_data, memory_manager.get());
75 Tensor weights_tensor =
76 makeInputTensor<DataType::S8>(weights_shape, input_quant_param.first, input_quant_param.second,
77 weights_data, memory_manager.get());
79 makeInputTensor<DataType::S32>(bias_shape, input_quant_param.first * input_quant_param.first, 0,
80 bias_data, memory_manager.get());
81 Tensor output_tensor =
82 makeOutputTensor(DataType::S8, output_quant_param.first, output_quant_param.second);
84 FullyConnectedParams params{};
85 params.activation = Activation::RELU;
87 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
89 memory_manager->allocate_memory(output_tensor);
92 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
93 EXPECT_THAT(dequantizeTensorData(output_tensor),
94 FloatArrayNear(output_data, quantized_tolerance));
99 std::initializer_list<int32_t> input_shape, std::initializer_list<int32_t> weights_shape,
100 std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape,
101 std::initializer_list<float> input_data, std::initializer_list<float> weights_data,
102 std::initializer_list<float> bias_data, std::initializer_list<float> output_data)
104 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
105 const float quantized_tolerance = getTolerance(-127, 128, 255);
106 std::pair<float, int32_t> input_quant_param = quantizationParams<uint8_t>(-63.5, 64);
107 std::pair<float, int32_t> output_quant_param = quantizationParams<uint8_t>(-127, 128);
108 Tensor input_tensor =
109 makeInputTensor<DataType::U8>(input_shape, input_quant_param.first, input_quant_param.second,
110 input_data, memory_manager.get());
111 Tensor weights_tensor =
112 makeInputTensor<DataType::U8>(weights_shape, input_quant_param.first, input_quant_param.second,
113 weights_data, memory_manager.get());
115 makeInputTensor<DataType::S32>(bias_shape, input_quant_param.first * input_quant_param.first, 0,
116 bias_data, memory_manager.get());
117 Tensor output_tensor =
118 makeOutputTensor(DataType::U8, output_quant_param.first, output_quant_param.second);
120 FullyConnectedParams params{};
121 params.activation = Activation::RELU;
123 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
125 memory_manager->allocate_memory(output_tensor);
128 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
129 EXPECT_THAT(dequantizeTensorData(output_tensor),
130 FloatArrayNear(output_data, quantized_tolerance));
133 template <typename T> class FullyConnectedTest : public ::testing::Test
137 using DataTypes = ::testing::Types<float, uint8_t, int8_t>;
138 TYPED_TEST_SUITE(FullyConnectedTest, DataTypes);
140 TYPED_TEST(FullyConnectedTest, Simple)
142 Check<TypeParam>({3, 2, 2, 1}, {3, 6}, {3}, {2, 3},
144 -3, -5, 5, 4, 9, -2, // batch = 0
145 -3, -2, -4, 9, -8, 1, // batch = 1
148 -3, -7, 4, -4, -6, 4, // unit = 0
149 3, 5, 2, 3, -3, -8, // unit = 1
150 -3, 7, 4, 9, 0, -5, // unit = 2
154 0, 0, 32, // batch = 0
155 22, 11, 47, // batch = 1
159 TEST(FullyConnectedTest, InvalidBiasType_NEG)
161 Shape input_shape{3, 2, 2, 1};
162 std::vector<float> input_data{
163 -3, -5, 5, 4, 9, -2, // batch = 0
164 -3, -2, -4, 9, -8, 1, // batch = 1
166 Shape weights_shape{3, 6};
167 std::vector<float> weights_data{
168 -3, -7, 4, -4, -6, 4, // unit = 0
169 3, 5, 2, 3, -3, -8, // unit = 1
170 -3, 7, 4, 9, 0, -5, // unit = 2
173 std::vector<int32_t> bias_data{-1, -5, -8};
175 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
177 Tensor input_tensor =
178 makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
179 Tensor weights_tensor =
180 makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
181 Tensor bias_tensor = makeInputTensor<DataType::S32>(bias_shape, bias_data, memory_manager.get());
182 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
184 FullyConnectedParams params{};
185 params.activation = Activation::RELU;
187 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
188 EXPECT_ANY_THROW(kernel.configure());
191 TEST(FullyConnectedTest, InvalidWeightShapeDim_NEG)
193 Shape input_shape{3, 2, 2, 1};
194 std::vector<float> input_data{
195 -3, -5, 5, 4, 9, -2, // batch = 0
196 -3, -2, -4, 9, -8, 1, // batch = 1
198 Shape weights_shape{1, 3, 6};
199 std::vector<float> weights_data{
200 -3, -7, 4, -4, -6, 4, // unit = 0
201 3, 5, 2, 3, -3, -8, // unit = 1
202 -3, 7, 4, 9, 0, -5, // unit = 2
205 std::vector<float> bias_data{-1, -5, -8};
207 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
209 Tensor input_tensor =
210 makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
211 Tensor weights_tensor =
212 makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
214 makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
215 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
217 FullyConnectedParams params{};
218 params.activation = Activation::RELU;
220 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
221 EXPECT_ANY_THROW(kernel.configure());
224 TEST(FullyConnectedTest, BiasElementNumWeightDimMismatch_NEG)
226 Shape input_shape{3, 2, 2, 1};
227 std::vector<float> input_data{
228 -3, -5, 5, 4, 9, -2, // batch = 0
229 -3, -2, -4, 9, -8, 1, // batch = 1
231 Shape weights_shape{6, 3};
232 std::vector<float> weights_data{
233 -3, -7, 4, // unit = 0
234 -4, -6, 4, // unit = 1
236 3, -3, -8, // unit = 3
237 -3, 7, 4, // unit = 4
238 9, 0, -5, // unit = 5
241 std::vector<float> bias_data{-1, -5, -8};
243 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
245 Tensor input_tensor =
246 makeInputTensor<DataType::FLOAT32>(input_shape, input_data, memory_manager.get());
247 Tensor weights_tensor =
248 makeInputTensor<DataType::FLOAT32>(weights_shape, weights_data, memory_manager.get());
250 makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, memory_manager.get());
251 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
253 FullyConnectedParams params{};
254 params.activation = Activation::RELU;
256 FullyConnected kernel(&input_tensor, &weights_tensor, &bias_tensor, &output_tensor, params);
257 EXPECT_ANY_THROW(kernel.configure());
261 } // namespace kernels
262 } // namespace luci_interpreter