/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "kernels/DepthwiseConv2D.h"
18 #include "kernels/TestUtils.h"
19 #include "luci_interpreter/TestMemoryManager.h"
21 namespace luci_interpreter
28 using namespace testing;
// Test fixture for the DepthwiseConv2D kernel tests below: each test gets a
// fresh TestMemoryManager, used to allocate backing storage for the tensors.
// NOTE(review): this copy is truncated — the class braces/access specifiers
// are missing and each line carries a stray leading line number; restore from
// the original file before building.
30 class DepthwiseConv2DTest : public ::testing::Test
33   void SetUp() override { _memory_manager = std::make_unique<TestMemoryManager>(); }
35   std::unique_ptr<IMemoryManager> _memory_manager;
// Float path: VALID padding, depth_multiplier 2, strides (h=2, w=1), no
// dilation, RELU activation. Checks both the output values (FloatArrayNear)
// and the inferred output shape {1, 2, 1, 4}.
// NOTE(review): the input/filter/ref-output initializer contents are missing
// in this copy — verify the data against the original file.
38 TEST_F(DepthwiseConv2DTest, Float)
40   Shape input_shape{1, 4, 2, 2};
41   Shape filter_shape{1, 2, 2, 4};
43   std::vector<float> input_data{
49   std::vector<float> filter_data{
55   std::vector<float> bias_data{1, 2, 3, 4};
57     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
58   Tensor filter_tensor =
59     makeInputTensor<DataType::FLOAT32>(filter_shape, filter_data, _memory_manager.get());
61     makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, _memory_manager.get());
62   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
64   DepthwiseConv2DParams params{};
65   params.padding = Padding::VALID;
66   params.depth_multiplier = 2;
67   params.stride_height = 2;
68   params.stride_width = 1;
69   params.dilation_height_factor = 1;
70   params.dilation_width_factor = 1;
71   params.activation = Activation::RELU;
73   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
75   _memory_manager->allocate_memory(output_tensor);
78   std::vector<float> ref_output_data{
82   EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(ref_output_data));
83   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 2, 1, 4}));
// Uint8 per-tensor quantized path: input and filter share the same quant
// params derived from the float range [-63.5, 64]; output range is [-127, 128].
// Stride 1x1, depth_multiplier 2, no activation. Output is dequantized before
// comparison against the float reference values.
// NOTE(review): filter/ref-output initializer contents are missing in this
// copy — verify against the original file.
86 TEST_F(DepthwiseConv2DTest, Uint8)
88   std::vector<float> input_data{
89     1, 2, 7,  8,  // column 1
90     3, 4, 9,  10, // column 2
91     5, 6, 11, 12, // column 3
93   std::vector<float> filter_data{
99   std::vector<float> bias_data{1, 2, 3, 4};
101   std::pair<float, int32_t> input_quant_param = quantizationParams<uint8_t>(-63.5, 64);
102   std::pair<float, int32_t> output_quant_param = quantizationParams<uint8_t>(-127, 128);
104   Tensor input_tensor =
105     makeInputTensor<DataType::U8>({1, 3, 2, 2}, input_quant_param.first, input_quant_param.second,
106                                   input_data, _memory_manager.get());
107   Tensor filter_tensor =
108     makeInputTensor<DataType::U8>({1, 2, 2, 4}, input_quant_param.first, input_quant_param.second,
109                                   filter_data, _memory_manager.get());
110   Tensor bias_tensor = makeInputTensor<DataType::S32>(
111     {4}, input_quant_param.first * input_quant_param.first, 0, bias_data, _memory_manager.get());
112   Tensor output_tensor =
113     makeOutputTensor(DataType::U8, output_quant_param.first, output_quant_param.second);
115   DepthwiseConv2DParams params{};
116   params.padding = Padding::VALID;
117   params.depth_multiplier = 2;
118   params.stride_height = 1;
119   params.stride_width = 1;
120   params.dilation_height_factor = 1;
121   params.dilation_width_factor = 1;
122   params.activation = Activation::NONE;
124   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
126   _memory_manager->allocate_memory(output_tensor);
129   std::vector<float> ref_output_data{
133   EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_output_data));
134   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray({1, 2, 1, 4}));
// S16 per-tensor quantized path: input scale 0.25, filter scale 0.2, S64 bias
// whose scale is the product (0.25 * 0.2), output scale 0.5 — all zero points 0.
// Same geometry as the Float test (strides 2x1, depth_multiplier 2, RELU).
// NOTE(review): input/filter/ref-output initializer contents are missing in
// this copy — verify against the original file.
137 TEST_F(DepthwiseConv2DTest, SInt16)
139   Shape input_shape{1, 4, 2, 2};
140   Shape filter_shape{1, 2, 2, 4};
142   std::vector<int32_t> ref_output_shape{1, 2, 1, 4};
144   std::vector<float> input_data{
150   std::vector<float> filter_data{
156   std::vector<float> bias_data{1, 2, 3, 4};
157   std::vector<float> ref_output_data{
162   Tensor input_tensor =
163     makeInputTensor<DataType::S16>(input_shape, 0.25, 0, input_data, _memory_manager.get());
164   Tensor filter_tensor =
165     makeInputTensor<DataType::S16>(filter_shape, 0.2, 0, filter_data, _memory_manager.get());
167     makeInputTensor<DataType::S64>(bias_shape, 0.25 * 0.2, 0, bias_data, _memory_manager.get());
168   Tensor output_tensor = makeOutputTensor(DataType::S16, 0.5, 0);
170   DepthwiseConv2DParams params{};
171   params.padding = Padding::VALID;
172   params.depth_multiplier = 2;
173   params.stride_height = 2;
174   params.stride_width = 1;
175   params.dilation_height_factor = 1;
176   params.dilation_width_factor = 1;
177   params.activation = Activation::RELU;
179   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
181   _memory_manager->allocate_memory(output_tensor);
184   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
185   EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_output_data));
// S16 channel-wise quantized (CWQ) weights: one filter scale per output
// channel ({0.2, 1, 0.5, 0.1}), all zero points 0; each bias channel's scale
// is filter_scale[i] * input_scale. The `3` passed to makeInputTensor
// presumably selects axis 3 (the channel axis of the {1,2,2,C} filter) as the
// quantized dimension — confirm against TestUtils.
// NOTE(review): input/filter/ref-output initializer contents are missing in
// this copy — verify against the original file.
188 TEST_F(DepthwiseConv2DTest, SInt16_CWQ_weights)
190   const int output_channels = 4;
191   Shape input_shape{1, 4, 2, 2};
192   Shape filter_shape{1, 2, 2, output_channels};
194   std::vector<int32_t> ref_output_shape{1, 2, 1, output_channels};
196   std::vector<float> input_data{
202   std::vector<float> filter_data{
208   std::vector<float> bias_data{1, 2, 3, 4};
209   std::vector<float> ref_output_data{
214   float input_scale = 0.25;
215   std::vector<float> filter_scales{0.2f, 1.f, 0.5f, 0.1f};
216   std::vector<float> bias_scales;
217   for (int i = 0; i < output_channels; ++i)
218     bias_scales.push_back(filter_scales[i] * input_scale);
219   std::vector<int32_t> zerop(4, 0);
220   Tensor input_tensor =
221     makeInputTensor<DataType::S16>(input_shape, input_scale, 0, input_data, _memory_manager.get());
222   Tensor filter_tensor = makeInputTensor<DataType::S16>(filter_shape, filter_scales, zerop, 3,
223                                                         filter_data, _memory_manager.get());
224   Tensor bias_tensor = makeInputTensor<DataType::S64>(bias_shape, bias_scales, zerop, 0, bias_data,
225                                                       _memory_manager.get());
226   Tensor output_tensor = makeOutputTensor(DataType::S16, 0.5, 0);
228   DepthwiseConv2DParams params{};
229   params.padding = Padding::VALID;
230   params.depth_multiplier = 2;
231   params.stride_height = 2;
232   params.stride_width = 1;
233   params.dilation_height_factor = 1;
234   params.dilation_width_factor = 1;
235   params.activation = Activation::RELU;
237   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
239   _memory_manager->allocate_memory(output_tensor);
242   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
243   EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_output_data));
// Uint8 channel-wise quantized weights: each output channel gets its own
// (scale, zero_point) derived from a distinct float min/max range; bias scales
// are filter_scale[i] * input_scale with zero points 0. The final comparison
// uses an absolute tolerance of one output quantization step
// (output_quant_param.first).
// NOTE(review): input/filter/ref-output initializer contents are missing in
// this copy — verify against the original file.
246 TEST_F(DepthwiseConv2DTest, Uint8_CWQ_weights)
248   const int output_channels = 4;
249   Shape input_shape{1, 3, 2, 2};
250   Shape filter_shape{1, 2, 2, output_channels};
252   std::vector<int32_t> ref_output_shape{1, 2, 1, output_channels};
254   std::vector<float> input_data{
259   std::vector<float> filter_data{
265   std::vector<float> bias_data{1, 2, 3, 4};
266   std::vector<float> ref_output_data{
271   std::pair<float, int32_t> input_quant_param = quantizationParams<uint8_t>(0, 16);
272   std::pair<float, int32_t> output_quant_param = quantizationParams<uint8_t>(-127, 128);
274   std::vector<std::pair<float, int32_t>> filter_quant_params;
275   filter_quant_params.push_back(quantizationParams<uint8_t>(-9, 13));
276   filter_quant_params.push_back(quantizationParams<uint8_t>(-14, 10));
277   filter_quant_params.push_back(quantizationParams<uint8_t>(-11, 15));
278   filter_quant_params.push_back(quantizationParams<uint8_t>(-16, 12));
280   std::vector<float> filter_scales;
281   std::vector<int32_t> filter_zerops;
282   for (auto iter : filter_quant_params)
284     filter_scales.push_back(iter.first);
285     filter_zerops.push_back(iter.second);
288   std::vector<float> bias_scales;
289   for (int i = 0; i < output_channels; ++i)
290     bias_scales.push_back(filter_quant_params[i].first * input_quant_param.first);
291   std::vector<int32_t> zerop(output_channels, 0);
293   Tensor input_tensor =
294     makeInputTensor<DataType::U8>(input_shape, input_quant_param.first, input_quant_param.second,
295                                   input_data, _memory_manager.get());
296   Tensor filter_tensor = makeInputTensor<DataType::U8>(filter_shape, filter_scales, filter_zerops,
297                                                        3, filter_data, _memory_manager.get());
298   Tensor bias_tensor = makeInputTensor<DataType::S32>(bias_shape, bias_scales, zerop, 0, bias_data,
299                                                       _memory_manager.get());
300   Tensor output_tensor =
301     makeOutputTensor(DataType::U8, output_quant_param.first, output_quant_param.second);
303   DepthwiseConv2DParams params{};
304   params.padding = Padding::VALID;
305   params.depth_multiplier = 2;
306   params.stride_height = 1;
307   params.stride_width = 1;
308   params.dilation_height_factor = 1;
309   params.dilation_width_factor = 1;
310   params.activation = Activation::NONE;
312   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
314   _memory_manager->allocate_memory(output_tensor);
317   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
318   EXPECT_THAT(dequantizeTensorData(output_tensor),
319               FloatArrayNear(ref_output_data, output_quant_param.first));
// SInt8 channel-wise quantized weights: per-channel filter scales
// {0.5, 0.25, 1, 0.125}, all zero points 0 (symmetric, as int8 CWQ requires
// zero_point == 0 per the TFLite quantization spec — TODO confirm the kernel
// enforces this). Bias scales are filter_scale[i] * input_scale. Comparison
// tolerance is one output quantization step.
// NOTE(review): input/filter/ref-output initializer contents are missing in
// this copy — verify against the original file.
322 TEST_F(DepthwiseConv2DTest, SInt8_CWQ_weights)
324   const int output_channels = 4;
325   Shape input_shape{1, 3, 2, 2};
326   Shape filter_shape{1, 2, 2, output_channels};
328   std::vector<int32_t> ref_output_shape{1, 2, 1, output_channels};
330   std::vector<float> input_data{
335   std::vector<float> filter_data{
341   std::vector<float> bias_data{1, 2, 3, 4};
342   std::vector<float> ref_output_data{
347   std::pair<float, int32_t> input_quant_param = quantizationParams<int8_t>(-128, 127);
348   std::pair<float, int32_t> output_quant_param = quantizationParams<int8_t>(-127, 128);
350   std::vector<std::pair<float, int32_t>> filter_quant_params;
351   filter_quant_params.push_back(std::pair<float, int32_t>(0.5, 0));
352   filter_quant_params.push_back(std::pair<float, int32_t>(0.25, 0));
353   filter_quant_params.push_back(std::pair<float, int32_t>(1, 0));
354   filter_quant_params.push_back(std::pair<float, int32_t>(0.125, 0));
356   std::vector<float> filter_scales;
357   std::vector<int32_t> filter_zerops;
358   for (auto iter : filter_quant_params)
360     filter_scales.push_back(iter.first);
361     filter_zerops.push_back(iter.second);
364   std::vector<float> bias_scales;
365   for (int i = 0; i < output_channels; ++i)
366     bias_scales.push_back(filter_quant_params[i].first * input_quant_param.first);
367   std::vector<int32_t> zerop(output_channels, 0);
369   Tensor input_tensor =
370     makeInputTensor<DataType::S8>(input_shape, input_quant_param.first, input_quant_param.second,
371                                   input_data, _memory_manager.get());
372   Tensor filter_tensor = makeInputTensor<DataType::S8>(filter_shape, filter_scales, filter_zerops,
373                                                        3, filter_data, _memory_manager.get());
374   Tensor bias_tensor = makeInputTensor<DataType::S32>(bias_shape, bias_scales, zerop, 0, bias_data,
375                                                       _memory_manager.get());
376   Tensor output_tensor =
377     makeOutputTensor(DataType::S8, output_quant_param.first, output_quant_param.second);
379   DepthwiseConv2DParams params{};
380   params.padding = Padding::VALID;
381   params.depth_multiplier = 2;
382   params.stride_height = 1;
383   params.stride_width = 1;
384   params.dilation_height_factor = 1;
385   params.dilation_width_factor = 1;
386   params.activation = Activation::NONE;
388   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
390   _memory_manager->allocate_memory(output_tensor);
393   EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(ref_output_shape));
394   EXPECT_THAT(dequantizeTensorData(output_tensor),
395               FloatArrayNear(ref_output_data, output_quant_param.first));
// Negative test: float input/filter/output but an S32 bias must be rejected —
// configure() is expected to throw.
// NOTE(review): input/filter initializer contents (and the bias_shape
// declaration) are missing in this copy — verify against the original file.
398 TEST_F(DepthwiseConv2DTest, InvalidBiasType_NEG)
400   Shape input_shape{1, 4, 2, 2};
401   Shape filter_shape{1, 2, 2, 4};
403   std::vector<float> input_data{
409   std::vector<float> filter_data{
415   std::vector<int32_t> bias_data{1, 2, 3, 4};
416   Tensor input_tensor =
417     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
418   Tensor filter_tensor =
419     makeInputTensor<DataType::FLOAT32>(filter_shape, filter_data, _memory_manager.get());
420   Tensor bias_tensor = makeInputTensor<DataType::S32>(bias_shape, bias_data, _memory_manager.get());
421   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
423   DepthwiseConv2DParams params{};
424   params.padding = Padding::VALID;
425   params.depth_multiplier = 2;
426   params.stride_height = 2;
427   params.stride_width = 1;
428   params.dilation_height_factor = 1;
429   params.dilation_width_factor = 1;
430   params.activation = Activation::RELU;
432   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
433   EXPECT_ANY_THROW(kernel.configure());
// Negative test: FLOAT32 inputs with a U8 output tensor — the input/output
// type mismatch must make configure() throw.
// NOTE(review): input/filter initializer contents (and the `Tensor
// bias_tensor =` lead line) are missing in this copy — verify against the
// original file.
436 TEST_F(DepthwiseConv2DTest, InOutTypeMismatch_NEG)
438   Shape input_shape{1, 4, 2, 2};
439   Shape filter_shape{1, 2, 2, 4};
441   std::vector<float> input_data{
447   std::vector<float> filter_data{
453   std::vector<float> bias_data{1, 2, 3, 4};
454   Tensor input_tensor =
455     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
456   Tensor filter_tensor =
457     makeInputTensor<DataType::FLOAT32>(filter_shape, filter_data, _memory_manager.get());
459     makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, _memory_manager.get());
460   Tensor output_tensor = makeOutputTensor(DataType::U8);
462   DepthwiseConv2DParams params{};
463   params.padding = Padding::VALID;
464   params.depth_multiplier = 2;
465   params.stride_height = 2;
466   params.stride_width = 1;
467   params.dilation_height_factor = 1;
468   params.dilation_width_factor = 1;
469   params.activation = Activation::RELU;
471   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
472   EXPECT_ANY_THROW(kernel.configure());
// Negative test: 3-D input {4, 2, 2} instead of the required 4-D NHWC shape —
// configure() must throw.
// NOTE(review): input/filter initializer contents are missing in this copy —
// verify against the original file.
475 TEST_F(DepthwiseConv2DTest, InvalidInputShape_NEG)
477   Shape input_shape{4, 2, 2};
478   Shape filter_shape{2, 2, 4};
480   std::vector<float> input_data{
486   std::vector<float> filter_data{
492   std::vector<float> bias_data{1, 2, 3, 4};
493   Tensor input_tensor =
494     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
495   Tensor filter_tensor =
496     makeInputTensor<DataType::FLOAT32>(filter_shape, filter_data, _memory_manager.get());
498     makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, _memory_manager.get());
499   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
501   DepthwiseConv2DParams params{};
502   params.padding = Padding::VALID;
503   params.depth_multiplier = 2;
504   params.stride_height = 2;
505   params.stride_width = 1;
506   params.dilation_height_factor = 1;
507   params.dilation_width_factor = 1;
508   params.activation = Activation::RELU;
510   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
511   EXPECT_ANY_THROW(kernel.configure());
// Negative test: filter shape {2, 1, 2, 4} — a depthwise filter whose first
// dimension is not 1 — must make configure() throw.
// NOTE(review): input/filter initializer contents are missing in this copy —
// verify against the original file.
514 TEST_F(DepthwiseConv2DTest, InvalidFilterShape_NEG)
516   Shape input_shape{1, 4, 2, 2};
517   Shape filter_shape{2, 1, 2, 4};
519   std::vector<float> input_data{
525   std::vector<float> filter_data{
531   std::vector<float> bias_data{1, 2, 3, 4};
532   Tensor input_tensor =
533     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
534   Tensor filter_tensor =
535     makeInputTensor<DataType::FLOAT32>(filter_shape, filter_data, _memory_manager.get());
537     makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, _memory_manager.get());
538   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
540   DepthwiseConv2DParams params{};
541   params.padding = Padding::VALID;
542   params.depth_multiplier = 2;
543   params.stride_height = 2;
544   params.stride_width = 1;
545   params.dilation_height_factor = 1;
546   params.dilation_width_factor = 1;
547   params.activation = Activation::RELU;
549   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
550   EXPECT_ANY_THROW(kernel.configure());
// Negative test: filter shape {1, 2, 4, 2} puts 2 channels in the last axis
// while the bias has 4 elements — the bias/channel-count mismatch must make
// configure() throw.
// NOTE(review): input/filter initializer contents (and the bias_shape
// declaration) are missing in this copy — verify against the original file.
553 TEST_F(DepthwiseConv2DTest, InvalidBiasDim_NEG)
555   Shape input_shape{1, 4, 2, 2};
556   Shape filter_shape{1, 2, 4, 2};
558   std::vector<float> input_data{
564   std::vector<float> filter_data{
570   std::vector<float> bias_data{1, 2, 3, 4};
571   Tensor input_tensor =
572     makeInputTensor<DataType::FLOAT32>(input_shape, input_data, _memory_manager.get());
573   Tensor filter_tensor =
574     makeInputTensor<DataType::FLOAT32>(filter_shape, filter_data, _memory_manager.get());
576     makeInputTensor<DataType::FLOAT32>(bias_shape, bias_data, _memory_manager.get());
577   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
579   DepthwiseConv2DParams params{};
580   params.padding = Padding::VALID;
581   params.depth_multiplier = 2;
582   params.stride_height = 2;
583   params.stride_width = 1;
584   params.dilation_height_factor = 1;
585   params.dilation_width_factor = 1;
586   params.activation = Activation::RELU;
588   DepthwiseConv2D kernel(&input_tensor, &filter_tensor, &bias_tensor, &output_tensor, params);
589   EXPECT_ANY_THROW(kernel.configure());
593 } // namespace kernels
594 } // namespace luci_interpreter