/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "GenModelTest.h"
19 TEST_F(GenModelTest, OneOp_DepthwiseConv2D)
22 std::vector<float> weight_data{1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16};
23 uint32_t weight_buf = cgen.addBuffer(weight_data);
24 std::vector<float> bias_data{1, 2, 3, 4};
25 uint32_t bias_buf = cgen.addBuffer(bias_data);
26 int in = cgen.addTensor({{1, 3, 2, 2}, circle::TensorType::TensorType_FLOAT32});
27 int weight = cgen.addTensor({{1, 2, 2, 4}, circle::TensorType::TensorType_FLOAT32, weight_buf});
28 int bias = cgen.addTensor({{1, 1, 1, 4}, circle::TensorType::TensorType_FLOAT32, bias_buf});
29 int out = cgen.addTensor({{1, 2, 1, 4}, circle::TensorType::TensorType_FLOAT32});
30 cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_VALID, 1, 1, 2,
31 circle::ActivationFunctionType_NONE);
32 cgen.setInputsAndOutputs({in}, {out});
34 _context = std::make_unique<GenModelTestContext>(cgen.finish());
35 _context->addTestCase(uniformTCD<float>({{1, 2, 7, 8, 3, 4, 9, 10, 5, 6, 11, 12}},
36 {{71, -34, 99, -20, 91, -26, 127, -4}}));
37 _context->setBackends({"acl_cl", "acl_neon", "cpu", "xnnpack"});
42 TEST_F(GenModelTest, OneOp_DepthwiseConv2D_No_Multiplier)
45 std::vector<float> weight_data{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
46 uint32_t weight_buf = cgen.addBuffer(weight_data);
47 std::vector<float> bias_data{0.5f, -0.5f};
48 uint32_t bias_buf = cgen.addBuffer(bias_data);
49 int in = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType::TensorType_FLOAT32});
50 int weight = cgen.addTensor({{1, 3, 1, 2}, circle::TensorType::TensorType_FLOAT32, weight_buf});
51 int bias = cgen.addTensor({{1, 1, 1, 2}, circle::TensorType::TensorType_FLOAT32, bias_buf});
52 int out = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType::TensorType_FLOAT32});
53 cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_SAME, 1, 1, 1,
54 circle::ActivationFunctionType_NONE);
55 cgen.setInputsAndOutputs({in}, {out});
57 _context = std::make_unique<GenModelTestContext>(cgen.finish());
58 _context->addTestCase(
59 uniformTCD<float>({{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f}},
60 {{16.5f, 27.5f, 28.5f, 43.5f, 8.5f, 15.5f, 12.5f, 23.5f}}));
61 _context->setBackends({"acl_cl", "acl_neon", "cpu", "gpu_cl"});
65 TEST_F(GenModelTest, OneOp_DepthwiseConv2D_No_Multiplier_RELU6)
68 std::vector<float> weight_data{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
69 uint32_t weight_buf = cgen.addBuffer(weight_data);
70 std::vector<float> bias_data{0.5f, -0.5f};
71 uint32_t bias_buf = cgen.addBuffer(bias_data);
72 int in = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType::TensorType_FLOAT32});
73 int weight = cgen.addTensor({{1, 3, 1, 2}, circle::TensorType::TensorType_FLOAT32, weight_buf});
74 int bias = cgen.addTensor({{1, 1, 1, 2}, circle::TensorType::TensorType_FLOAT32, bias_buf});
75 int out = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType::TensorType_FLOAT32});
76 cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_SAME, 1, 1, 1,
77 circle::ActivationFunctionType_RELU6);
78 cgen.setInputsAndOutputs({in}, {out});
80 _context = std::make_unique<GenModelTestContext>(cgen.finish());
81 _context->addTestCase(uniformTCD<float>({{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f}},
82 {{6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f, 6.0f}}));
83 _context->setBackends({"acl_cl", "acl_neon", "cpu", "gpu_cl"});
87 TEST_F(GenModelTest, OneOp_DepthwiseConv2D_3x3)
90 std::vector<float> weight_data{0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,
91 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f};
92 uint32_t weight_buf = cgen.addBuffer(weight_data);
93 std::vector<float> bias_data{0.0f, 0.0f};
94 uint32_t bias_buf = cgen.addBuffer(bias_data);
95 int in = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType::TensorType_FLOAT32});
96 int weight = cgen.addTensor({{1, 3, 3, 2}, circle::TensorType::TensorType_FLOAT32, weight_buf});
97 int bias = cgen.addTensor({{1, 1, 1, 2}, circle::TensorType::TensorType_FLOAT32, bias_buf});
98 int out = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType::TensorType_FLOAT32});
99 cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_SAME, 1, 1, 1,
100 circle::ActivationFunctionType_NONE);
101 cgen.setInputsAndOutputs({in}, {out});
103 _context = std::make_unique<GenModelTestContext>(cgen.finish());
104 _context->addTestCase(
105 uniformTCD<float>({{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f}},
106 {{6.0f, 16.0f, 8.0f, 16.0f, 10.0f, 16.0f, 12.0f, 16.0f}}));
107 _context->setBackends({"acl_cl", "acl_neon", "cpu", "gpu_cl"});
111 TEST_F(GenModelTest, OneOp_DepthwiseConv2D_Dilation)
114 std::vector<float> weight_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
115 uint32_t weight_buf = cgen.addBuffer(weight_data);
116 std::vector<float> bias_data{0, 0, 0, 0};
117 uint32_t bias_buf = cgen.addBuffer(bias_data);
118 int in = cgen.addTensor({{1, 4, 4, 2}, circle::TensorType::TensorType_FLOAT32});
119 int weight = cgen.addTensor({{1, 2, 2, 4}, circle::TensorType::TensorType_FLOAT32, weight_buf});
120 int bias = cgen.addTensor({{1, 1, 1, 4}, circle::TensorType::TensorType_FLOAT32, bias_buf});
121 int out = cgen.addTensor({{1, 2, 2, 4}, circle::TensorType::TensorType_FLOAT32});
122 cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_VALID, 1, 1, 2,
123 circle::ActivationFunctionType_NONE, 2, 2);
124 cgen.setInputsAndOutputs({in}, {out});
126 _context = std::make_unique<GenModelTestContext>(cgen.finish());
127 _context->addTestCase(uniformTCD<float>({{
128 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
129 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
131 {{13, 14, 0, 0, 0, 0, 11, 12, 5, 6, 0, 0, 0, 0, 3, 4}}));
132 _context->setBackends({"acl_cl", "acl_neon", "cpu", "xnnpack"});
137 TEST_F(GenModelTest, OneOp_DepthwiseConv2D_Dilation_N_Stride)
140 std::vector<float> weight_data{1, 2, 3, 4};
141 uint32_t weight_buf = cgen.addBuffer(weight_data);
142 std::vector<float> bias_data{0, 0, 0, 0};
143 uint32_t bias_buf = cgen.addBuffer(bias_data);
144 int in = cgen.addTensor({{1, 6, 6, 1}, circle::TensorType::TensorType_FLOAT32});
145 int weight = cgen.addTensor({{1, 2, 2, 1}, circle::TensorType::TensorType_FLOAT32, weight_buf});
146 int bias = cgen.addTensor({{1, 1, 1, 1}, circle::TensorType::TensorType_FLOAT32, bias_buf});
147 int out = cgen.addTensor({{1, 3, 3, 1}, circle::TensorType::TensorType_FLOAT32});
148 cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_SAME, 2, 2, 1,
149 circle::ActivationFunctionType_NONE, 3, 3);
150 cgen.setInputsAndOutputs({in}, {out});
152 _context = std::make_unique<GenModelTestContext>(cgen.finish());
153 _context->addTestCase(uniformTCD<float>({{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0,
154 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
155 {{4, 0, 3, 0, 0, 0, 2, 0, 1}}));
156 _context->setBackends({"acl_cl", "acl_neon", "cpu", "xnnpack", "gpu_cl"});
// Quantized uint8 DepthwiseConv2D with per-channel weight quantization:
// one (scale, zero_point) pair per output channel. Runs on the cpu backend only.
TEST_F(GenModelTest, OneOp_DepthwiseConv2D_U8_PerChannel)
  // NOTE(review): the enclosing braces, `CircleGen cgen;` and the remainder of
  // the weight/test-case data appear to be missing from this excerpt — confirm
  // against the full file before building.
  std::vector<uint8_t> weight_data{2, 1, 2,
  uint32_t weight_buf = cgen.addBuffer(weight_data);
  // Per-channel quantization parameters: one scale and one zero-point per channel.
  std::vector<float> weight_scales = {.5, 1, 2};
  std::vector<int64_t> weight_zeropoints = {2, 0, 1};
  int weight = cgen.addTensor({{1, 2, 2, 3}, circle::TensorType::TensorType_UINT8, weight_buf},
                              weight_scales, weight_zeropoints);
  // Bias is int32 with scale 1.0, zero-point 0.
  std::vector<int32_t> bias_data{4, -8, -4};
  uint32_t bias_buf = cgen.addBuffer(bias_data);
  int bias = cgen.addTensor({{1, 1, 1, 3}, circle::TensorType::TensorType_INT32, bias_buf}, 1., 0);
  // Activations use per-tensor quantization: input scale 2.0 / zp 1, output scale 4.0 / zp 2.
  int in = cgen.addTensor({{1, 2, 2, 3}, circle::TensorType::TensorType_UINT8}, 2., 1);
  int out = cgen.addTensor({{1, 1, 1, 3}, circle::TensorType::TensorType_UINT8}, 4., 2);
  // VALID padding, stride 1x1, depth_multiplier 1, no fused activation
  cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_VALID, 1, 1, 1,
                                  circle::ActivationFunctionType_NONE);
  cgen.setInputsAndOutputs({in}, {out});
  _context = std::make_unique<GenModelTestContext>(cgen.finish());
  // NOTE(review): the rest of this test-case data is not visible in this excerpt.
  _context->addTestCase(uniformTCD<uint8_t>({{5, 5, 5, // NHWC
  _context->setBackends({"cpu"});
// Hybrid-quantized DepthwiseConv2D: int8 per-channel weights combined with
// float32 input, bias, and output. Runs on the cpu backend only.
TEST_F(GenModelTest, OneOp_DepthwiseConv2D_I8_Hybrid_PerChannel)
  // NOTE(review): the enclosing braces, `CircleGen cgen;` and part of the
  // test-case data appear to be missing from this excerpt — confirm against
  // the full file before building.
  std::vector<int8_t> weight_data{1, 2, 1, 2, -9, 10, -9, 10,
                                  5, 6, 5, 6, 13, -14, 13, -14};
  uint32_t weight_buf = cgen.addBuffer(weight_data);
  // Per-channel quantization: unit scales, all zero-points zero.
  std::vector<float> weight_scales = {1, 1, 1, 1};
  std::vector<int64_t> weight_zeropoints = {0, 0, 0, 0};
  int weight = cgen.addTensor({{1, 2, 2, 4}, circle::TensorType::TensorType_INT8, weight_buf},
                              weight_scales, weight_zeropoints);
  // Bias is plain float32 (hybrid: only the weights are quantized).
  std::vector<float> bias_data{0, 1, 2, 3};
  uint32_t bias_buf = cgen.addBuffer(bias_data);
  int bias = cgen.addTensor({{1, 1, 1, 4}, circle::TensorType::TensorType_FLOAT32, bias_buf});
  // Float activations.
  int in = cgen.addTensor({{1, 3, 2, 2}, circle::TensorType::TensorType_FLOAT32});
  int out = cgen.addTensor({{1, 2, 1, 4}, circle::TensorType::TensorType_FLOAT32});
  // VALID padding, stride 1x1, depth_multiplier 2, no fused activation
  cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_VALID, 1, 1, 2,
                                  circle::ActivationFunctionType_NONE);
  cgen.setInputsAndOutputs({in}, {out});
  _context = std::make_unique<GenModelTestContext>(cgen.finish());
  // NOTE(review): the rest of this test-case data is not visible in this excerpt.
  _context->addTestCase(uniformTCD<float>({{0, 1, 2, 3,
  _context->setBackends({"cpu"});
245 TEST_F(GenModelTest, neg_OneOp_DepthwiseConv2D_Stride)
248 std::vector<float> weight_data{1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16};
249 uint32_t weight_buf = cgen.addBuffer(weight_data);
250 std::vector<float> bias_data{1, 2, 3, 4};
251 uint32_t bias_buf = cgen.addBuffer(bias_data);
252 int in = cgen.addTensor({{1, 3, 2, 2}, circle::TensorType::TensorType_FLOAT32});
253 int weight = cgen.addTensor({{1, 2, 2, 4}, circle::TensorType::TensorType_FLOAT32, weight_buf});
254 int bias = cgen.addTensor({{1, 1, 1, 4}, circle::TensorType::TensorType_FLOAT32, bias_buf});
255 int out = cgen.addTensor({{1, 2, 1, 4}, circle::TensorType::TensorType_FLOAT32});
256 cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_VALID, 0, 0, 2,
257 circle::ActivationFunctionType_NONE);
258 cgen.setInputsAndOutputs({in}, {out});
260 _context = std::make_unique<GenModelTestContext>(cgen.finish());
261 _context->expectFailModelLoad();
266 TEST_F(GenModelTest, neg_OneOp_DepthwiseConv2D_Dilation)
269 std::vector<float> weight_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
270 uint32_t weight_buf = cgen.addBuffer(weight_data);
271 std::vector<float> bias_data{0, 0, 0, 0};
272 uint32_t bias_buf = cgen.addBuffer(bias_data);
273 int in = cgen.addTensor({{1, 4, 4, 2}, circle::TensorType::TensorType_FLOAT32});
274 int weight = cgen.addTensor({{1, 2, 2, 4}, circle::TensorType::TensorType_FLOAT32, weight_buf});
275 int bias = cgen.addTensor({{1, 1, 1, 4}, circle::TensorType::TensorType_FLOAT32, bias_buf});
276 int out = cgen.addTensor({{1, 2, 2, 4}, circle::TensorType::TensorType_FLOAT32});
277 cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_VALID, 1, 1, 2,
278 circle::ActivationFunctionType_NONE, 0, 0);
279 cgen.setInputsAndOutputs({in}, {out});
281 _context = std::make_unique<GenModelTestContext>(cgen.finish());
282 _context->expectFailModelLoad();
287 TEST_F(GenModelTest, neg_OneOp_DepthwiseConv2D_Type)
290 std::vector<float> weight_data{1, 2, 3, 4, -9, 10, -11, 12, 5, 6, 7, 8, 13, -14, 15, -16};
291 uint32_t weight_buf = cgen.addBuffer(weight_data);
292 std::vector<float> bias_data{1, 2, 3, 4};
293 uint32_t bias_buf = cgen.addBuffer(bias_data);
294 int in = cgen.addTensor({{1, 3, 2, 2}, circle::TensorType::TensorType_FLOAT32});
295 int weight = cgen.addTensor({{1, 2, 2, 4}, circle::TensorType::TensorType_FLOAT32, weight_buf});
296 int bias = cgen.addTensor({{1, 1, 1, 4}, circle::TensorType::TensorType_FLOAT32, bias_buf});
297 int out = cgen.addTensor({{1, 2, 1, 4}, circle::TensorType::TensorType_UINT8});
298 cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_VALID, 1, 1, 2,
299 circle::ActivationFunctionType_NONE);
300 cgen.setInputsAndOutputs({in}, {out});
302 _context = std::make_unique<GenModelTestContext>(cgen.finish());
303 _context->expectFailModelLoad();
308 // Generate a model for negative test cases
309 CircleBuffer genNegTestDepthwiseConv2DModel(circle::Padding padding, int stride_w, int stride_h,
310 int depth_multiplier,
311 circle::ActivationFunctionType actfn)
314 uint32_t ker_buf = cgen.addBuffer(std::vector<uint8_t>{0, 1, 2, 3, 0, 1, 2, 3});
315 uint32_t bias_buf = cgen.addBuffer(std::vector<int32_t>{0, 0});
316 int in = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType_UINT8}, 0.5, 0);
317 int ker = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType_UINT8, ker_buf}, 0.5, 0);
318 int bias = cgen.addTensor({{2}, circle::TensorType_INT32, bias_buf}, 0.25, 0);
319 int out = cgen.addTensor({{1, 1, 1, 2}, circle::TensorType_UINT8}, 1, 0);
320 cgen.addOperatorDepthwiseConv2D({{in, ker, bias}, {out}}, padding, stride_w, stride_h,
321 depth_multiplier, actfn, 0, 0);
322 cgen.setInputsAndOutputs({in}, {out});
323 return cgen.finish();
// Parameters for the value-parameterized quantized DepthwiseConv2D tests below.
// T is the quantized element type (uint8_t or int8_t).
template <typename T> struct DepthwiseConv2DQuantTestParam
{
  int stride = 1; // Used for both height and width
  int input_depth = 1;
  int depth_multiplier = 1;
  std::vector<T> ref_output; // Expected output values for this configuration
};
334 template <typename T>
335 class DepthwiseConv2DQuantTest
336 : public GenModelTest,
337 public ::testing::WithParamInterface<DepthwiseConv2DQuantTestParam<T>>
// Concrete param/fixture aliases for the uint8_t variant.
using DepthwiseConv2DQuantTestParamU8 = DepthwiseConv2DQuantTestParam<uint8_t>;
using DepthwiseConv2DQuantTestU8 = DepthwiseConv2DQuantTest<uint8_t>;
344 // Test with different InputDepth and DepthMultiplier. The values are intended to test optimized CPU
346 INSTANTIATE_TEST_SUITE_P(
347 GenModelTest, DepthwiseConv2DQuantTestU8,
350 DepthwiseConv2DQuantTestParamU8{1, 8, 1, std::vector<uint8_t>{0, 3, 5, 8, 0, 3, 5, 8}},
351 DepthwiseConv2DQuantTestParamU8{1, 4, 2, std::vector<uint8_t>{0, 0, 2, 3, 0, 2, 6, 9}},
352 DepthwiseConv2DQuantTestParamU8{
353 1, 2, 8, std::vector<uint8_t>{0, 1, 2, 3, 0, 1, 2, 3, 0, 2, 4, 6, 0, 2, 4, 6}},
354 DepthwiseConv2DQuantTestParamU8{1, 2, 2, std::vector<uint8_t>{0, 1, 4, 6}},
355 DepthwiseConv2DQuantTestParamU8{1, 2, 1, std::vector<uint8_t>{2, 5}},
356 DepthwiseConv2DQuantTestParamU8{1, 1, 2, std::vector<uint8_t>{2, 4}},
357 DepthwiseConv2DQuantTestParamU8{1, 1, 4, std::vector<uint8_t>{0, 2, 3, 5}},
358 DepthwiseConv2DQuantTestParamU8{1, 4, 1, std::vector<uint8_t>{0, 1, 4, 9}},
359 DepthwiseConv2DQuantTestParamU8{
360 1, 4, 4, std::vector<uint8_t>{0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9}},
361 DepthwiseConv2DQuantTestParamU8{1, 12, 1,
362 std::vector<uint8_t>{0, 3, 7, 12, 0, 4, 7, 12, 0, 4, 9, 16}},
364 DepthwiseConv2DQuantTestParamU8{2, 4, 1, std::vector<uint8_t>{0, 1, 4, 9}},
365 DepthwiseConv2DQuantTestParamU8{2, 2, 1, std::vector<uint8_t>{2, 5}},
366 DepthwiseConv2DQuantTestParamU8{2, 1, 8, std::vector<uint8_t>{0, 2, 3, 5, 0, 2, 3, 5}},
367 DepthwiseConv2DQuantTestParamU8{2, 1, 32, std::vector<uint8_t>{0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3,
368 5, 0, 2, 3, 5, 0, 2, 3, 5, 0, 2,
369 3, 5, 0, 2, 3, 5, 0, 2, 3, 5}},
370 DepthwiseConv2DQuantTestParamU8{
371 2, 1, 20, std::vector<uint8_t>{0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3, 5}},
372 DepthwiseConv2DQuantTestParamU8{
373 2, 1, 16, std::vector<uint8_t>{0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3, 5}},
374 DepthwiseConv2DQuantTestParamU8{2, 8, 1, std::vector<uint8_t>{0, 3, 5, 8, 0, 3, 5, 8}},
375 DepthwiseConv2DQuantTestParamU8{
376 2, 8, 2, std::vector<uint8_t>{0, 3, 5, 8, 0, 3, 5, 8, 0, 3, 5, 8, 0, 3, 5, 8}},
377 DepthwiseConv2DQuantTestParamU8{
378 2, 16, 1, std::vector<uint8_t>{0, 3, 8, 16, 0, 4, 7, 12, 0, 3, 7, 13, 0, 4, 7, 12}}));
380 CircleBuffer genDepthwiseConv2DQuantU8Model(int stride, int input_depth, int depth_multiplier)
382 assert(1 <= stride && stride <= 2);
383 assert(1 <= input_depth && input_depth <= 16);
384 assert(1 <= depth_multiplier && depth_multiplier <= 32);
386 const int output_depth = input_depth * depth_multiplier;
387 assert(1 <= output_depth && output_depth <= 32);
390 uint32_t ker_buf = cgen.addBuffer(std::vector<uint8_t>{
391 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1,
392 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
393 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1,
394 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
395 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3});
396 uint32_t bias_buf = cgen.addBuffer(std::vector<int32_t>(output_depth, 0));
397 int in = cgen.addTensor({{1, 2, 2, input_depth}, circle::TensorType_UINT8}, 0.5, 0);
398 int ker = cgen.addTensor({{1, 2, 2, output_depth}, circle::TensorType_UINT8, ker_buf}, 0.5, 0);
399 int bias = cgen.addTensor({{output_depth}, circle::TensorType_INT32, bias_buf}, 0.25, 0);
400 int out = cgen.addTensor({{1, 1, 1, output_depth}, circle::TensorType_UINT8}, 1, 0);
401 cgen.addOperatorDepthwiseConv2D({{in, ker, bias}, {out}}, circle::Padding::Padding_VALID, stride,
402 stride, depth_multiplier, circle::ActivationFunctionType_NONE);
403 cgen.setInputsAndOutputs({in}, {out});
404 return cgen.finish();
407 TEST_P(DepthwiseConv2DQuantTestU8, Test)
409 // Same input is used for all tests but output differs
410 static const std::vector<uint8_t> input64{
411 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 5, 4, 3, 2, 5, 4, 3, 2, 5, 4, 3, 2, 5, 4, 3, 2,
412 2, 4, 6, 8, 2, 4, 6, 8, 2, 4, 6, 8, 2, 4, 6, 8, 2, 3, 5, 8, 8, 5, 3, 2, 1, 2, 3, 4, 5, 4, 3, 2};
414 auto ¶m = GetParam();
415 _context = std::make_unique<GenModelTestContext>(
416 genDepthwiseConv2DQuantU8Model(param.stride, param.input_depth, param.depth_multiplier));
417 std::vector<uint8_t> ref_input(input64.begin(), input64.begin() + param.input_depth * 4);
418 _context->addTestCase(uniformTCD<uint8_t>({ref_input}, {param.ref_output}));
419 _context->setBackends({"acl_cl", "acl_neon", "cpu"});
// Concrete param/fixture aliases for the int8_t variant.
using DepthwiseConv2DQuantTestParamI8 = DepthwiseConv2DQuantTestParam<int8_t>;
using DepthwiseConv2DQuantTestI8 = DepthwiseConv2DQuantTest<int8_t>;
427 // Test with different InputDepth and DepthMultiplier. The values are intended to test optimized CPU
429 INSTANTIATE_TEST_SUITE_P(
430 GenModelTest, DepthwiseConv2DQuantTestI8,
433 DepthwiseConv2DQuantTestParamI8{1, 8, 1, std::vector<int8_t>{0, 3, 5, 8, 0, 3, 5, 8}},
434 DepthwiseConv2DQuantTestParamI8{1, 4, 2, std::vector<int8_t>{0, 0, 2, 3, 0, 2, 6, 9}},
435 DepthwiseConv2DQuantTestParamI8{
436 1, 2, 8, std::vector<int8_t>{0, 1, 2, 3, 0, 1, 2, 3, 0, 2, 4, 6, 0, 2, 4, 6}},
437 DepthwiseConv2DQuantTestParamI8{1, 2, 2, std::vector<int8_t>{0, 1, 4, 6}},
438 DepthwiseConv2DQuantTestParamI8{1, 2, 1, std::vector<int8_t>{2, 5}},
439 DepthwiseConv2DQuantTestParamI8{1, 1, 2, std::vector<int8_t>{2, 4}},
440 DepthwiseConv2DQuantTestParamI8{1, 1, 4, std::vector<int8_t>{0, 2, 3, 5}},
441 DepthwiseConv2DQuantTestParamI8{1, 4, 1, std::vector<int8_t>{0, 1, 4, 9}},
442 DepthwiseConv2DQuantTestParamI8{
443 1, 4, 4, std::vector<int8_t>{0, 0, 0, 0, 0, 1, 2, 3, 0, 2, 4, 6, 0, 3, 6, 9}},
444 DepthwiseConv2DQuantTestParamI8{1, 12, 1,
445 std::vector<int8_t>{0, 3, 7, 12, 0, 4, 7, 12, 0, 4, 9, 16}},
447 DepthwiseConv2DQuantTestParamI8{2, 4, 1, std::vector<int8_t>{0, 1, 4, 9}},
448 DepthwiseConv2DQuantTestParamI8{2, 2, 1, std::vector<int8_t>{2, 5}},
449 DepthwiseConv2DQuantTestParamI8{2, 1, 8, std::vector<int8_t>{0, 2, 3, 5, 0, 2, 3, 5}},
450 DepthwiseConv2DQuantTestParamI8{2, 1, 32, std::vector<int8_t>{0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3,
451 5, 0, 2, 3, 5, 0, 2, 3, 5, 0, 2,
452 3, 5, 0, 2, 3, 5, 0, 2, 3, 5}},
453 DepthwiseConv2DQuantTestParamI8{
454 2, 1, 20, std::vector<int8_t>{0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3, 5}},
455 DepthwiseConv2DQuantTestParamI8{
456 2, 1, 16, std::vector<int8_t>{0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3, 5, 0, 2, 3, 5}},
457 DepthwiseConv2DQuantTestParamI8{2, 8, 1, std::vector<int8_t>{0, 3, 5, 8, 0, 3, 5, 8}},
458 DepthwiseConv2DQuantTestParamI8{
459 2, 8, 2, std::vector<int8_t>{0, 3, 5, 8, 0, 3, 5, 8, 0, 3, 5, 8, 0, 3, 5, 8}},
460 DepthwiseConv2DQuantTestParamI8{
461 2, 16, 1, std::vector<int8_t>{0, 3, 8, 16, 0, 4, 7, 12, 0, 3, 7, 13, 0, 4, 7, 12}}));
463 CircleBuffer genDepthwiseConv2DQuantI8Model(int stride, int input_depth, int depth_multiplier)
465 assert(1 <= stride && stride <= 2);
466 assert(1 <= input_depth && input_depth <= 16);
467 assert(1 <= depth_multiplier && depth_multiplier <= 32);
469 const int output_depth = input_depth * depth_multiplier;
470 assert(1 <= output_depth && output_depth <= 32);
473 uint32_t ker_buf = cgen.addBuffer(std::vector<int8_t>{
474 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1,
475 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
476 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1,
477 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,
478 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3});
479 uint32_t bias_buf = cgen.addBuffer(std::vector<int32_t>(output_depth, 0));
480 int in = cgen.addTensor({{1, 2, 2, input_depth}, circle::TensorType_INT8}, 0.5, 0);
481 int ker = cgen.addTensor({{1, 2, 2, output_depth}, circle::TensorType_INT8, ker_buf}, 0.5, 0);
482 int bias = cgen.addTensor({{output_depth}, circle::TensorType_INT32, bias_buf}, 0.25, 0);
483 int out = cgen.addTensor({{1, 1, 1, output_depth}, circle::TensorType_INT8}, 1, 0);
484 cgen.addOperatorDepthwiseConv2D({{in, ker, bias}, {out}}, circle::Padding::Padding_VALID, stride,
485 stride, depth_multiplier, circle::ActivationFunctionType_NONE);
486 cgen.setInputsAndOutputs({in}, {out});
487 return cgen.finish();
490 TEST_P(DepthwiseConv2DQuantTestI8, Test)
492 // Same input is used for all tests but output differs
493 static const std::vector<int8_t> input64{
494 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 5, 4, 3, 2, 5, 4, 3, 2, 5, 4, 3, 2, 5, 4, 3, 2,
495 2, 4, 6, 8, 2, 4, 6, 8, 2, 4, 6, 8, 2, 4, 6, 8, 2, 3, 5, 8, 8, 5, 3, 2, 1, 2, 3, 4, 5, 4, 3, 2};
497 auto ¶m = GetParam();
498 _context = std::make_unique<GenModelTestContext>(
499 genDepthwiseConv2DQuantI8Model(param.stride, param.input_depth, param.depth_multiplier));
500 std::vector<int8_t> ref_input(input64.begin(), input64.begin() + param.input_depth * 4);
501 _context->addTestCase(uniformTCD<int8_t>({ref_input}, {param.ref_output}));
502 _context->setBackends({"acl_cl", "acl_neon", "cpu"});
507 TEST_F(GenModelTest, neg_OneOp_DepthwiseConv2D_InvalidPaddingType)
509 _context = std::make_unique<GenModelTestContext>(genNegTestDepthwiseConv2DModel(
510 static_cast<circle::Padding>(99), 1, 1, 1, circle::ActivationFunctionType_NONE));
511 _context->expectFailModelLoad();
512 _context->setBackends({"acl_cl", "acl_neon", "cpu", "xnnpack"});
517 // TODO add other invalid operation tests like above
519 TEST_F(GenModelTest, neg_OneOp_DepthwiseConv2D_I8_NonZero_ZeroPoints)
522 std::vector<int8_t> weight_data{1, 2, 3, 4, 5, 6, 7, 8};
523 uint32_t weight_buf = cgen.addBuffer(weight_data);
524 std::vector<int32_t> bias_data{0, 2};
525 uint32_t bias_buf = cgen.addBuffer(bias_data);
526 int in = cgen.addTensor({{1, 3, 3, 2}, circle::TensorType::TensorType_INT8}, 0.5, 0);
527 std::vector<float> weight_scales = {0.5, 1};
528 std::vector<int64_t> weight_zeropoints = {0, 10};
529 int weight = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType::TensorType_INT8, weight_buf},
530 weight_scales, weight_zeropoints);
531 int bias = cgen.addTensor({{1, 1, 1, 2}, circle::TensorType::TensorType_INT32, bias_buf});
532 int out = cgen.addTensor({{1, 2, 2, 2}, circle::TensorType::TensorType_FLOAT32}, 1.0, 0);
533 cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_VALID, 1, 1, 2,
534 circle::ActivationFunctionType_NONE);
535 cgen.setInputsAndOutputs({in}, {out});
536 _context = std::make_unique<GenModelTestContext>(cgen.finish());
537 _context->setBackends({"cpu"});
538 _context->expectFailModelLoad();
// Negative test: hybrid (int8 weights + float activations) DepthwiseConv2D
// requires per-channel weight quantization; a single per-tensor scale/zero-point
// must make compilation fail on the cpu backend.
TEST_F(GenModelTest, neg_OneOp_DepthwiseConv2D_I8_Hybrid_PerTensor)
  // NOTE(review): the enclosing braces and `CircleGen cgen;` appear to be
  // missing from this excerpt; the test body continues past the visible end.
  // PerTensor Quantized Weight is not supported
  std::vector<int8_t> weight_data{1, 2, 3};
  uint32_t weight_buf = cgen.addBuffer(weight_data);
  std::vector<float> bias_data{0, 2, 4};
  uint32_t bias_buf = cgen.addBuffer(bias_data);
  int in = cgen.addTensor({{1, 1, 1, 3}, circle::TensorType::TensorType_FLOAT32});
  // Hybrid does not support per-tensor.
  std::vector<float> weight_scales = {0.5};
  std::vector<int64_t> weight_zeropoints = {0};
  int weight = cgen.addTensor({{1, 1, 1, 3}, circle::TensorType::TensorType_INT8, weight_buf},
                              weight_scales, weight_zeropoints);
  int bias = cgen.addTensor({{1, 1, 1, 3}, circle::TensorType::TensorType_FLOAT32, bias_buf});
  int out = cgen.addTensor({{1, 1, 1, 3}, circle::TensorType::TensorType_FLOAT32});
  cgen.addOperatorDepthwiseConv2D({{in, weight, bias}, {out}}, circle::Padding_VALID, 1, 1,
                                  /* depth_multiplier */ 1, circle::ActivationFunctionType_NONE);
  cgen.setInputsAndOutputs({in}, {out});
  _context = std::make_unique<GenModelTestContext>(cgen.finish());
  // Failure is expected at compile time (not model load) for this case.
  _context->expectFailCompile();
  _context->setBackends({"cpu"});