/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "kernels/TransposeConv.h"
19 #include "kernels/TestUtils.h"
20 #include "luci_interpreter/TestMemoryManager.h"
22 namespace luci_interpreter
29 using namespace testing;
31 template <typename T, typename B>
32 void Check(std::initializer_list<int32_t> output_shape_shape,
33 std::initializer_list<int32_t> weight_shape, std::initializer_list<int32_t> input_shape,
34 std::initializer_list<int32_t> bias_shape, std::initializer_list<int32_t> output_shape,
35 std::initializer_list<int32_t> output_shape_data, std::initializer_list<T> weight_data,
36 std::initializer_list<T> input_data, std::initializer_list<B> bias_data,
37 std::initializer_list<T> output_data, luci::Padding padding, int32_t stride_height,
40 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
42 constexpr DataType element_type = getElementType<T>();
43 Tensor output_shape_tensor =
44 makeInputTensor<DataType::S32>(output_shape_shape, output_shape_data, memory_manager.get());
45 Tensor weight_tensor =
46 makeInputTensor<element_type>(weight_shape, weight_data, memory_manager.get());
47 Tensor input_data_tensor =
48 makeInputTensor<element_type>(input_shape, input_data, memory_manager.get());
50 DataType scratch_data_type = element_type == DataType::S16 ? DataType::S64 : DataType::S32;
51 Tensor scratch_tensor(scratch_data_type, Shape({}), {}, "");
52 Tensor output_tensor = makeOutputTensor(element_type);
54 TransposeConvParams params{};
55 params.padding = padding;
56 params.stride_height = stride_height;
57 params.stride_width = stride_width;
59 if (bias_data.size() != 0)
62 makeInputTensor<getElementType<B>()>(bias_shape, bias_data, memory_manager.get());
63 TransposeConv kernel(&output_shape_tensor, &weight_tensor, &input_data_tensor, &bias_tensor,
64 &output_tensor, &scratch_tensor, params);
66 memory_manager->allocate_memory(output_tensor);
67 memory_manager->allocate_memory(scratch_tensor);
72 TransposeConv kernel(&output_shape_tensor, &weight_tensor, &input_data_tensor, nullptr,
73 &output_tensor, &scratch_tensor, params);
75 memory_manager->allocate_memory(output_tensor);
76 memory_manager->allocate_memory(scratch_tensor);
79 EXPECT_THAT(extractTensorData<T>(output_tensor), ::testing::ElementsAreArray(output_data));
82 TEST(TransposeConvTest, FloatSimple)
85 /*output_shape_shape=*/{4}, /*weight_shape=*/{1, 3, 3, 1}, /*input_shape=*/{1, 4, 4, 1},
86 /*bias_shape=*/{}, /*output_shape=*/{1, 4, 4, 1}, /*output_shape_data=*/{1, 4, 4, 1},
87 /*weight_data=*/{1, 2, 3, 4, 5, 6, 7, 8, 9},
88 /*input_data=*/{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
90 /*output_data=*/{29, 62, 83, 75, 99, 192, 237, 198, 207, 372, 417, 330, 263, 446, 485, 365},
91 /*params.padding=*/luci::Padding::SAME, /*stride_height=*/1, /*stride_width=*/1);
96 TEST(TransposeConvTest, FloatTwoFiltersTest)
99 /*output_shape_shape=*/{4}, /*weight_shape=*/{1, 3, 3, 2}, /*input_shape=*/{1, 4, 4, 2},
100 /*bias_shape=*/{}, /*output_shape=*/{1, 4, 4, 1}, /*output_shape_data=*/{1, 4, 4, 1},
101 /*weight_data=*/{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18},
102 /*input_data=*/{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
103 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
106 {184, 412, 568, 528, 678, 1347, 1689, 1434, 1494, 2715, 3057, 2442, 1968, 3352, 3652, 2760},
107 /*params.padding=*/luci::Padding::SAME, /*stride_height=*/1, /*stride_width=*/1);
112 TEST(TransposeConvTest, SimpleBiasTest)
115 /*output_shape_shape=*/{4}, /*weight_shape=*/{2, 3, 3, 1},
116 /*input_shape=*/{1, 2, 2, 1},
117 /*bias_shape=*/{2}, /*output_shape=*/{1, 4, 4, 1}, /*output_shape_data=*/{1, 5, 5, 2},
118 /*weight_data=*/{1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6, 8, 10, 12, 14, 16, 18},
119 /*input_data=*/{1, 2, 3, 4},
120 /*bias_data=*/{3, 4},
121 /*output_data=*/{4, 6, 6, 8, 10, 14, 9, 12, 13, 16, 10, 12, 12, 14, 28, 32, 21,
122 24, 25, 28, 19, 24, 27, 32, 65, 76, 45, 52, 57, 64, 24, 28, 30, 34,
123 64, 72, 39, 44, 47, 52, 42, 46, 48, 52, 106, 114, 63, 68, 71, 76},
124 /*params.padding=*/luci::Padding::VALID, /*stride_height=*/2, /*stride_width=*/2);
129 TEST(TransposeConvTest, UInt8)
131 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
133 std::vector<float> input_data{1, 2, 3, 4};
134 std::vector<float> filter_data{1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6, 8, 10, 12, 14, 16, 18};
135 std::vector<float> bias_data{3, 4};
136 std::vector<int32_t> output_shape_data{1, 5, 5, 2};
137 std::vector<float> ref_output_data{
138 4, 6, 6, 8, 10, 14, 9, 12, 13, 16, //
139 10, 12, 12, 14, 28, 32, 21, 24, 25, 28, //
140 19, 24, 27, 32, 65, 76, 45, 52, 57, 64, //
141 24, 28, 30, 34, 64, 72, 39, 44, 47, 52, //
142 42, 46, 48, 52, 106, 114, 63, 68, 71, 76, //
145 // Choose quantization parameters carefully.
146 auto input_quant = quantizationParams<uint8_t>(-8.0, 7.9375); // s = 1 / 16, zp = 128
147 auto filter_quant = quantizationParams<uint8_t>(-24.0, 39.75); // s = 1 / 4, zp = 96
148 auto output_quant = quantizationParams<uint8_t>(-64.0, 191.0); // s = 1, zp = 64
150 Tensor input_tensor = makeInputTensor<DataType::U8>(
151 {1, 2, 2, 1}, input_quant.first, input_quant.second, input_data, memory_manager.get());
152 Tensor filter_tensor = makeInputTensor<DataType::U8>(
153 {2, 3, 3, 1}, filter_quant.first, filter_quant.second, filter_data, memory_manager.get());
154 Tensor bias_tensor = makeInputTensor<DataType::S32>({2}, input_quant.first * filter_quant.first,
155 0, bias_data, memory_manager.get());
156 Tensor output_shape_tensor =
157 makeInputTensor<DataType::S32>({4}, output_shape_data, memory_manager.get());
158 Tensor output_tensor = makeOutputTensor(DataType::U8, output_quant.first, output_quant.second);
160 DataType scratch_data_type =
161 input_tensor.element_type() == DataType::S16 ? DataType::S64 : DataType::S32;
162 Tensor scratch_tensor(scratch_data_type, Shape({}), {}, "");
164 TransposeConvParams params{};
165 params.padding = Padding::VALID;
166 params.stride_height = 2;
167 params.stride_width = 2;
169 TransposeConv kernel(&output_shape_tensor, &filter_tensor, &input_tensor, &bias_tensor,
170 &output_tensor, &scratch_tensor, params);
172 memory_manager->allocate_memory(output_tensor);
173 memory_manager->allocate_memory(scratch_tensor);
176 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape_data));
177 EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_output_data));
180 TEST(TransposeConvTest, UInt8_CWQ)
182 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
184 const int32_t output_channels = 2;
185 std::vector<float> input_data{1, 2, 3, 4};
186 std::vector<float> filter_data{1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6, 8, 10, 12, 14, 16, 18};
187 std::vector<float> bias_data{3, 4};
188 std::vector<int32_t> output_shape_data{1, 5, 5, 2};
189 std::vector<float> ref_output_data{
190 4, 6, 6, 8, 10, 14, 9, 12, 13, 16, //
191 10, 12, 12, 14, 28, 32, 21, 24, 25, 28, //
192 19, 24, 27, 32, 65, 76, 45, 52, 57, 64, //
193 24, 28, 30, 34, 64, 72, 39, 44, 47, 52, //
194 42, 46, 48, 52, 106, 114, 63, 68, 71, 76, //
197 // Choose quantization parameters carefully.
198 auto input_quant = quantizationParams<uint8_t>(-8.0, 7.9375); // s = 1 / 16, zp = 128
199 auto output_quant = quantizationParams<uint8_t>(-64.0, 191.0); // s = 1, zp = 64
201 std::vector<std::pair<float, int32_t>> filter_quant_params;
202 filter_quant_params.push_back(quantizationParams<uint8_t>(0, 17));
203 filter_quant_params.push_back(quantizationParams<uint8_t>(0, 18));
205 std::vector<float> filter_scales;
206 std::vector<int32_t> filter_zerops;
207 for (auto iter : filter_quant_params)
209 filter_scales.push_back(iter.first);
210 filter_zerops.push_back(iter.second);
213 std::vector<float> bias_scales;
214 for (int i = 0; i < output_channels; ++i)
215 bias_scales.push_back(filter_quant_params[i].first * input_quant.first);
216 std::vector<int32_t> zerop(output_channels, 0);
218 Tensor input_tensor = makeInputTensor<DataType::U8>(
219 {1, 2, 2, 1}, input_quant.first, input_quant.second, input_data, memory_manager.get());
220 Tensor filter_tensor = makeInputTensor<DataType::U8>(
221 {output_channels, 3, 3, 1}, filter_scales, filter_zerops, 0, filter_data, memory_manager.get());
222 Tensor bias_tensor = makeInputTensor<DataType::S32>({output_channels}, bias_scales, zerop, 0,
223 bias_data, memory_manager.get());
224 Tensor output_shape_tensor =
225 makeInputTensor<DataType::S32>({4}, output_shape_data, memory_manager.get());
226 Tensor output_tensor = makeOutputTensor(DataType::U8, output_quant.first, output_quant.second);
228 DataType scratch_data_type =
229 input_tensor.element_type() == DataType::S16 ? DataType::S64 : DataType::S32;
230 Tensor scratch_tensor(scratch_data_type, Shape({}), {}, "");
232 TransposeConvParams params{};
233 params.padding = Padding::VALID;
234 params.stride_height = 2;
235 params.stride_width = 2;
237 TransposeConv kernel(&output_shape_tensor, &filter_tensor, &input_tensor, &bias_tensor,
238 &output_tensor, &scratch_tensor, params);
240 memory_manager->allocate_memory(output_tensor);
241 memory_manager->allocate_memory(scratch_tensor);
244 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape_data));
245 EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_output_data));
248 TEST(TransposeConvTest, SInt16)
250 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
252 std::vector<float> input_data{1, 2, 3, 4};
253 std::vector<float> filter_data{1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6, 8, 10, 12, 14, 16, 18};
254 std::vector<float> bias_data{3, 4};
255 std::vector<int32_t> output_shape_data{1, 5, 5, 2};
256 std::vector<float> ref_output_data{
257 4, 6, 6, 8, 10, 14, 9, 12, 13, 16, //
258 10, 12, 12, 14, 28, 32, 21, 24, 25, 28, //
259 19, 24, 27, 32, 65, 76, 45, 52, 57, 64, //
260 24, 28, 30, 34, 64, 72, 39, 44, 47, 52, //
261 42, 46, 48, 52, 106, 114, 63, 68, 71, 76, //
264 Tensor input_tensor =
265 makeInputTensor<DataType::S16>({1, 2, 2, 1}, 0.25, 0, input_data, memory_manager.get());
266 Tensor filter_tensor =
267 makeInputTensor<DataType::S16>({2, 3, 3, 1}, 0.2, 0, filter_data, memory_manager.get());
269 makeInputTensor<DataType::S64>({2}, 0.25 * 0.2, 0, bias_data, memory_manager.get());
270 Tensor output_shape_tensor =
271 makeInputTensor<DataType::S32>({4}, output_shape_data, memory_manager.get());
272 Tensor output_tensor = makeOutputTensor(DataType::S16, 0.5, 0);
274 DataType scratch_data_type =
275 input_tensor.element_type() == DataType::S16 ? DataType::S64 : DataType::S32;
276 Tensor scratch_tensor(scratch_data_type, Shape({}), {}, "");
278 TransposeConvParams params{};
279 params.padding = Padding::VALID;
280 params.stride_height = 2;
281 params.stride_width = 2;
283 TransposeConv kernel(&output_shape_tensor, &filter_tensor, &input_tensor, &bias_tensor,
284 &output_tensor, &scratch_tensor, params);
286 memory_manager->allocate_memory(output_tensor);
287 memory_manager->allocate_memory(scratch_tensor);
290 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape_data));
291 EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_output_data));
294 TEST(TransposeConvTest, SInt16_CWQ_weights)
296 std::unique_ptr<IMemoryManager> memory_manager = std::make_unique<TestMemoryManager>();
298 const int output_channels = 2;
299 const Shape input_shape{1, 2, 2, 1};
300 const Shape filter_shape{output_channels, 3, 3, 1};
301 const Shape bias_shape{output_channels};
302 std::vector<int32_t> output_shape_data{1, 5, 5, output_channels};
304 std::vector<float> input_data{1, 2, 3, 4};
305 std::vector<float> filter_data{1, 3, 5, 7, 9, 11, 13, 15, 17, 2, 4, 6, 8, 10, 12, 14, 16, 18};
306 std::vector<float> bias_data{3, 4};
308 std::vector<float> ref_output_data{
309 4, 6, 6, 8, 10, 14, 9, 12, 13, 16, //
310 10, 12, 12, 14, 28, 32, 21, 24, 25, 28, //
311 19, 24, 27, 32, 65, 76, 45, 52, 57, 64, //
312 24, 28, 30, 34, 64, 72, 39, 44, 47, 52, //
313 42, 46, 48, 52, 106, 114, 63, 68, 71, 76, //
316 const float input_scale = 0.25;
317 const float output_scale = 0.5;
318 const std::vector<float> filter_scales{0.2f, 0.5f};
319 std::vector<float> bias_scales{filter_scales[0] * input_scale, filter_scales[1] * input_scale};
320 const std::vector<int32_t> zerop(2, 0);
322 Tensor input_tensor =
323 makeInputTensor<DataType::S16>(input_shape, input_scale, 0, input_data, memory_manager.get());
324 Tensor filter_tensor = makeInputTensor<DataType::S16>(filter_shape, filter_scales, zerop, 0,
325 filter_data, memory_manager.get());
326 Tensor bias_tensor = makeInputTensor<DataType::S64>(bias_shape, bias_scales, zerop, 0, bias_data,
327 memory_manager.get());
328 Tensor output_shape_tensor =
329 makeInputTensor<DataType::S32>({4}, output_shape_data, memory_manager.get());
330 Tensor output_tensor = makeOutputTensor(DataType::S16, output_scale, 0);
332 DataType scratch_data_type =
333 input_tensor.element_type() == DataType::S16 ? DataType::S64 : DataType::S32;
334 Tensor scratch_tensor(scratch_data_type, Shape({}), {}, "");
336 TransposeConvParams params{};
337 params.padding = Padding::VALID;
338 params.stride_height = 2;
339 params.stride_width = 2;
341 TransposeConv kernel(&output_shape_tensor, &filter_tensor, &input_tensor, &bias_tensor,
342 &output_tensor, &scratch_tensor, params);
344 memory_manager->allocate_memory(output_tensor);
345 memory_manager->allocate_memory(scratch_tensor);
348 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape_data));
349 EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_output_data));
353 } // namespace kernels
354 } // namespace luci_interpreter