/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "kernels/Add.h"
19 #include "kernels/TestUtils.h"
21 namespace luci_interpreter
28 using namespace testing;
// for quantized Add, the error shouldn't exceed step
//
// Returns the size of one uint8 quantization step for values in [min, max]:
// the range is spread over 255 steps, so a dequantized result may legitimately
// differ from the float reference by up to one step. Used as the tolerance in
// the quantized test below.
// NOTE(review): the extracted chunk was missing this function's braces; they
// are restored here so the definition is well-formed.
float GetTolerance(float min, float max)
{
  float kQuantizedStep = (max - min) / 255.0;
  return kQuantizedStep;
}
// ===== Quantized (uint8) broadcasting Add test body =====
// NOTE(review): this chunk is fragmentary — the TEST(...) header, loop braces,
// the `Tensor input1_tensor{` / `Tensor input2_tensor{` constructor openers,
// the AddParams declaration, and the kernel.configure()/execute() calls appear
// to have been dropped by extraction. Only comments are added here; the
// surviving statements are untouched.

// A fixed (2,3,1,2) "base" operand is added to four differently-shaped "test"
// operands to exercise broadcasting in both directions.
std::initializer_list<int32_t> base_shape = {2, 3, 1, 2};
std::initializer_list<float> base_data = {-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
std::initializer_list<int32_t> test_shapes[] = {
{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
std::initializer_list<float> test_data = {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
// Expected broadcast output shape and reference values for each test shape.
// Values already reflect saturation of the quantized range (e.g. 3.0f caps).
std::initializer_list<int32_t> output_shapes[] = {
{2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
std::vector<std::vector<float>> output_data = {
{-0.1f, 2.6f, -0.7f, 2.8f, 0.7f, 3.0f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
1.0f, -0.8f, 0.4f, -0.6f, 1.8f, -0.2f, 1.4f, 3.0f, 0.8f, 3.0f, 2.2f, 3.0f,
-1.4f, 0.3f, -2.0f, 0.5f, -0.6f, 0.9f, 0.9f, -1.9f, 0.3f, -1.7f, 1.7f, -1.3f},
{-0.1f, 2.6f, 0.5f, 1.0f, 1.8f, -0.2f, 1.4f, 3.0f, -2.0f, 0.5f, 1.7f, -1.3f},
{-0.1f, 2.5f, 0.0f, 2.6f, -0.7f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
1.0f, -0.9f, 1.1f, -0.8f, 0.4f, -1.5f, 1.7f, 3.0f, 2.2f, 3.0f, 2.1f, 3.0f,
-1.1f, 0.5f, -0.6f, 1.0f, -0.7f, 0.9f, 1.2f, -1.7f, 1.7f, -1.2f, 1.6f, -1.3f},
{-0.1f, 2.5f, 1.2f, 0.8f, 0.4f, -1.5f, 1.7f, 3.0f, -0.6f, 1.0f, 1.6f, -1.3f}};
// Both inputs and the output share one asymmetric quantization over [-3, 3];
// the comparison tolerance is one quantization step (see GetTolerance above).
float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-3.f, 3.f);
// NOTE(review): `int i` vs. `output_data.size()` is a signed/unsigned
// comparison — harmless at this size, but worth cleaning up.
for (int i = 0; i < output_data.size(); i++)
// The two lines below are the tails of Tensor constructor calls (openers
// missing from this chunk): element type, shape, {scale}, {zero_point}, name.
getElementType<uint8_t>(), base_shape, {{quant_param.first}, {quant_param.second}}, ""};
getElementType<uint8_t>(), test_shapes[i], {{quant_param.first}, {quant_param.second}}, ""};
// Quantize the float fixtures into uint8 and upload them into the tensors.
std::vector<uint8_t> quantized_input1_value =
quantize<uint8_t>(base_data, quant_param.first, quant_param.second);
std::vector<uint8_t> quantized_input2_value =
quantize<uint8_t>(test_data, quant_param.first, quant_param.second);
input1_tensor.writeData(quantized_input1_value.data(),
quantized_input1_value.size() * sizeof(uint8_t));
input2_tensor.writeData(quantized_input2_value.data(),
quantized_input2_value.size() * sizeof(uint8_t));
Tensor output_tensor =
makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
// No fused activation for the quantized variant.
params.activation = Activation::NONE;
Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
// Dequantize the uint8 result and compare against the float reference within
// one quantization step; also check the broadcast output shape.
EXPECT_THAT(dequantize<uint8_t>(extractTensorData<uint8_t>(output_tensor),
output_tensor.scale(), output_tensor.zero_point()),
ElementsAreArray(ArrayFloatNear(output_data[i], kQuantizedTolerance)));
EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
// Re-run with exchanged inputs.
// Same expectations must hold with operands swapped — Add is commutative.
for (int i = 0; i < output_data.size(); i++)
getElementType<uint8_t>(), test_shapes[i], {{quant_param.first}, {quant_param.second}}, ""};
getElementType<uint8_t>(), base_shape, {{quant_param.first}, {quant_param.second}}, ""};
// Inputs are quantized in the swapped order (test_data first, base_data second).
std::vector<uint8_t> quantized_input1_value =
quantize<uint8_t>(test_data, quant_param.first, quant_param.second);
std::vector<uint8_t> quantized_input2_value =
quantize<uint8_t>(base_data, quant_param.first, quant_param.second);
input1_tensor.writeData(quantized_input1_value.data(),
quantized_input1_value.size() * sizeof(uint8_t));
input2_tensor.writeData(quantized_input2_value.data(),
quantized_input2_value.size() * sizeof(uint8_t));
Tensor output_tensor =
makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
params.activation = Activation::NONE;
Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
EXPECT_THAT(dequantize<uint8_t>(extractTensorData<uint8_t>(output_tensor),
output_tensor.scale(), output_tensor.zero_point()),
ElementsAreArray(ArrayFloatNear(output_data[i], kQuantizedTolerance)));
EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
// ===== Float broadcasting Add test body (fused RELU) =====
// NOTE(review): fragmentary chunk — the TEST(...) header, loop braces, the
// AddParams declaration, and the kernel.configure()/execute() calls are
// missing from this view. Only comments are added; code is untouched.

// Same broadcast geometry as the quantized test: a (2,3,1,2) base operand
// against four differently-shaped second operands.
Shape base_shape = {2, 3, 1, 2};
std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
// Expected outputs per shape. Entries of 0.0f are where the raw sum was
// negative and was clamped by the fused RELU activation set below.
std::vector<std::vector<float>> test_outputs = {
{0.0f, 2.6f, 0.0f, 2.8f, 0.7f, 3.2f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
1.0f, 0.0f, 0.4f, 0.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.8f, 3.3f, 2.2f, 3.7f,
0.0f, 0.3f, 0.0f, 0.5f, 0.0f, 0.9f, 0.9f, 0.0f, 0.3f, 0.0f, 1.7f, 0.0f},
{0.0f, 2.6f, 0.5f, 1.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.0f, 0.5f, 1.7f, 0.0f},
{0.0f, 2.5f, 0.0f, 2.6f, 0.0f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
1.0f, 0.0f, 1.1f, 0.0f, 0.4f, 0.0f, 1.7f, 3.3f, 2.2f, 3.8f, 2.1f, 3.7f,
0.0f, 0.5f, 0.0f, 1.0f, 0.0f, 0.9f, 1.2f, 0.0f, 1.7f, 0.0f, 1.6f, 0.0f},
{0.0f, 2.5f, 1.2f, 0.8f, 0.4f, 0.0f, 1.7f, 3.3f, 0.0f, 1.0f, 1.6f, 0.0f}};
std::vector<float> input1_data{-0.3f, 2.3f, 0.9f, 0.5f, 0.8f, -1.1f,
1.2f, 2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
std::vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
for (size_t i = 0; i < test_shapes.size(); ++i)
Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>(base_shape, input1_data);
Tensor input2_tensor = makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data);
Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
// Fused RELU: negative sums are clamped to zero in the expected data above.
params.activation = Activation::RELU;
Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
// Float comparison with a small absolute tolerance; the message identifies
// which broadcast case failed.
EXPECT_THAT(extractTensorData<float>(output_tensor),
::testing::ElementsAreArray(ArrayFloatNear(test_outputs[i], 0.0001f)))
<< "With shape number " << i;
// Re-run with exchanged inputs.
// Operands swapped — same expectations must hold since Add is commutative.
for (size_t i = 0; i < test_shapes.size(); ++i)
Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data);
Tensor input2_tensor = makeInputTensor<DataType::FLOAT32>(base_shape, input1_data);
Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
params.activation = Activation::RELU;
Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
EXPECT_THAT(extractTensorData<float>(output_tensor),
::testing::ElementsAreArray(ArrayFloatNear(test_outputs[i], 0.0001f)))
<< "With shape number " << i;
173 } // namespace kernels
174 } // namespace luci_interpreter