Imported Upstream version 1.8.0
[platform/core/ml/nnfw.git] / compiler / luci-interpreter / src / kernels / Add.test.cpp
1 /*
2  * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3  * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
4  *
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at
8  *
9  *    http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17
18 #include "kernels/Add.h"
19 #include "kernels/TestUtils.h"
20
21 namespace luci_interpreter
22 {
23 namespace kernels
24 {
25 namespace
26 {
27
28 using namespace testing;
29
// Tolerance for comparing dequantized results: for quantized Add the absolute
// error of a single element should not exceed one quantization step, i.e.
// (max - min) / 255 for uint8 quantization.
float GetTolerance(float min, float max)
{
  // Use a float literal so the division happens in float (the original 255.0
  // computed in double and narrowed the result back to float).
  const float kQuantizedStep = (max - min) / 255.0f;
  return kQuantizedStep;
}
36
37 TEST(AddTest, Uint8)
38 {
39   std::initializer_list<int32_t> base_shape = {2, 3, 1, 2};
40   std::initializer_list<float> base_data = {-0.3f, 2.3f, 0.9f,  0.5f, 0.8f, -1.1f,
41                                             1.2f,  2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
42   std::initializer_list<int32_t> test_shapes[] = {
43       {1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
44   std::initializer_list<float> test_data = {0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
45   std::initializer_list<int32_t> output_shapes[] = {
46       {2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};
47   std::vector<std::vector<float>> output_data = {
48       {-0.1f, 2.6f,  -0.7f, 2.8f,  0.7f,  3.0f,  1.1f, 0.8f,  0.5f, 1.0f,  1.9f, 1.4f,
49        1.0f,  -0.8f, 0.4f,  -0.6f, 1.8f,  -0.2f, 1.4f, 3.0f,  0.8f, 3.0f,  2.2f, 3.0f,
50        -1.4f, 0.3f,  -2.0f, 0.5f,  -0.6f, 0.9f,  0.9f, -1.9f, 0.3f, -1.7f, 1.7f, -1.3f},
51       {-0.1f, 2.6f, 0.5f, 1.0f, 1.8f, -0.2f, 1.4f, 3.0f, -2.0f, 0.5f, 1.7f, -1.3f},
52       {-0.1f, 2.5f,  0.0f,  2.6f,  -0.7f, 1.9f,  1.1f, 0.7f,  1.2f, 0.8f,  0.5f, 0.1f,
53        1.0f,  -0.9f, 1.1f,  -0.8f, 0.4f,  -1.5f, 1.7f, 3.0f,  2.2f, 3.0f,  2.1f, 3.0f,
54        -1.1f, 0.5f,  -0.6f, 1.0f,  -0.7f, 0.9f,  1.2f, -1.7f, 1.7f, -1.2f, 1.6f, -1.3f},
55       {-0.1f, 2.5f, 1.2f, 0.8f, 0.4f, -1.5f, 1.7f, 3.0f, -0.6f, 1.0f, 1.6f, -1.3f}};
56   float kQuantizedTolerance = GetTolerance(-3.f, 3.f);
57   std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-3.f, 3.f);
58   for (int i = 0; i < output_data.size(); i++)
59   {
60     Tensor input1_tensor{
61         getElementType<uint8_t>(), base_shape, {{quant_param.first}, {quant_param.second}}, ""};
62     Tensor input2_tensor{
63         getElementType<uint8_t>(), test_shapes[i], {{quant_param.first}, {quant_param.second}}, ""};
64     std::vector<uint8_t> quantized_input1_value =
65         quantize<uint8_t>(base_data, quant_param.first, quant_param.second);
66     std::vector<uint8_t> quantized_input2_value =
67         quantize<uint8_t>(test_data, quant_param.first, quant_param.second);
68     input1_tensor.writeData(quantized_input1_value.data(),
69                             quantized_input1_value.size() * sizeof(uint8_t));
70     input2_tensor.writeData(quantized_input2_value.data(),
71                             quantized_input2_value.size() * sizeof(uint8_t));
72     Tensor output_tensor =
73         makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
74
75     AddParams params{};
76     params.activation = Activation::NONE;
77
78     Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
79     kernel.configure();
80     kernel.execute();
81
82     EXPECT_THAT(dequantize<uint8_t>(extractTensorData<uint8_t>(output_tensor),
83                                     output_tensor.scale(), output_tensor.zero_point()),
84                 ElementsAreArray(ArrayFloatNear(output_data[i], kQuantizedTolerance)));
85     EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
86   }
87   // Re-run with exchanged inputs.
88   for (int i = 0; i < output_data.size(); i++)
89   {
90     Tensor input1_tensor{
91         getElementType<uint8_t>(), test_shapes[i], {{quant_param.first}, {quant_param.second}}, ""};
92     Tensor input2_tensor{
93         getElementType<uint8_t>(), base_shape, {{quant_param.first}, {quant_param.second}}, ""};
94     std::vector<uint8_t> quantized_input1_value =
95         quantize<uint8_t>(test_data, quant_param.first, quant_param.second);
96     std::vector<uint8_t> quantized_input2_value =
97         quantize<uint8_t>(base_data, quant_param.first, quant_param.second);
98     input1_tensor.writeData(quantized_input1_value.data(),
99                             quantized_input1_value.size() * sizeof(uint8_t));
100     input2_tensor.writeData(quantized_input2_value.data(),
101                             quantized_input2_value.size() * sizeof(uint8_t));
102     Tensor output_tensor =
103         makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
104
105     AddParams params{};
106     params.activation = Activation::NONE;
107
108     Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
109     kernel.configure();
110     kernel.execute();
111
112     EXPECT_THAT(dequantize<uint8_t>(extractTensorData<uint8_t>(output_tensor),
113                                     output_tensor.scale(), output_tensor.zero_point()),
114                 ElementsAreArray(ArrayFloatNear(output_data[i], kQuantizedTolerance)));
115     EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shapes[i]));
116   }
117 }
118
119 TEST(AddTest, Float)
120 {
121   Shape base_shape = {2, 3, 1, 2};
122   std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
123   std::vector<std::vector<float>> test_outputs = {
124       {0.0f, 2.6f, 0.0f, 2.8f, 0.7f, 3.2f, 1.1f, 0.8f, 0.5f, 1.0f, 1.9f, 1.4f,
125        1.0f, 0.0f, 0.4f, 0.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.8f, 3.3f, 2.2f, 3.7f,
126        0.0f, 0.3f, 0.0f, 0.5f, 0.0f, 0.9f, 0.9f, 0.0f, 0.3f, 0.0f, 1.7f, 0.0f},
127       {0.0f, 2.6f, 0.5f, 1.0f, 1.8f, 0.0f, 1.4f, 3.1f, 0.0f, 0.5f, 1.7f, 0.0f},
128       {0.0f, 2.5f, 0.0f, 2.6f, 0.0f, 1.9f, 1.1f, 0.7f, 1.2f, 0.8f, 0.5f, 0.1f,
129        1.0f, 0.0f, 1.1f, 0.0f, 0.4f, 0.0f, 1.7f, 3.3f, 2.2f, 3.8f, 2.1f, 3.7f,
130        0.0f, 0.5f, 0.0f, 1.0f, 0.0f, 0.9f, 1.2f, 0.0f, 1.7f, 0.0f, 1.6f, 0.0f},
131       {0.0f, 2.5f, 1.2f, 0.8f, 0.4f, 0.0f, 1.7f, 3.3f, 0.0f, 1.0f, 1.6f, 0.0f}};
132   std::vector<float> input1_data{-0.3f, 2.3f, 0.9f,  0.5f, 0.8f, -1.1f,
133                                  1.2f,  2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
134   std::vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
135   for (size_t i = 0; i < test_shapes.size(); ++i)
136   {
137     Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>(base_shape, input1_data);
138     Tensor input2_tensor = makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data);
139     Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
140
141     AddParams params{};
142     params.activation = Activation::RELU;
143
144     Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
145     kernel.configure();
146     kernel.execute();
147
148     EXPECT_THAT(extractTensorData<float>(output_tensor),
149                 ::testing::ElementsAreArray(ArrayFloatNear(test_outputs[i], 0.0001f)))
150         << "With shape number " << i;
151   }
152   // Re-run with exchanged inputs.
153   for (size_t i = 0; i < test_shapes.size(); ++i)
154   {
155     Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data);
156     Tensor input2_tensor = makeInputTensor<DataType::FLOAT32>(base_shape, input1_data);
157     Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
158
159     AddParams params{};
160     params.activation = Activation::RELU;
161
162     Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
163     kernel.configure();
164     kernel.execute();
165
166     EXPECT_THAT(extractTensorData<float>(output_tensor),
167                 ::testing::ElementsAreArray(ArrayFloatNear(test_outputs[i], 0.0001f)))
168         << "With shape number " << i;
169   }
170 }
171
172 TEST(AddTest, Input_Output_Type_NEG)
173 {
174   Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>({1}, {1.f});
175   Tensor input2_tensor = makeInputTensor<DataType::S32>({1}, {2});
176   Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
177
178   AddParams params{};
179   params.activation = Activation::RELU;
180
181   Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
182   EXPECT_ANY_THROW(kernel.configure());
183 }
184
185 TEST(AddTest, Invalid_Input_Type_NEG)
186 {
187   Tensor input1_tensor = makeInputTensor<DataType::S64>({1}, {1});
188   Tensor input2_tensor = makeInputTensor<DataType::S64>({1}, {2});
189   Tensor output_tensor = makeOutputTensor(DataType::S64);
190
191   AddParams params{};
192   params.activation = Activation::RELU;
193
194   Add kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
195   kernel.configure();
196   EXPECT_ANY_THROW(kernel.execute());
197 }
198
199 } // namespace
200 } // namespace kernels
201 } // namespace luci_interpreter