2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
3 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include "kernels/Div.h"
19 #include "kernels/TestUtils.h"
20 #include "luci_interpreter/TestMemoryManager.h"
22 namespace luci_interpreter
29 using namespace testing;
31 class DivTest : public ::testing::Test
34 void SetUp() override { _memory_manager = std::make_unique<TestMemoryManager>(); }
36 std::unique_ptr<IMemoryManager> _memory_manager;
// Returns the float tolerance to use when comparing dequantized uint8 results
// against float references: two quantization steps (one per input) plus a
// second-order term for the error introduced by the division itself.
float GetTolerance(float min, float max)
{
  const float kQuantizedStep = (max - min) / 255.0f; // uint8 covers [min, max] in 255 steps
  const float kQuantizedTolerance = 2.0f * kQuantizedStep + kQuantizedStep * kQuantizedStep;
  return kQuantizedTolerance;
}
46 TEST_F(DivTest, Float)
48 Shape base_shape = {2, 3, 1, 1};
50 std::vector<int32_t> output_shape = {2, 3, 1, 1};
52 std::vector<float> input1_data{0.3f, 2.3f, 0.9f, 0.5f, 0.8f, 1.1f};
53 std::vector<float> input2_data{0.2f, 1.6f, 0.5f, 0.4f, 1.6f, 0.4f};
54 std::vector<float> test_outputs{1.5f, 1.4375f, 1.8f, 1.25f, 0.5f, 2.75f};
56 Tensor input1_tensor =
57 makeInputTensor<DataType::FLOAT32>(base_shape, input1_data, _memory_manager.get());
58 Tensor input2_tensor =
59 makeInputTensor<DataType::FLOAT32>(base_shape, input2_data, _memory_manager.get());
61 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
64 params.activation = Activation::RELU;
66 Div kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
68 _memory_manager->allocate_memory(output_tensor);
71 EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(test_outputs, 0.0001f));
72 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
75 TEST_F(DivTest, FloatBroadcast)
77 Shape input1_shape = {1, 3};
78 Shape input2_shape = {3, 1};
80 std::vector<float> input1_data{-0.3f, 2.3f, 0.9f};
81 std::vector<float> input2_data{0.2f, 1.6f, 0.5f};
82 std::vector<float> test_outputs{0.f, 11.5f, 4.5f, 0.f, 1.4375f, 0.5625f, 0.f, 4.6f, 1.8f};
84 Tensor input1_tensor =
85 makeInputTensor<DataType::FLOAT32>(input1_shape, input1_data, _memory_manager.get());
86 Tensor input2_tensor =
87 makeInputTensor<DataType::FLOAT32>(input2_shape, input2_data, _memory_manager.get());
89 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
92 params.activation = Activation::RELU;
94 Div kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
96 _memory_manager->allocate_memory(output_tensor);
99 EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(test_outputs, 0.0001f));
102 TEST_F(DivTest, Uint8)
104 Shape base_shape = {1, 2, 2, 1};
106 std::vector<int32_t> output_shape = {1, 2, 2, 1};
108 std::vector<float> input1_data = {-0.8f, -0.2f, 0.3f, 0.7f};
109 std::vector<float> input2_data = {-0.8f, 0.4f, 0.8f, 1.0f};
110 std::vector<float> test_outputs{1.0f, 0.f, 0.375f, 0.7f};
112 const float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
114 std::pair<float, int32_t> quant_param = quantizationParams<uint8_t>(-1.f, 1.f);
116 Tensor input1_tensor = makeInputTensor<DataType::U8>(
117 base_shape, quant_param.first, quant_param.second, input1_data, _memory_manager.get());
118 Tensor input2_tensor = makeInputTensor<DataType::U8>(
119 base_shape, quant_param.first, quant_param.second, input2_data, _memory_manager.get());
121 Tensor output_tensor =
122 makeOutputTensor(getElementType<uint8_t>(), quant_param.first, quant_param.second);
125 params.activation = Activation::RELU;
127 Div kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
129 _memory_manager->allocate_memory(output_tensor);
132 EXPECT_THAT(dequantizeTensorData(output_tensor),
133 FloatArrayNear(test_outputs, kQuantizedTolerance));
134 EXPECT_THAT(extractTensorShape(output_tensor), ::testing::ElementsAreArray(output_shape));
137 template <loco::DataType DType> void checkInteger(luci_interpreter::IMemoryManager *memory_manager)
139 using dtype = typename loco::DataTypeImpl<DType>::Type;
140 Shape base_shape = {2, 3, 1, 2};
141 std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
143 std::vector<std::vector<dtype>> test_outputs = {{5, 6, 2, 0, 10, 3, //
144 10, 0, 4, 5, 20, 0, //
146 2, 0, 1, 10, 5, 0, //
148 18, 20, 7, 0, 37, 10},
149 {5, 6, 4, 5, 0, 0, 2, 0, 1, 0, 37, 10},
150 {5, 7, 4, 6, 2, 3, 10, 0, 8, 0, 4, 0,
151 0, 0, 0, 0, 0, 0, 0, 10, 5, 0, 1, 0,
152 0, 0, 5, 9, 1, 1, 0, 0, 37, 50, 7, 10},
153 {5, 7, 8, 0, 0, 0, 0, 10, 5, 9, 7, 10}};
154 std::vector<dtype> input1_data{20, 30, 40, -17, -4, -7, 11, -31, 10, 19, 75, 100};
155 std::vector<dtype> input2_data{4, 5, 10, -3, 2, 10};
156 for (size_t i = 0; i < test_shapes.size(); ++i)
158 Tensor input1_tensor = makeInputTensor<DType>(base_shape, input1_data, memory_manager);
159 Tensor input2_tensor = makeInputTensor<DType>(test_shapes[i], input2_data, memory_manager);
160 Tensor output_tensor = makeOutputTensor(DType);
163 params.activation = Activation::RELU;
165 Div kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
167 memory_manager->allocate_memory(output_tensor);
170 EXPECT_THAT(extractTensorData<dtype>(output_tensor), test_outputs[i])
171 << "With shape number " << i;
175 TEST_F(DivTest, SInt64)
177 checkInteger<loco::DataType::S64>(_memory_manager.get());
181 TEST_F(DivTest, SInt32)
183 checkInteger<loco::DataType::S32>(_memory_manager.get());
187 TEST_F(DivTest, Input_Output_Type_NEG)
189 Tensor input1_tensor = makeInputTensor<DataType::FLOAT32>({1}, {1.f}, _memory_manager.get());
190 Tensor input2_tensor = makeInputTensor<DataType::S32>({1}, {2}, _memory_manager.get());
191 Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);
194 params.activation = Activation::RELU;
196 Div kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
197 EXPECT_ANY_THROW(kernel.configure());
200 TEST_F(DivTest, Invalid_Input_Type_NEG)
202 Tensor input1_tensor = makeInputTensor<DataType::U64>({1}, {1}, _memory_manager.get());
203 Tensor input2_tensor = makeInputTensor<DataType::U64>({1}, {2}, _memory_manager.get());
204 Tensor output_tensor = makeOutputTensor(DataType::U64);
207 params.activation = Activation::RELU;
209 Div kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
211 _memory_manager->allocate_memory(output_tensor);
212 EXPECT_ANY_THROW(kernel.execute());
215 TEST_F(DivTest, Invalid_Output_Type_NEG)
217 Tensor input1_tensor = makeInputTensor<DataType::S32>({1}, {1}, _memory_manager.get());
218 Tensor input2_tensor = makeInputTensor<DataType::S32>({1}, {2}, _memory_manager.get());
219 Tensor output_tensor = makeOutputTensor(DataType::S64);
222 params.activation = Activation::RELU;
224 Div kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
225 EXPECT_ANY_THROW(kernel.configure());
229 } // namespace kernels
230 } // namespace luci_interpreter