/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "kernels/Mul.h"
#include "kernels/TestUtils.h"
#include "luci_interpreter/TestMemoryManager.h"

namespace luci_interpreter
{
namespace kernels
{
namespace
{

using namespace testing;

class MulTest : public ::testing::Test
{
protected:
  void SetUp() override { _memory_manager = std::make_unique<TestMemoryManager>(); }

  std::unique_ptr<IMemoryManager> _memory_manager;
};

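// Multiplies a {2, 3, 1, 2} FLOAT32 tensor by tensors of several broadcastable shapes, in both
// operand orders; the fused RELU activation clamps negative products to zero.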
TEST_F(MulTest, Float)
{
  Shape base_shape = {2, 3, 1, 2};
  std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
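  // Expected results for each test shape: the elementwise products after broadcasting, with
  // negative values already clamped to zero by the fused RELU.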
  std::vector<std::vector<float>> test_outputs = {
    {0.00f, 0.69f, 0.12f, 1.15f, 0.00f, 2.07f, 0.18f, 0.15f, 0.00f, 0.25f, 0.90f, 0.45f,
     0.16f, 0.00f, 0.00f, 0.00f, 0.80f, 0.00f, 0.24f, 0.84f, 0.00f, 1.40f, 1.20f, 2.52f,
     0.00f, 0.00f, 0.64f, 0.00f, 0.00f, 0.00f, 0.14f, 0.00f, 0.00f, 0.00f, 0.70f, 0.00f},
    {0.00f, 0.69f, 0.00f, 0.25f, 0.80f, 0.00f, 0.24f, 0.84f, 0.64f, 0.00f, 0.70f, 0.00f},
    {0.00f, 0.46f, 0.00f, 0.69f, 0.12f, 0.00f, 0.18f, 0.10f, 0.27f, 0.15f, 0.00f, 0.00f,
     0.16f, 0.00f, 0.24f, 0.00f, 0.00f, 0.44f, 0.60f, 1.40f, 1.20f, 2.80f, 1.08f, 2.52f,
     0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.35f, 0.00f, 0.70f, 0.00f, 0.63f, 0.00f},
    {0.00f, 0.46f, 0.27f, 0.15f, 0.00f, 0.44f, 0.60f, 1.40f, 0.00f, 0.00f, 0.63f, 0.00f}};
  std::vector<float> input1_data{-0.3f, 2.3f, 0.9f,  0.5f, 0.8f, -1.1f,
                                 1.2f,  2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
  std::vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
  for (size_t i = 0; i < test_shapes.size(); ++i)
  {
    Tensor input1_tensor =
      makeInputTensor<DataType::FLOAT32>(base_shape, input1_data, _memory_manager.get());
    Tensor input2_tensor =
      makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data, _memory_manager.get());
    Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

    MulParams params{};
    params.activation = Activation::RELU;

    Mul kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
    kernel.configure();
    _memory_manager->allocate_memory(output_tensor);
    kernel.execute();

    EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(test_outputs[i], 0.0001f))
      << "With shape number " << i;
  }
  // Re-run with exchanged inputs.
  for (size_t i = 0; i < test_shapes.size(); ++i)
  {
    Tensor input1_tensor =
      makeInputTensor<DataType::FLOAT32>(test_shapes[i], input2_data, _memory_manager.get());
    Tensor input2_tensor =
      makeInputTensor<DataType::FLOAT32>(base_shape, input1_data, _memory_manager.get());
    Tensor output_tensor = makeOutputTensor(DataType::FLOAT32);

    MulParams params{};
    params.activation = Activation::RELU;

    Mul kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
    kernel.configure();
    _memory_manager->allocate_memory(output_tensor);
    kernel.execute();

    EXPECT_THAT(extractTensorData<float>(output_tensor), FloatArrayNear(test_outputs[i], 0.0001f))
      << "With shape number " << i;
  }
}

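// Same broadcasting scenario with S16 quantized tensors: checks both the broadcast output shape
// and the dequantized values against float references.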
TEST_F(MulTest, SInt16)
{
  Shape base_shape = {2, 3, 1, 2};
  std::vector<Shape> test_shapes{{1, 1, 3, 2}, {1, 3, 1, 2}, {2, 1, 3, 1}, {2, 3, 1, 1}};
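  // Expected output shapes after broadcasting each test shape against the base shape.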
  std::vector<std::vector<int32_t>> ref_output_shapes{
    {2, 3, 3, 2}, {2, 3, 1, 2}, {2, 3, 3, 2}, {2, 3, 1, 2}};

  std::vector<float> input1_data{-0.3f, 2.3f, 0.9f,  0.5f, 0.8f, -1.1f,
                                 1.2f,  2.8f, -1.6f, 0.0f, 0.7f, -2.2f};
  std::vector<float> input2_data{0.2f, 0.3f, -0.4f, 0.5f, 1.0f, 0.9f};
  std::vector<std::vector<float>> ref_outputs = {
    {0.00f, 0.69f, 0.12f, 1.15f, 0.00f, 2.07f, 0.18f, 0.15f, 0.00f, 0.25f, 0.90f, 0.45f,
     0.16f, 0.00f, 0.00f, 0.00f, 0.80f, 0.00f, 0.24f, 0.84f, 0.00f, 1.40f, 1.20f, 2.52f,
     0.00f, 0.00f, 0.64f, 0.00f, 0.00f, 0.00f, 0.14f, 0.00f, 0.00f, 0.00f, 0.70f, 0.00f},
    {0.00f, 0.69f, 0.00f, 0.25f, 0.80f, 0.00f, 0.24f, 0.84f, 0.64f, 0.00f, 0.70f, 0.00f},
    {0.00f, 0.46f, 0.00f, 0.69f, 0.12f, 0.00f, 0.18f, 0.10f, 0.27f, 0.15f, 0.00f, 0.00f,
     0.16f, 0.00f, 0.24f, 0.00f, 0.00f, 0.44f, 0.60f, 1.40f, 1.20f, 2.80f, 1.08f, 2.52f,
     0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.00f, 0.35f, 0.00f, 0.70f, 0.00f, 0.63f, 0.00f},
    {0.00f, 0.46f, 0.27f, 0.15f, 0.00f, 0.44f, 0.60f, 1.40f, 0.00f, 0.00f, 0.63f, 0.00f}};
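  // First pass: input scales 3/32767 and 1/32767 and output scale 4/32767 keep every value and
  // product in range; the tolerance allows up to two output quantization steps of error.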
  for (size_t i = 0; i < test_shapes.size(); ++i)
  {
    Tensor input1_tensor = makeInputTensor<DataType::S16>(base_shape, 3.0 / 32767, 0, input1_data,
                                                          _memory_manager.get());
    Tensor input2_tensor = makeInputTensor<DataType::S16>(test_shapes[i], 1.0 / 32767, 0,
                                                          input2_data, _memory_manager.get());
    Tensor output_tensor = makeOutputTensor(DataType::S16, 4.0 / 32767, 0);
    const float tolerance = output_tensor.scale() * 2;

    MulParams params{};
    params.activation = Activation::RELU;

    Mul kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
    kernel.configure();
    _memory_manager->allocate_memory(output_tensor);
    kernel.execute();

    EXPECT_THAT(extractTensorShape(output_tensor),
                ::testing::ElementsAreArray(ref_output_shapes[i]))
      << "With shape number " << i;
    EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_outputs[i], tolerance))
      << "With shape number " << i;
  }
  // Re-run with exchanged inputs and different scales.
  for (size_t i = 0; i < test_shapes.size(); ++i)
  {
    Tensor input1_tensor = makeInputTensor<DataType::S16>(test_shapes[i], 2.0 / 32767, 0,
                                                          input2_data, _memory_manager.get());
    Tensor input2_tensor = makeInputTensor<DataType::S16>(base_shape, 4.0 / 32767, 0, input1_data,
                                                          _memory_manager.get());
    Tensor output_tensor = makeOutputTensor(DataType::S16, 3.0 / 32767, 0);
    const float tolerance = output_tensor.scale() * 2;

    MulParams params{};
    params.activation = Activation::RELU;

    Mul kernel(&input1_tensor, &input2_tensor, &output_tensor, params);
    kernel.configure();
    _memory_manager->allocate_memory(output_tensor);
    kernel.execute();

    EXPECT_THAT(extractTensorShape(output_tensor),
                ::testing::ElementsAreArray(ref_output_shapes[i]))
      << "With shape number " << i;
    EXPECT_THAT(dequantizeTensorData(output_tensor), FloatArrayNear(ref_outputs[i], tolerance))
      << "With shape number " << i;
  }
}

} // namespace
} // namespace kernels
} // namespace luci_interpreter