// Copyright (c) 2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gtest/gtest.h>

#include <cstdint>
#include <vector>

#include <api/CPP/engine.hpp>
#include <api/CPP/input_layout.hpp>
#include <api/CPP/topology.hpp>
#include <api/CPP/network.hpp>
#include "api/CPP/arg_max_min.hpp"
#include "api/CPP/lookup_table.hpp"
#include "api/CPP/memory.hpp"

#include "test_utils/test_utils.h"

using namespace cldnn;
using namespace tests;
32 TEST(lookup_table_base, base) {
34 static const int32_t x_size = 2, y_size = 2, feature_num = 3, batch_num = 2;
35 const auto& engine = get_test_engine();
37 auto input = memory::allocate(engine, { data_types::f32, format::bfyx, { batch_num, feature_num, x_size , y_size } });
38 auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx, {2, 1, 1, 1} });
40 topology.add(input_layout("input", input.get_layout()));
41 topology.add(input_layout("input2", input2.get_layout()));
42 topology.add(lookup_table("table", "input", "input2"));
43 vector<float> input_vec = {
45 /*b0f0*/0.1f, -0.1f, 0.9f, 1.5f,
46 /*b0f1*/0.2f, 0.2f, -10.f, 5.2f,
47 /*b0f2*/0.2f, 0.2f, -10.f, 5.2f,
49 /*b1f0*/3.f, 0.5f, 7.f, 10.f,
50 /*b1f1*/4.f, 0.5f, 8.f, 8.2f,
51 /*b1f2*/0.2f, 0.2f, -10.f, 5.2f
53 vector<float> input2_vec = { 11, 3 };
54 set_values(input, input_vec);
55 set_values(input2, input2_vec);
57 network network(engine, topology);
59 network.set_input_data("input", input);
60 network.set_input_data("input2", input2);
61 auto outputs = network.execute();
63 EXPECT_EQ(outputs.size(), size_t(1));
65 auto output = outputs.at("table").get_memory();
66 auto output_ptr = output.pointer<float>();;
67 float out_buffer[batch_num];
68 for (uint32_t i = 0; i < batch_num; i++)
70 out_buffer[i] = get_value<float>(output_ptr, i);
72 int size = x_size * y_size * feature_num;
74 for (int i = 0; i < batch_num; i++) {
75 value = out_buffer[i];
76 for (int j = 0; j < size; j++)
78 EXPECT_LE(input_vec[i*size + j], value);
83 TEST(lookup_table_num, base) {
85 static const int32_t x_size = 2, y_size = 2, feature_num = 3, batch_num = 2, number_of_values = 3;
86 const auto& engine = get_test_engine();
88 auto input = memory::allocate(engine, { data_types::f32, format::bfyx,{ batch_num, feature_num, x_size , y_size } });
89 auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx,{ 2, 1, 3, 1 } });
91 topology.add(input_layout("input", input.get_layout()));
92 topology.add(input_layout("input2", input2.get_layout()));
93 topology.add(lookup_table("table", "input", "input2"));
94 vector<float> input_vec = {
96 /*b0f0*/0.1f, -0.1f, 0.9f, 1.5f,
97 /*b0f1*/0.2f, 0.2f, -10.f, 5.2f,
98 /*b0f2*/0.2f, 0.2f, -10.f, 5.2f,
100 /*b1f0*/3.f, 0.5f, 7.f, 10.f,
101 /*b1f1*/4.f, 0.5f, 8.f, 8.2f,
102 /*b1f2*/0.2f, 0.2f, -10.f, 5.2f
104 vector<float> input2_vec = { 11, 7, 3, 3, 7, 6};
105 set_values(input, input_vec);
106 set_values(input2, input2_vec);
108 network network(engine, topology);
110 network.set_input_data("input", input);
111 network.set_input_data("input2", input2);
112 auto outputs = network.execute();
114 EXPECT_EQ(outputs.size(), size_t(1));
116 auto output = outputs.at("table").get_memory();
117 auto output_ptr = output.pointer<float>();;
118 float out_buffer[batch_num*number_of_values];
119 for (uint32_t i = 0; i < batch_num * number_of_values; i++)
121 out_buffer[i] = get_value<float>(output_ptr, i);
123 int size = x_size * y_size * feature_num;
125 for (int i = 0; i < batch_num; i++) {
130 for (j = 0; j < number_of_values; j++) {
131 if (number_of_values - 1 == j) {
132 if (input_vec[i*size + (int)input2_vec[i*number_of_values + j]] != input_vec[i*size + (int)input2_vec[i*number_of_values + j - 1]]) {
136 amount += same_values * (j - same_values + 1);
138 else if (input_vec[i*size + (int)input2_vec[i*number_of_values + j]] != input_vec[i*size + (int)input2_vec[i*number_of_values + j + 1]]) {
139 if (same_values != j + 1) {
140 amount += same_values * (j - same_values + 1);
147 for (int j = 0; j < number_of_values; j++)
149 value = out_buffer[i*number_of_values + j];
150 for (int k = 0; k < size; k++)
152 if (input_vec[i*size + k] > value)
156 EXPECT_EQ(count, amount);
160 TEST(lookup_table_with_arg_max, base) {
162 static const int32_t x_size = 2, y_size = 2, feature_num = 3, batch_num = 2;
163 const auto& engine = get_test_engine();
165 auto input = memory::allocate(engine, { data_types::f32, format::yxfb,{ batch_num, feature_num, x_size , y_size } });
167 topology.add(input_layout("input", input.get_layout()));
168 topology.add(arg_max_min("arg_max", "input", arg_max_min::max));
169 topology.add(lookup_table("table", "input", "arg_max"));
170 vector<float> input_vec = {
171 //y0x0 y0x1 y1x0 y1x1
172 /*b0f0*/0.1f, -0.1f, 0.9f, 1.5f,
173 /*b0f1*/0.2f, 0.2f, -10.f, 5.2f,
174 /*b0f2*/0.2f, 0.2f, -10.f, 5.2f,
176 /*b1f0*/3.f, 0.5f, 7.f, 10.f,
177 /*b1f1*/4.f, 0.5f, 8.f, 8.2f,
178 /*b1f2*/0.2f, 0.2f, -10.f, 5.2f
180 set_values(input, input_vec);
182 network network(engine, topology);
184 network.set_input_data("input", input);
185 auto outputs = network.execute();
187 EXPECT_EQ(outputs.size(), size_t(1));
189 auto output = outputs.at("table").get_memory();
190 auto output_ptr = output.pointer<float>();;
191 float out_buffer[batch_num];
192 for (uint32_t i = 0; i < batch_num; i++)
194 out_buffer[i] = get_value<float>(output_ptr, i);
196 int size = x_size * y_size * feature_num;
198 for (int i = 0; i < batch_num; i++) {
199 value = out_buffer[i];
200 for (int j = 0; j < size; j++)
202 EXPECT_LE(input_vec[i*size + j], value);
207 TEST(lookup_table_axis, base) {
209 static const int32_t x_size = 2, y_size = 2, feature_num = 3, batch_num = 2, number_of_values = 2;
210 const auto& engine = get_test_engine();
212 auto input = memory::allocate(engine, { data_types::f32, format::bfyx,{ batch_num, feature_num, x_size , y_size } });
213 auto input2 = memory::allocate(engine, { data_types::f32, format::bfyx,{ 2, 3, 2, 2 } });
215 topology.add(input_layout("input", input.get_layout()));
216 topology.add(input_layout("input2", input2.get_layout()));
217 topology.add(lookup_table("table", "input", "input2", lookup_table::batch));
218 vector<float> input_vec = {
219 //y0x0 y0x1 y1x0 y1x1
220 /*b0f0*/0.1f, -0.1f, 0.9f, 1.5f,
221 /*b0f1*/0.2f, 0.2f, -10.f, 5.2f,
222 /*b0f2*/0.2f, 0.2f, -10.f, 5.2f,
224 /*b1f0*/3.f, 0.5f, 7.f, 10.f,
225 /*b1f1*/4.f, 0.5f, 8.f, 8.2f,
226 /*b1f2*/0.2f, 0.2f, -10.f, 5.2f
228 vector<float> input2_vec = { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1};
229 set_values(input, input_vec);
230 set_values(input2, input2_vec);
232 network network(engine, topology);
234 network.set_input_data("input", input);
235 network.set_input_data("input2", input2);
236 auto outputs = network.execute();
238 EXPECT_EQ(outputs.size(), size_t(1));
240 auto output = outputs.at("table").get_memory();
241 auto output_ptr = output.pointer<float>();;
242 const int out_size = y_size * feature_num * x_size * number_of_values;
243 float out_buffer[out_size];
244 for (uint32_t i = 0; i < out_size; i++)
246 out_buffer[i] = get_value<float>(output_ptr, i);
248 for (int i = 0; i < out_size; i++)
250 EXPECT_EQ(out_buffer[i], (i%2==0 ? input_vec[i/2] : input_vec[(i/2+12)]));