/*
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

///////////////////////////////////////////////////////////////////////////////////////////////////

#include <cmath>
#include <gtest/gtest.h>
#include <algorithm>
#include "api/CPP/memory.hpp"
#include <api/CPP/input_layout.hpp>
#include "api/CPP/activation.hpp"
#include <api/CPP/topology.hpp>
#include <api/CPP/network.hpp>
#include <api/CPP/engine.hpp>
#include <api/CPP/data.hpp>
#include "test_utils/test_utils.h"
#include "test_utils/float16.h"
#include "api/CPP/reorder.hpp"

using namespace cldnn;
using namespace tests;


TEST(activation_f32_fw_gpu, not_basic_yxfb) {
    //  Input:
    //  1  0 -3  4  5
    //  0  2  3  4 -6
    //  3 -3  3  0  1
    //  1  1  1 -1  0
    //
    //  Output:
    //  0, 1, 0, 0, 0,
    //  1, 0, 0, 0, 0,
    //  0, 0, 0, 1, 0,
    //  0, 0, 0, 0, 1

    const auto& engine = get_test_engine();

    auto input = memory::allocate(engine, { data_types::f32, format::yxfb, { 1, 1, 5, 4 } });
    set_values(input,
    { 1.0f, 0.0f, -3.0f, 4.0f, 5.0f,
      0.0f, 2.0f, 3.0f, 4.0f, -6.0f,
      3.0f, -3.0f, 3.0f, 0.0f, 1.0f,
      1.0f, 1.0f, 1.0f, -1.0f, 0.0f });
    VF<float> output_vec = {
        0.0f, 1.0f, 0.0f, 0.0f, 0.0f,
        1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 1.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 1.0f };

    topology topology(
        input_layout("input", input.get_layout()),
        activation("not", "input", activation_not));
    network network(engine, topology);
    network.set_input_data("input", input);
    auto outputs = network.execute();
    EXPECT_EQ(outputs.size(), size_t(1));
    EXPECT_EQ(outputs.begin()->first, "not");

    auto output_memory = outputs.at("not").get_memory();
    auto output_layout = output_memory.get_layout();
    auto output_ptr = output_memory.pointer<float>();

    int y_size = output_layout.size.spatial[1];
    int x_size = output_layout.size.spatial[0];
    int f_size = output_layout.size.feature[0];
    int b_size = output_layout.size.batch[0];
    EXPECT_EQ(output_layout.format, format::yxfb);
    EXPECT_EQ(y_size, 4);
    EXPECT_EQ(x_size, 5);
    EXPECT_EQ(f_size, 1);
    EXPECT_EQ(b_size, 1);

    for (size_t i = 0; i < output_vec.size(); ++i) {
        EXPECT_FLOAT_EQ(output_vec[i], output_ptr[i]);
    }
}
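
// A minimal reference for activation_not, assuming (as the expected values
// above suggest) that the kernel computes a logical negation: any non-zero
// input maps to 0.0f and an exact zero maps to 1.0f. Hypothetical helper,
// not part of the clDNN API; handy when deriving output_vec by hand.
static float reference_not(float x) {
    return x == 0.0f ? 1.0f : 0.0f;
}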

TEST(activation_f32_fw_gpu, relu_basic_yxfb) {
    //  Input:
    //  1 -2 -3  4  5
    //  2  2  3  4 -6
    //  3 -3  3  5  1
    //  1  1  1 -1  1
    //
    //  Slope: 0.5
    //
    //  Output:
    //  1   -1   -1.5  4    5
    //  2    2    3    4   -3
    //  3   -1.5  3    5    1
    //  1    1    1   -0.5  1

    const auto& engine = get_test_engine();

    auto input = memory::allocate(engine, { data_types::f32, format::yxfb, { 1, 1, 5, 4 } });
    set_values(input,
    { 1.0f, -2.0f, -3.0f, 4.0f, 5.0f,
      2.0f, 2.0f, 3.0f, 4.0f, -6.0f,
      3.0f, -3.0f, 3.0f, 5.0f, 1.0f,
      1.0f, 1.0f, 1.0f, -1.0f, 1.0f });
    VF<float> output_vec = {
        1.0f, -1.0f, -1.5f, 4.0f, 5.0f,
        2.0f, 2.0f, 3.0f, 4.0f, -3.0f,
        3.0f, -1.5f, 3.0f, 5.0f, 1.0f,
        1.0f, 1.0f, 1.0f, -0.5f, 1.0f };

    topology topology(
        input_layout("input", input.get_layout()),
        activation("relu", "input", activation_relu_negative_slope, { 0.5f, 0.f }, { { 0, 0, 0, 0 }, 0 }));
    network network(engine, topology);
    network.set_input_data("input", input);
    auto outputs = network.execute();
    EXPECT_EQ(outputs.size(), size_t(1));
    EXPECT_EQ(outputs.begin()->first, "relu");

    auto output_memory = outputs.at("relu").get_memory();
    auto output_layout = output_memory.get_layout();
    auto output_ptr = output_memory.pointer<float>();

    int y_size = output_layout.size.spatial[1];
    int x_size = output_layout.size.spatial[0];
    int f_size = output_layout.size.feature[0];
    int b_size = output_layout.size.batch[0];
    EXPECT_EQ(output_layout.format, format::yxfb);
    EXPECT_EQ(y_size, 4);
    EXPECT_EQ(x_size, 5);
    EXPECT_EQ(f_size, 1);
    EXPECT_EQ(b_size, 1);

    for (size_t i = 0; i < output_vec.size(); ++i) {
        EXPECT_FLOAT_EQ(output_vec[i], output_ptr[i]);
    }
}
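
// A minimal reference for activation_relu_negative_slope, assuming the usual
// leaky-ReLU definition f(x) = x for x >= 0 and slope * x otherwise, with the
// slope taken from the first additional parameter (0.5f in the test above,
// giving e.g. -2 -> -1 and -6 -> -3). Hypothetical helper for deriving
// expected vectors such as output_vec; not part of the clDNN API.
static float reference_leaky_relu(float x, float slope) {
    return x >= 0.0f ? x : slope * x;
}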

TEST(activation_f32_fw_gpu, basic_yxfb_all_functions) {
    //  Input:
    //  1 -2 -3  4  5
    //  2  2  3  4 -6
    //  3 -3  3  5  1
    //  1  1  1 -1  1
    //
    //  a: 0.5, b: 2.5
    //

    const auto& engine = get_test_engine();

    auto input = memory::allocate(engine, { data_types::f32, format::yxfb, { 1, 1, 5, 4 } });
    auto input_params = memory::allocate(engine, { data_types::f32, format::yxfb, { 1, 1, 2, 1 } });
    set_values(input,
    { 1.0f, -2.0f, -3.0f, 4.0f, 5.0f,
      2.0f, 2.0f, 3.0f, 4.0f, -6.0f,
      3.0f, -3.0f, 3.0f, 5.0f, 1.0f,
      1.0f, 1.0f, 1.0f, -1.0f, 1.0f });

    std::vector<cldnn_activation_func> funcs = {
        activation_none,
        activation_logistic,
        activation_hyperbolic_tan,
        activation_relu,
        activation_relu_negative_slope,
        activation_clamp,
        activation_softrelu,
        activation_abs,
        activation_linear,
        activation_square,
        activation_sqrt,
        activation_elu,
        activation_sin,
        activation_sinh,
        activation_cos,
        activation_cosh,
        activation_exp,
        activation_not,
        activation_log2,
    };

    cldnn_activation_additional_params params = { 0.5f, 2.5f };
    set_values(input_params, { params.a, params.b });

    // pass 0 supplies the parameters directly; pass 1 supplies them via a data primitive
    for (uint8_t pass = 0; pass < 2; pass++)
    {
        for (auto func : funcs)
        {
            topology topology(input_layout("input", input.get_layout()));

            if (pass == 0)
            {
                topology.add(activation("activation", "input", func, params));
            }
            else
            {
                topology.add(data("input_params", input_params));
                topology.add(activation("activation", "input", "input_params", func));
            }

            network network(engine, topology);
            network.set_input_data("input", input);
            auto outputs = network.execute();
            EXPECT_EQ(outputs.size(), size_t(1));
            EXPECT_EQ(outputs.begin()->first, "activation");

            auto output_memory = outputs.at("activation").get_memory();
            auto output_layout = output_memory.get_layout();
            auto output_ptr = output_memory.pointer<float>();
            auto input_ptr = input.pointer<float>();

            int y_size = output_layout.size.spatial[1];
            int x_size = output_layout.size.spatial[0];
            int f_size = output_layout.size.feature[0];
            int b_size = output_layout.size.batch[0];
            EXPECT_EQ(output_layout.format, format::yxfb);
            EXPECT_EQ(y_size, 4);
            EXPECT_EQ(x_size, 5);
            EXPECT_EQ(f_size, 1);
            EXPECT_EQ(b_size, 1);

            for (size_t i = 0; i < output_layout.get_linear_size(); ++i)
            {
                switch (func)
                {
                case activation_none:
                    EXPECT_FLOAT_EQ(input_ptr[i], output_ptr[i]);
                    break;
                case activation_logistic:
                    EXPECT_FLOAT_EQ(1.f / (1.f + std::exp((float)-input_ptr[i])), output_ptr[i]);
                    break;
                case activation_hyperbolic_tan:
                    EXPECT_FLOAT_EQ(std::tanh((float)input_ptr[i]), output_ptr[i]);
                    break;
                case activation_relu:
                    EXPECT_FLOAT_EQ(std::fmax((float)input_ptr[i], 0.f), output_ptr[i]);
                    break;
                case activation_clamp:
                    EXPECT_FLOAT_EQ(std::fmin((float)std::fmax((float)input_ptr[i], params.a), params.b), output_ptr[i]);
                    break;
                case activation_softrelu:
                    EXPECT_FLOAT_EQ(std::log(1.f + std::exp((float)input_ptr[i])), output_ptr[i]);
                    break;
                case activation_abs:
                    EXPECT_FLOAT_EQ(std::fabs(input_ptr[i]), output_ptr[i]);
                    break;
                case activation_linear:
                    EXPECT_FLOAT_EQ((params.a * input_ptr[i] + params.b), output_ptr[i]);
                    break;
                case activation_square:
                    EXPECT_FLOAT_EQ((input_ptr[i] * input_ptr[i]), output_ptr[i]);
                    break;
                case activation_sqrt:
                    if (input_ptr[i] >= 0)
                    {
                        EXPECT_FLOAT_EQ(std::sqrt((float)input_ptr[i]), output_ptr[i]);
                    }
                    break;
                case activation_elu:
                    EXPECT_FLOAT_EQ(std::fmax((float)input_ptr[i], 0.0f) +
                                    params.a * (std::exp(std::fmin((float)input_ptr[i], 0.0f)) - 1), output_ptr[i]);
                    break;
                case activation_sin:
                    EXPECT_FLOAT_EQ(std::sin((float)input_ptr[i]), output_ptr[i]);
                    break;
                case activation_sinh:
                    EXPECT_FLOAT_EQ(std::sinh((float)input_ptr[i]), output_ptr[i]);
                    break;
                case activation_cos:
                    EXPECT_FLOAT_EQ(std::cos((float)input_ptr[i]), output_ptr[i]);
                    break;
                case activation_cosh:
                    EXPECT_FLOAT_EQ(std::cosh((float)input_ptr[i]), output_ptr[i]);
                    break;
                case activation_exp:
                    EXPECT_FLOAT_EQ(std::exp((float)input_ptr[i]), output_ptr[i]);
                    break;
                case activation_not:
                    EXPECT_FLOAT_EQ((float)(!input_ptr[i]), output_ptr[i]);
                    break;
                case activation_log2:
                    if (input_ptr[i] > 0) // the logarithm is defined only for positive values
                    {
                        EXPECT_FLOAT_EQ(std::log2((float)input_ptr[i]), output_ptr[i]);
                    }
                    break;
                default:
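                    // Note: activation_relu_negative_slope has no case above,
                    // so its output is not numerically verified by this switch.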
                    break;
                }
            }
        }
    }
}

TEST(activation_f32_fw_gpu, basic_yxfb_asin_acos_log) {
    const auto& engine = get_test_engine();

    auto input = memory::allocate(engine, { data_types::f32, format::yxfb, { 1, 1, 2, 4 } });
    set_values(input, { 0.12f, 0.56f, 0.45f, 0.789f, 0.546f, 0.999f, 0.7899f, 0.6677f });
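    // All inputs lie in (0, 1), so asin and acos (domain [-1, 1]) as well as
    // log and log2 (domain (0, inf)) are defined for every element.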

    std::vector<cldnn_activation_func> funcs = {
        activation_asin,
        activation_acos,
        activation_log,
        activation_log2
    };

    for (auto func : funcs)
    {
        topology topology(input_layout("input", input.get_layout()));
        topology.add(activation("activation", "input", func));

        network network(engine, topology);
        network.set_input_data("input", input);
        auto outputs = network.execute();
        EXPECT_EQ(outputs.size(), size_t(1));
        EXPECT_EQ(outputs.begin()->first, "activation");

        auto output_memory = outputs.at("activation").get_memory();
        auto output_layout = output_memory.get_layout();
        auto output_ptr = output_memory.pointer<float>();
        auto input_ptr = input.pointer<float>();

        int y_size = output_layout.size.spatial[1];
        int x_size = output_layout.size.spatial[0];
        int f_size = output_layout.size.feature[0];
        int b_size = output_layout.size.batch[0];
        EXPECT_EQ(output_layout.format, format::yxfb);
        EXPECT_EQ(y_size, 4);
        EXPECT_EQ(x_size, 2);
        EXPECT_EQ(f_size, 1);
        EXPECT_EQ(b_size, 1);

        for (size_t i = 0; i < output_layout.get_linear_size(); ++i)
        {
            switch (func)
            {
            case activation_asin:
                EXPECT_FLOAT_EQ(std::asin((float)input_ptr[i]), output_ptr[i]);
                break;
            case activation_acos:
                EXPECT_FLOAT_EQ(std::acos((float)input_ptr[i]), output_ptr[i]);
                break;
            case activation_log:
                EXPECT_FLOAT_EQ(std::log((float)input_ptr[i]), output_ptr[i]);
                break;
            case activation_log2:
                EXPECT_FLOAT_EQ(std::log2((float)input_ptr[i]), output_ptr[i]);
                break;
            default:
                break;
            }
        }
    }
}

TEST(activation_f32_fw_gpu, relu_basic_input_padding_yxfb) {
    //  Input padding: 2x1 (yx format), produced by the reorder layer
    //  The expected output size equals the input size - the output padding is set to 0, 0
    //
    //  Input:
    //  z  z  z  z  z  z  z
    //  z  z  z  z  z  z  z
    //  z  1 -2 -3  4  5  z
    //  z  2  2  3  4 -6  z
    //  z  3 -3  3  5  1  z
    //  z  1  1  1 -1  1  z
    //  z  z  z  z  z  z  z
    //  z  z  z  z  z  z  z
    //
    //  Slope: 0.5
    //
    //  Output:
    //  1   -1   -1.5  4    5
    //  2    2    3    4   -3
    //  3   -1.5  3    5    1
    //  1    1    1   -0.5  1

    const auto& engine = get_test_engine();

    auto input = memory::allocate(engine, { data_types::f32, format::yxfb, { 1, 1, 5, 4 } });

    set_values(input,
    { 1.0f, -2.0f, -3.0f, 4.0f, 5.0f,
      2.0f, 2.0f, 3.0f, 4.0f, -6.0f,
      3.0f, -3.0f, 3.0f, 5.0f, 1.0f,
      1.0f, 1.0f, 1.0f, -1.0f, 1.0f });
    VF<float> output_vec = {
        1.0f, -1.0f, -1.5f, 4.0f, 5.0f,
        2.0f, 2.0f, 3.0f, 4.0f, -3.0f,
        3.0f, -1.5f, 3.0f, 5.0f, 1.0f,
        1.0f, 1.0f, 1.0f, -0.5f, 1.0f };

    topology topology(
        input_layout("input", input.get_layout()),
        reorder("reorder", "input", input.get_layout().with_padding({ { 0, 0, 2, 1 }, 0 })),
        activation("relu", "reorder", activation_relu_negative_slope, { 0.5f, 0.f }, { { 0, 0, 0, 0 }, 0 }));
    network network(engine, topology);
    network.set_input_data("input", input);
    auto outputs = network.execute();
    EXPECT_EQ(outputs.begin()->first, "relu");

    auto output_memory = outputs.at("relu").get_memory();
    auto output_layout = output_memory.get_layout();
    auto output_ptr = output_memory.pointer<float>();

    int y_size = output_layout.size.spatial[1];
    int x_size = output_layout.size.spatial[0];
    int f_size = output_layout.size.feature[0];
    int b_size = output_layout.size.batch[0];
    EXPECT_EQ(output_layout.format, format::yxfb);
    EXPECT_EQ(y_size, 4);
    EXPECT_EQ(x_size, 5);
    EXPECT_EQ(f_size, 1);
    EXPECT_EQ(b_size, 1);

    for (size_t i = 0; i < output_vec.size(); ++i) {
        EXPECT_FLOAT_EQ(output_vec[i], output_ptr[i]);
    }
}

TEST(activation_f32_fw_gpu, relu_basic_output_padding_yxfb) {
    //  Output padding: 3x3 (yx format)
    //
    //  Input:
    //  1 -2 -3  4  5
    //  2  2  3  4 -6
    //  3 -3  3  5  1
    //  1  1  1 -1  1
    //
    //  Slope: 0.5
    //
    //  Output:
    //  0    0    0    0    0    0    0    0    0    0    0
    //  0    0    0    0    0    0    0    0    0    0    0
    //  0    0    0    0    0    0    0    0    0    0    0
    //  0    0    0    1   -1   -1.5  4    5    0    0    0
    //  0    0    0    2    2    3    4   -3    0    0    0
    //  0    0    0    3   -1.5  3    5    1    0    0    0
    //  0    0    0    1    1    1   -0.5  1    0    0    0
    //  0    0    0    0    0    0    0    0    0    0    0
    //  0    0    0    0    0    0    0    0    0    0    0
    //  0    0    0    0    0    0    0    0    0    0    0

    const auto& engine = get_test_engine();

    auto input = memory::allocate(engine, { data_types::f32, format::yxfb, { 1, 1, 5, 4 } });
    set_values(input,
    { 1.0f, -2.0f, -3.0f, 4.0f, 5.0f,
      2.0f, 2.0f, 3.0f, 4.0f, -6.0f,
      3.0f, -3.0f, 3.0f, 5.0f, 1.0f,
      1.0f, 1.0f, 1.0f, -1.0f, 1.0f });
    VF<float> output_vec = {
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 1.0f, -1.0f, -1.5f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 2.0f, 2.0f, 3.0f, 4.0f, -3.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 3.0f, -1.5f, 3.0f, 5.0f, 1.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, -0.5f, 1.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
        0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };

    topology topology(
        input_layout("input", input.get_layout()),
        activation("relu", "input", activation_relu_negative_slope, { 0.5f, 0.f }, { { 0, 0, 3, 3 }, 0 }));
    network network(engine, topology);
    network.set_input_data("input", input);
    auto outputs = network.execute();
    EXPECT_EQ(outputs.size(), size_t(1));
    EXPECT_EQ(outputs.begin()->first, "relu");

    auto output_memory = outputs.at("relu").get_memory();
    auto output_layout = output_memory.get_layout();
    auto output_size = output_layout.get_buffer_size();
    auto output_ptr = output_memory.pointer<float>();

    int y_size = output_size.spatial[1];
    int x_size = output_size.spatial[0];
    int f_size = output_size.feature[0];
    int b_size = output_size.batch[0];
    EXPECT_EQ(output_layout.format, format::yxfb);
    EXPECT_EQ(y_size, 10);
    EXPECT_EQ(x_size, 11);
    EXPECT_EQ(f_size, 1);
    EXPECT_EQ(b_size, 1);

    for (size_t i = 0; i < output_vec.size(); ++i) {
        EXPECT_FLOAT_EQ(output_vec[i], output_ptr[i]);
    }
}
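
// For reference: with symmetric output padding of 3 in x and y, the padded
// buffer checked above is (5 + 3 + 3) x (4 + 3 + 3) = 11 x 10 elements,
// which is why the expectations read output_layout.get_buffer_size() rather
// than output_layout.size (assumed to stay at the logical 5 x 4).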