2 // Copyright (c) 2018 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
17 ///////////////////////////////////////////////////////////////////////////////////////////////////
18 #include <gtest/gtest.h>
19 #include "api/CPP/memory.hpp"
20 #include <api/CPP/input_layout.hpp>
21 #include "api/CPP/average_unpooling.hpp"
22 #include <api/CPP/topology.hpp>
23 #include <api/CPP/network.hpp>
24 #include <api/CPP/engine.hpp>
25 #include "test_utils/test_utils.h"
26 #include <api/CPP/reorder.hpp>
27 #include <api/CPP/data.hpp>
28 #include <api/CPP/mutable_data.hpp>
29 #include <api/CPP/pooling.hpp>
30 #include "test_utils/float16.h"
32 using namespace cldnn;
33 using namespace tests;
TEST(average_unpooling_gpu, basic_in2x2x2x1) {
    // Smoke test for the average_unpooling GPU primitive (fp32, bfyx layout).
    // The test input is the 2x2x2x1 result of a forward average pooling; the
    // primitive spreads every pooled value, divided by the pool-window area
    // (2x2 -> /4), back over its window in a 2x2x3x2 output, summing the
    // contributions where neighbouring windows overlap (the middle column:
    // e.g. -0.5 == (2.5 + -4.5) / 4).
    //
    // Forward Average Pooling Input:
    // f0: b0: 1 2 -10 b1: 0 0 -11
    // f0: b0: 3 4 -14 b1: 1 -1 -15
    // f1: b0: 5 6 -12 b1: 1.5 5.5 -13
    // f1: b0: 7 8 14 b1: 12 9 17.5
    //
    // Average-pooling result (the values this test actually feeds in):
    // f0: b0: 2.5 -4.5 b1: 0 -6.75
    // f1: b0: 6 4 b1: 7 4.75
    //
    // Expected unpooled output:
    // f0: b0: 0.625 -0.5 -1.125 b1: 0 -1.6875 -1.6875
    // f0: b0: 0.625 -0.5 -1.125 b1: 0 -1.6875 -1.6875
    // f1: b0: 1.5 2.5 1 b1: 1.75 2.9375 1.1875
    // f1: b0: 1.5 2.5 1 b1: 1.75 2.9375 1.1875

    // Input tensor: batch=2, feature=2, spatial 2x1, bfyx.
    auto input = memory::allocate(engine, { data_types::f32, format::bfyx, { 2, 2, 2, 1 } });
    topology.add(input_layout("input", input.get_layout()));
    // Unpool to output size {2,2,3,2}; the trailing {1,1,2,2} / {1,1,1,1}
    // tensors are the pooling window and stride arguments — order per the
    // average_unpooling API signature, TODO confirm which is which.
    topology.add(average_unpooling("average_unpooling", "input", { 2, 2, 3, 2 }, { 1, 1, 2, 2 }, { 1, 1, 1, 1 }));
    network network(engine, topology);
    network.set_input_data("input", input);
    auto outputs = network.execute();
    auto output = outputs.at("average_unpooling").get_memory();
    auto output_ptr = output.pointer<float>();
    auto output_layout = output.get_layout();
    // The output must stay bfyx with y=2, x=3, f=2, b=2.
    EXPECT_EQ(output_layout.format, format::bfyx);
    EXPECT_EQ(output_layout.size.spatial[1], 2);
    EXPECT_EQ(output_layout.size.spatial[0], 3);
    EXPECT_EQ(output_layout.size.feature[0], 2);
    EXPECT_EQ(output_layout.size.batch[0], 2);
    // Reference values in bfyx linear order (see the table above).
    std::vector<float> expected_output_vec = {
        0.625f, -0.5f, -1.125,
        0.625f, -0.5f, -1.125,
        0.f, -1.6875f, -1.6875f,
        0.f, -1.6875f, -1.6875f,
        1.75f, 2.9375f, 1.1875f,
        1.75f, 2.9375f, 1.1875f
    // Exact fp32 comparison is safe here: every expected value is a dyadic
    // rational that fp32 represents exactly.
    for (size_t i = 0; i < expected_output_vec.size(); ++i) {
        EXPECT_EQ(expected_output_vec[i], output_ptr[i]);
TEST(average_unpooling_gpu, basic_in2x2x3x2_with_average_pooling_unpooling) {
    // Round-trip test: run an average pooling (2x2 window, stride 2,
    // average_no_padding mode) over a 2x2x3x2 input, then average_unpooling
    // back to the original input size, and compare against the hand-computed
    // reference below. Note average_no_padding divides by the number of
    // elements actually inside the input: the second window covers only the
    // last column, e.g. -12 == (-10 + -14) / 2.
    //
    // Forward Average Pooling Input:
    // f0: b0: 1 2 -10 b1: 0 0 -11
    // f0: b0: 3 4 -14 b1: 1 -1 -15
    // f1: b0: 3 6 -12 b1: 1.5 5.5 -13
    // f1: b0: 7 8 14 b1: 12 9 17
    //
    // Pooling output (input of the unpooling stage):
    // f0: b0: 2.5 -12 b1: 0 -13
    // f1: b0: 6 1 b1: 7 2
    //
    // Expected unpooled output:
    // f0: b0: 0.625 0.625 -6 b1: 0 0 -6.5
    // f0: b0: 0.625 0.625 -6 b1: 0 0 -6.5
    // f1: b0: 1.5 1.5 0.5 b1: 1.75 1.75 1
    // f1: b0: 1.5 1.5 0.5 b1: 1.75 1.75 1

    // Input tensor: batch=2, feature=2, spatial 3x2, bfyx.
    auto input = memory::allocate(engine, { data_types::f32, format::bfyx,{ 2, 2, 3, 2 } });
    topology.add(input_layout("input", input.get_layout()));
    // 2x2 average pooling, stride 2, no padding contribution.
    topology.add(pooling("pooling", "input", pooling_mode::average_no_padding, { 1, 1, 2, 2 }, { 1, 1, 2, 2 }));
    // Unpool straight back to the original input size so the shapes round-trip.
    topology.add(average_unpooling("average_unpooling", "pooling", input.get_layout().size, { 1, 1, 2, 2 }, { 1, 1, 2, 2 }));
    network network(engine, topology);
    network.set_input_data("input", input);
    auto outputs = network.execute();
    auto output = outputs.at("average_unpooling").get_memory();
    auto output_ptr = output.pointer<float>();
    auto output_layout = output.get_layout();
    // The unpooled output must match the original input geometry.
    EXPECT_EQ(output_layout.format, format::bfyx);
    EXPECT_EQ(output_layout.size.spatial[1], 2);
    EXPECT_EQ(output_layout.size.spatial[0], 3);
    EXPECT_EQ(output_layout.size.feature[0], 2);
    EXPECT_EQ(output_layout.size.batch[0], 2);
    // Reference values in bfyx linear order (see the table above).
    std::vector<float> expected_output_vec = {
    for (size_t i = 0; i < expected_output_vec.size(); ++i) {
        EXPECT_EQ(expected_output_vec[i], output_ptr[i]);
TEST(average_unpooling_gpu, basic_in2x2x2x1_output_padding) {
    // Same scenario as basic_in2x2x2x1, but the unpooling primitive is given
    // an explicit output padding of 1 in both spatial dimensions; the raw
    // output buffer therefore carries a zero border (rows of 5 = 3+2 values,
    // 4 = 2+2 rows per feature map) around the computed data.
    //
    // Output Padding : 0x0x1x1
    //
    // Forward Average Pooling Input:
    // f0: b0: 1 2 -10 b1: 0 0 -11
    // f0: b0: 3 4 -14 b1: 1 -1 -15
    // f1: b0: 5 6 -12 b1: 1.5 5.5 -13
    // f1: b0: 7 8 14 b1: 12 9 17.5
    //
    // Average-pooling result (the values this test actually feeds in):
    // f0: b0: 2.5 -4.5 b1: 0 -6.75
    // f1: b0: 6 4 b1: 7 4.75
    //
    // Expected unpooled output (inside the padded border):
    // f0: b0: 0.625 -0.5 -1.125 b1: 0 -1.6875 -1.6875
    // f0: b0: 0.625 -0.5 -1.125 b1: 0 -1.6875 -1.6875
    // f1: b0: 1.5 2.5 1 b1: 1.75 2.9375 1.1875
    // f1: b0: 1.5 2.5 1 b1: 1.75 2.9375 1.1875

    // Input tensor: batch=2, feature=2, spatial 2x1, bfyx.
    auto input = memory::allocate(engine, { data_types::f32, format::bfyx,{ 2, 2, 2, 1 } });
    topology.add(input_layout("input", input.get_layout()));
    // Same unpooling as the basic test, plus output padding {0,0,1,1}
    // (batch/feature 0, spatial 1x1) filled with 0.
    topology.add(average_unpooling("average_unpooling", "input", { 2, 2, 3, 2 }, { 1, 1, 2, 2 }, { 1, 1, 1, 1 }, padding({ 0, 0, 1, 1 }, 0)));
    network network(engine, topology);
    network.set_input_data("input", input);
    auto outputs = network.execute();
    auto output = outputs.at("average_unpooling").get_memory();
    auto output_ptr = output.pointer<float>();
    auto output_layout = output.get_layout();
    // Logical size excludes padding: still y=2, x=3, f=2, b=2.
    EXPECT_EQ(output_layout.format, format::bfyx);
    EXPECT_EQ(output_layout.size.spatial[1], 2);
    EXPECT_EQ(output_layout.size.spatial[0], 3);
    EXPECT_EQ(output_layout.size.feature[0], 2);
    EXPECT_EQ(output_layout.size.batch[0], 2);
    // Reference values for the PADDED buffer, bfyx linear order:
    // each feature map is a 4x5 grid with a zero border around the 2x3 data.
    std::vector<float> expected_output_vec = {
        0.f, 0.f, 0.f, 0.f, 0.f,
        0.f, 0.625f, -0.5f, -1.125, 0.f,
        0.f, 0.625f, -0.5f, -1.125, 0.f,
        0.f, 0.f, 0.f, 0.f, 0.f,
        0.f, 0.f, 0.f, 0.f, 0.f,
        0.f, 1.5f, 2.5f, 1.f, 0.f,
        0.f, 1.5f, 2.5f, 1.f, 0.f,
        0.f, 0.f, 0.f, 0.f, 0.f,
        0.f, 0.f, 0.f, 0.f, 0.f,
        0.f, 0.f, -1.6875f, -1.6875f, 0.f,
        0.f, 0.f, -1.6875f, -1.6875f, 0.f,
        0.f, 0.f, 0.f, 0.f, 0.f,
        0.f, 0.f, 0.f, 0.f, 0.f,
        0.f, 1.75f, 2.9375f, 1.1875f, 0.f,
        0.f, 1.75f, 2.9375f, 1.1875f, 0.f,
        0.f, 0.f, 0.f, 0.f, 0.f
    // NOTE(review): 'out' only mirrors output_ptr for debugger inspection;
    // it is never read by the assertions.
    std::vector<float> out;
    for (size_t i = 0; i < expected_output_vec.size(); ++i) {
        out.push_back(output_ptr[i]);
        EXPECT_EQ(expected_output_vec[i], output_ptr[i]);
TEST(average_unpooling_gpu, basic_in2x2x2x1_fp16) {
    // fp16 variant of basic_in2x2x2x1: same pooled input values, supplied as
    // FLOAT16, with the raw half-precision output read back as uint16_t and
    // converted via float16_to_float32 for comparison. All expected values
    // are exactly representable in fp16, so exact equality is valid.
    // NOTE(review): the original "Output Padding : 0x0x1x1" comment here was
    // stale copy-paste — this test passes no output padding.
    //
    // Input values in fp16
    //
    // Forward Average Pooling Input:
    // f0: b0: 1 2 -10 b1: 0 0 -11
    // f0: b0: 3 4 -14 b1: 1 -1 -15
    // f1: b0: 5 6 -12 b1: 1.5 5.5 -13
    // f1: b0: 7 8 14 b1: 12 9 17.5
    //
    // Average-pooling result (the values this test actually feeds in):
    // f0: b0: 2.5 -4.5 b1: 0 -6.75
    // f1: b0: 6 4 b1: 7 4.75
    //
    // Expected unpooled output:
    // f0: b0: 0.625 -0.5 -1.125 b1: 0 -1.6875 -1.6875
    // f0: b0: 0.625 -0.5 -1.125 b1: 0 -1.6875 -1.6875
    // f1: b0: 1.5 2.5 1 b1: 1.75 2.9375 1.1875
    // f1: b0: 1.5 2.5 1 b1: 1.75 2.9375 1.1875

    // Input tensor: batch=2, feature=2, spatial 2x1, bfyx, half precision.
    auto input = memory::allocate(engine, { data_types::f16, format::bfyx,{ 2, 2, 2, 1 } });
        FLOAT16(2.5f), FLOAT16(-4.5f),
        FLOAT16(6.f), FLOAT16(4.f),
        FLOAT16(0.f), FLOAT16(-6.75f),
        FLOAT16(7.0f), FLOAT16(4.75f)
    topology.add(input_layout("input", input.get_layout()));
    // Same geometry as the fp32 basic test: output {2,2,3,2}, window/stride
    // arguments {1,1,2,2} and {1,1,1,1}.
    topology.add(average_unpooling("average_unpooling", "input", { 2, 2, 3, 2 }, { 1, 1, 2, 2 }, { 1, 1, 1, 1 }));
    network network(engine, topology);
    network.set_input_data("input", input);
    auto outputs = network.execute();
    auto output = outputs.at("average_unpooling").get_memory();
    // fp16 data is accessed through its raw 16-bit storage type.
    auto output_ptr = output.pointer<uint16_t>();
    auto output_layout = output.get_layout();
    EXPECT_EQ(output_layout.format, format::bfyx);
    EXPECT_EQ(output_layout.size.spatial[1], 2);
    EXPECT_EQ(output_layout.size.spatial[0], 3);
    EXPECT_EQ(output_layout.size.feature[0], 2);
    EXPECT_EQ(output_layout.size.batch[0], 2);
    // Reference values (fp32), compared against the converted fp16 output.
    std::vector<float> expected_output_vec = {
        0.625f, -0.5f, -1.125,
        0.625f, -0.5f, -1.125,
        0.f, -1.6875f, -1.6875f,
        0.f, -1.6875f, -1.6875f,
        1.75f, 2.9375f, 1.1875f,
        1.75f, 2.9375f, 1.1875f
    for (size_t i = 0; i < expected_output_vec.size(); ++i) {
        EXPECT_EQ(expected_output_vec[i], float16_to_float32(output_ptr[i]));