// Publishing 2019 R1 content
// [platform/upstream/dldt.git] / inference-engine / thirdparty / clDNN / src / gpu / one_hot_gpu.cpp
1 // Copyright (c) 2019 Intel Corporation
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15
16 #include "one_hot_inst.h"
17
18 #include "primitive_gpu_base.h"
19 #include "implementation_map.h"
20 #include "kernel_selector_helper.h"
21 #include "one_hot/one_hot_kernel_selector.h"
22 #include "one_hot/one_hot_kernel_base.h"
23 #include "error_handler.h"
24
25 namespace cldnn {
26     namespace gpu {
27
28         struct one_hot_gpu : typed_primitive_gpu_impl<one_hot>
29         {
30             using parent = typed_primitive_gpu_impl<one_hot>;
31             using parent::parent;
32
33
34             static primitive_impl* create(const one_hot_node& arg)
35             {
36                 auto oh_params = get_default_params<kernel_selector::one_hot_params>(arg, 1);
37                 auto oh_optional_params = get_default_optional_params<kernel_selector::one_hot_optional_params>(arg.get_program());
38
39                 oh_params.one_hot_axis = arg.get_primitive()->one_hot_axis;
40
41                 auto output_sizes = arg.get_output_layout().size;
42                 std::vector<tensor::value_type> output_dims = { output_sizes.batch[0], output_sizes.feature[0],
43                     output_sizes.spatial[1], output_sizes.spatial[0] };
44                 oh_params.one_hot_limit = output_dims[oh_params.one_hot_axis];
45
46                 auto& kernel_selector = kernel_selector::one_hot_kernel_selector::Instance();
47                 auto best_kernels = kernel_selector.GetBestKernels(oh_params, oh_optional_params);
48
49                 CLDNN_ERROR_BOOL(arg.id(), "Best_kernel.empty()", best_kernels.empty(), "Cannot find a proper kernel with these arguments");
50
51                 return new one_hot_gpu(arg, best_kernels[0]);
52             }
53         };
54
55         namespace {
56             struct attach {
57                 attach() {
58                     auto val_fw = one_hot_gpu::create;
59
60                     implementation_map<one_hot>::add(std::make_tuple(engine_types::ocl, data_types::i8, format::bfyx), val_fw);
61                     implementation_map<one_hot>::add(std::make_tuple(engine_types::ocl, data_types::u8, format::bfyx), val_fw);
62                     implementation_map<one_hot>::add(std::make_tuple(engine_types::ocl, data_types::i32, format::bfyx), val_fw);
63                     implementation_map<one_hot>::add(std::make_tuple(engine_types::ocl, data_types::i64, format::bfyx), val_fw);
64                 }
65                 ~attach() = default;
66             };
67
68             attach attach_impl;
69
70         }
71     }
72 }