// Publishing 2019 R1 content
// Source: platform/upstream/dldt.git — inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/fully_connected/fully_connected_kernel_bs_f_bsv16_af8.cpp
/*
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

17 #include "fully_connected_kernel_bs_f_bsv16_af8.h"
18
19 namespace kernel_selector 
20 {
21     ParamsKey FullyConnected_bs_f_bsv16_af8::GetSupportedKey() const
22     {
23         ParamsKey k;
24         k.EnableInputDataType(Datatype::F16);
25         k.EnableOutputDataType(Datatype::F16);
26         k.EnableInputWeightsType(WeightsType::F16);
27         k.EnableInputWeightsType(WeightsType::F32);
28         k.EnableAllInputLayout();
29         k.EnableOutputLayout(DataLayout::fb);
30         k.EnableOutputLayout(DataLayout::bs_f_bsv16__af8);
31         k.EnableBatching();
32         k.EnableBiasPerFeature();
33         k.EnableNonBiasTerm();
34         k.EnableSubGroup();
35         return k;
36     }
37
38     FullyConnected_bs_f_bsv16_af8::DispatchData FullyConnected_bs_f_bsv16_af8::SetDefault(const fully_connected_params& arg, int ) const
39     {
40         auto kd = FullyConnectedBlockKernelBase::SetDefault(arg);
41
42         size_t groups_per_batches = GetLocalGroupsSize(arg);
43         kd.gws0 = Align(arg.output.LogicalSize() / (GetBatchesPerWorkItem(arg) * groups_per_batches), 16);
44         kd.gws1 = groups_per_batches;
45         kd.lws0 = 16;
46         kd.lws1 = 1;
47
48         return kd;
49     }
50     
51     static bool check_input_layout(const DataTensor& t)
52     {
53         bool b16_layout = false;
54         b16_layout |= t.GetLayout() == DataLayout::bs_f_bsv16__af8;
55         b16_layout |= DataTensor::Channelndex(t.GetLayout(), Tensor::DataChannelName::BATCH) == 0 && t.Batch().v == 16;
56         return b16_layout;
57     }
58
59     bool FullyConnected_bs_f_bsv16_af8::Validate(const Params& p, const optional_params& o) const
60     {
61         if (!FullyConnectedBlockKernelBase::Validate(p, o))
62         {
63             return false;
64         }
65
66         const auto& params = static_cast<const fully_connected_params&>(p);
67         const auto& optParams = static_cast<const fully_connected_optional_params&>(o);
68
69         if (!params.engineInfo.bSubGroupShortSupport && params.inputs[0].GetDType() == Datatype::F16)
70         {
71             return false;
72         }
73
74         const bool bProperBatch = params.inputs[0].Batch().v == 16;
75         const bool bProperInput = check_input_layout(params.inputs[0]);
76         const bool bSupportedLayout = optParams.allowInputReordering || bProperInput;
77
78         if (!bProperBatch || !bSupportedLayout)
79         {
80             return false;
81         }
82
83         return true;
84     }
85
86     KernelsData FullyConnected_bs_f_bsv16_af8::GetKernelsData(const Params& params, const optional_params& optParams) const
87     {   
88         KernelsData res = {};
89         for (size_t i = 0; i < autoTuneOptions.size(); i++)
90         {
91             KernelsData kd = GetTunedKernelsDataByIndex(params, optParams, DataLayout::bs_f_bsv16__af8, { WeightsLayout::os_i_osv16__ai8 }, FORCE_PRIORITY_2, (int)i);
92             if (!kd.empty())
93             {
94                 res.emplace_back(kd[0]);
95             }
96         }
97
98         return res;
99     }
100 }