2 // Copyright (c) 2016 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
17 #include "fully_connected_kernel_bs_f_bsv8_af8.h"
19 namespace kernel_selector
// Advertises the data types, layouts and FC-specific features this kernel
// can handle, so the kernel selector can match it against a request.
// NOTE(review): this extract is missing lines (the `ParamsKey k;`
// declaration, braces and the trailing `return k;`) — inner line
// numbering jumps from 21 to 24 and from 32 to 34. Do not treat the
// visible text as a complete function body.
21 ParamsKey FullyConnected_bs_f_bsv8_af8::GetSupportedKey() const
// FP16 and FP32 are accepted for both activations and weights.
24     k.EnableInputDataType(Datatype::F16);
25     k.EnableInputDataType(Datatype::F32);
26     k.EnableOutputDataType(Datatype::F16);
27     k.EnableOutputDataType(Datatype::F32);
28     k.EnableInputWeightsType(WeightsType::F16);
29     k.EnableInputWeightsType(WeightsType::F32);
// Any input layout is allowed (Validate() applies stricter run-time
// checks), but output must be fb or the blocked bs_f_bsv8__af8 layout.
30     k.EnableAllInputLayout();
31     k.EnableOutputLayout(DataLayout::fb);
32     k.EnableOutputLayout(DataLayout::bs_f_bsv8__af8);
// Bias is optional; when present it is one value per output feature.
34     k.EnableBiasPerFeature();
35     k.EnableNonBiasTerm();
// Computes the dispatch (global work-group) sizes for this kernel on top
// of the base-class defaults.
// NOTE(review): the opening/closing braces and the `return kd;` line are
// missing from this extract (inner numbering jumps 40→42→44→46).
40 FullyConnected_bs_f_bsv8_af8::DispatchData FullyConnected_bs_f_bsv8_af8::SetDefault(const fully_connected_params& arg, int ) const
42     auto kd = FullyConnectedBlockKernelBase::SetDefault(arg);
44     size_t groups_per_batches = GetLocalGroupsSize(arg);
// gws0 = number of work-items along dim 0: total output elements divided
// by the per-work-item tile (neurons x batches x groups), rounded up to a
// multiple of 8 — presumably the sub-group / block size of this kernel
// (TODO confirm against the .cl implementation).
45     kd.gws0 = Align(arg.output.LogicalSize() / (GetNeuronsPerWorkItem(arg) * GetBatchesPerWorkItem(arg) * groups_per_batches), 8);
46     kd.gws1 = groups_per_batches;
// Returns true when the input tensor layout is usable by this kernel:
// either already in the blocked bs_f_bsv8__af8 layout, or batch-major
// (batch is the outermost channel, index 0) with batch exactly 8.
// NOTE(review): the `return b16_layout;` line and braces are missing from
// this extract (inner numbering jumps 57→61).
53 static bool check_input_layout(const DataTensor& t)
55     bool b16_layout = false;
56     b16_layout |= t.GetLayout() == DataLayout::bs_f_bsv8__af8;
// "Channelndex" (sic) is the actual clDNN API spelling — not a typo to fix here.
57     b16_layout |= DataTensor::Channelndex(t.GetLayout(), Tensor::DataChannelName::BATCH) == 0 && (t.Batch().v == 8);
// Returns true when the output tensor layout is producible by this
// kernel: fb with batch exactly 8, or the blocked bs_f_bsv8__af8 layout.
// NOTE(review): the `return b16_layout;` line and braces are missing from
// this extract (inner numbering jumps 65→69).
61 static bool check_output_layout(const DataTensor& t)
63     bool b16_layout = false;
64     b16_layout |= (t.GetLayout() == DataLayout::fb) && (t.Batch().v == 8);
65     b16_layout |= (t.GetLayout() == DataLayout::bs_f_bsv8__af8);
// Run-time eligibility check: rejects parameter combinations this kernel
// cannot execute even though GetSupportedKey() matched.
// NOTE(review): the extract drops the bodies of the early-return branches
// (`return false;`) and the final `return true;` — inner numbering jumps
// 71→76, 79→84, 94→102. Read the visible conditions as guards only.
69 bool FullyConnected_bs_f_bsv8_af8::Validate(const Params& p, const optional_params& o) const
// Base-class validation must pass first.
71     if (!FullyConnectedBlockKernelBase::Validate(p, o))
76     const auto& params = static_cast<const fully_connected_params&>(p);
77     const auto& optParams = static_cast<const fully_connected_optional_params&>(o);
// FP16 input requires device support for short sub-group operations
// (presumably the cl_intel_subgroups_short extension — TODO confirm).
79     if (!params.engineInfo.bSubGroupShortSupport && params.inputs[0].GetDType() == Datatype::F16)
// Batch and feature counts must both be non-zero multiples of 8, matching
// the bsv8/af8 blocking of this kernel.
84     const bool bProperBatch =
85         params.inputs[0].Batch().v >= 8 &&
86         params.inputs[0].Batch().v % 8 == 0;
87     const bool bProperFeature =
88         params.inputs[0].Feature().v >= 8 &&
89         params.inputs[0].Feature().v % 8 == 0;
90     const bool bProperInput = check_input_layout(params.inputs[0]);
91     const bool bProperOutput = check_output_layout(params.output);
// An unsupported input layout is acceptable if the caller allows the
// framework to reorder the input first.
92     const bool bSupportedLayout = optParams.allowInputReordering || bProperInput;
94     if (!bProperBatch || !bProperFeature || !bSupportedLayout || !bProperOutput)
// Builds candidate kernels, one per auto-tune option, all using the
// bs_f_bsv8__af8 input layout and os_i_osv8__ai8 weights layout at
// FORCE_PRIORITY_4.
// NOTE(review): loop-body braces, the (likely) `if (!kd.empty())` guard
// before emplace_back, and `return res;` are missing from this extract
// (inner numbering jumps 107→110).
102 KernelsData FullyConnected_bs_f_bsv8_af8::GetKernelsData(const Params& params, const optional_params& optParams) const
104     KernelsData res = {};
105     for (size_t i = 0; i < autoTuneOptions.size(); i++)
107         KernelsData kd = GetTunedKernelsDataByIndex(params, optParams, DataLayout::bs_f_bsv8__af8, { WeightsLayout::os_i_osv8__ai8 }, FORCE_PRIORITY_4, (int)i);
110             res.emplace_back(kd[0]);