// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
17 #include "fused_conv_eltwise_kernel_yxfb_yxio_b16.h"
19 namespace kernel_selector
22 ParamsKey fused_conv_eltwise_kernel_yxfb_yxio_b16::GetSupportedKey() const
25 k.EnableInputDataType(Datatype::F16);
26 k.EnableInputWeightsType(WeightsType::F16);
27 k.EnableInputWeightsType(WeightsType::F32);
28 k.EnableOutputDataType(Datatype::F16);
29 k.EnableInputLayout(DataLayout::yxfb);
30 k.EnableOutputLayout(DataLayout::yxfb);
31 k.EnableTensorOffset();
32 k.EnableTensorPitches();
33 k.EnableBiasPerFeature();
34 k.EnableNonBiasTerm();
36 k.EnableSplitSupport();
39 k.EnableFusedConvEltwiseRWOutOpt();
43 std::string fused_conv_eltwise_kernel_yxfb_yxio_b16::GetKernelName(const fused_conv_eltwise_params& params) const
45 if (params.inputs[0].GetDType() == Datatype::F32)
47 return kernelName + "_fp32";
51 return kernelName + "_fp16";
56 // how many batches will a single work item compute
57 size_t GetBatchesPerWorkItem(size_t batch_size, Datatype dataType)
59 if (dataType == Datatype::F16)
61 const uint32_t min_batches_per_wi = 1;
62 const uint32_t min_lws = 16;
64 if (batch_size % (4 * min_batches_per_wi * min_lws) == 0)
66 return 4 * min_batches_per_wi; // USE_BLOCK_READ_2 + as_half4
68 else if (batch_size % (2 * min_batches_per_wi * min_lws) == 0)
70 return 2 * min_batches_per_wi; // USE_BLOCK_READ_1 + as_half2
74 return min_batches_per_wi;
83 size_t GetOfmPerWorkitem(Datatype dataType)
85 if (dataType == Datatype::F16)
91 fused_conv_eltwise_kernel_base::DispatchData fused_conv_eltwise_kernel_yxfb_yxio_b16::SetDefault(const fused_conv_eltwise_params& arg, int) const
93 DispatchData runInfo = fused_conv_eltwise_kernel_base::SetDefault(arg);
95 const auto filter_ofm_num = arg.weights.OFM().v;
96 const auto batch_size = arg.output.Batch().v;
97 const uint32_t min_lws = 16;
99 const size_t batchesPerWorkItem = GetBatchesPerWorkItem(batch_size, arg.inputs[0].GetDType());
100 const size_t ofmPerWorkItem = GetOfmPerWorkitem(arg.inputs[0].GetDType());
102 if (arg.inputs[0].GetDType() == Datatype::F16)
104 runInfo.effiency = FORCE_PRIORITY_7;
108 runInfo.effiency = FORCE_PRIORITY_9;
111 runInfo.lws0 = min_lws;
112 runInfo.gws0 = filter_ofm_num * batch_size / (ofmPerWorkItem * batchesPerWorkItem);
117 bool fused_conv_eltwise_kernel_yxfb_yxio_b16::Validate(const Params& p, const optional_params& o) const
119 if (!fused_conv_eltwise_kernel_base::Validate(p, o))
123 const convolution_params& params = static_cast<const convolution_params&>(p);
125 const auto filter_ofm_num = params.weights.OFM().v;
126 const auto batch_size = params.output.Batch().v;
127 const uint32_t min_lws = 16;
129 const bool bInputValidated =
130 (filter_ofm_num > 0) &&
132 (params.output.Feature().v == filter_ofm_num);
134 if (!bInputValidated)
139 if (params.inputs[0].GetDType() == Datatype::F16)
141 const uint32_t min_ofm_per_wi = 16;
142 const uint32_t min_batches_per_wi = 1;
144 const bool bFilterOK = filter_ofm_num % min_ofm_per_wi == 0; // Number of output features dividable by minimum number of output features processed inside work item.
145 const bool bBatchOK = batch_size % (min_batches_per_wi * min_lws) == 0; // Batch size dividable by minimum number of batches processed when smallest local work size is used.
147 if (!bFilterOK || !bBatchOK)
154 if ((filter_ofm_num * batch_size) % min_lws != 0 ||
155 batch_size < 32) // TODO: check why it's not supported
164 JitConstants fused_conv_eltwise_kernel_yxfb_yxio_b16::GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& kd) const
166 auto jit = Parent::GetJitConstants(params, kd);
168 const auto local_work_group_size = kd.lws0;
169 const auto batch_size = params.output.Batch().v;
171 if (params.inputs[0].GetDType() == Datatype::F32)
173 // A LITTLE HACK, for convolutions with low number of input features don't use block reads, and it will speed up by 25%
174 // TODO - investigate why is this happening
175 if (params.inputs[0].Feature().v > 4)
177 jit.AddConstant(MakeJitConstant("USE_BLOCK_READ_2", ""));
182 const auto batch_pad_before = params.output.Batch().pad.before;
183 const auto feature_pitch = params.output.Feature().pitch;
185 if (batch_size >= 64 && (feature_pitch % 2 == 0) && (batch_pad_before % 2 == 0))
187 jit.AddConstant(MakeJitConstant("USE_BLOCK_READ_2", ""));
189 else if (batch_size >= 32 && (feature_pitch % 2 == 0) && (batch_pad_before % 2 == 0))
191 jit.AddConstant(MakeJitConstant("USE_BLOCK_READ_1", ""));
195 const size_t batchesPerWorkItem = GetBatchesPerWorkItem(batch_size, params.inputs[0].GetDType());
196 const size_t ofmPerWorkItem = GetOfmPerWorkitem(params.inputs[0].GetDType());
199 MakeJitConstant("LOCAL_WORK_GROUP_SIZE", kd.lws0),
200 MakeJitConstant("OFM_PER_WORK_ITEM", ofmPerWorkItem),
201 MakeJitConstant("BATCHES_PER_WORK_ITEM", batchesPerWorkItem), // how many batches will a single work item compute
202 MakeJitConstant("LOCAL_WORK_GROUPS_PER_SINGLE_BATCHES_ELEMENTS", std::max(batch_size / batchesPerWorkItem / local_work_group_size, static_cast<size_t>(1))), // how many local work groups we need to compute single element for each batch
203 MakeJitConstant("WORK_ITEMS_PER_SINGLE_BATCHES_ELEMENTS", batch_size / batchesPerWorkItem), // how many work items we need to compute single element for each batch
206 if (!params.eltw.stride.empty())
208 jit.AddConstant(MakeJitConstant("ELTW_STRIDE_X", params.eltw.stride[0].x));
209 jit.AddConstant(MakeJitConstant("ELTW_STRIDE_Y", params.eltw.stride[0].y));
213 jit.AddConstant(MakeJitConstant("ELTW_STRIDE_X", 1));
214 jit.AddConstant(MakeJitConstant("ELTW_STRIDE_Y", 1));
220 KernelsData fused_conv_eltwise_kernel_yxfb_yxio_b16::GetKernelsData(const Params& params, const optional_params& options) const
222 return GetTunedKernelsDataByIndex(params, options);