2 // Copyright (c) 2018 Intel Corporation
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
8 // http://www.apache.org/licenses/LICENSE-2.0
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
17 #include "fused_conv_eltwise_kernel_bfyx_1x1_opt.h"
18 #include "kernel_selector_utils.h"
20 namespace kernel_selector {
22 ParamsKey fused_conv_eltwise_kernel_bfyx_1x1_opt::GetSupportedKey() const
// Advertises the tensor layouts, data types and fused-op features this kernel
// can handle; the kernel selector matches this key against the node's params.
// NOTE(review): this span is gapped (pasted line numbers jump) — the ParamsKey
// declaration, any further Enable* calls and the `return k;` are not visible here.
25 k.EnableInputDataType(Datatype::F32);
26 k.EnableInputWeightsType(WeightsType::F32);
27 k.EnableOutputDataType(Datatype::F32);
// Only plain bfyx in/out layouts are supported by this 1x1-optimized variant.
28 k.EnableInputLayout(DataLayout::bfyx);
29 k.EnableOutputLayout(DataLayout::bfyx);
30 k.EnableTensorOffset();
31 k.EnableTensorPitches();
33 //k.EnableSubGroupShort(); // we need it for FP16 only. we check it on the Validate phase
// Bias may be absent or given per output feature.
34 k.EnableBiasPerFeature();
35 k.EnableNonBiasTerm();
37 k.EnableFusedConvEltwSplitSupport();
38 k.EnableFusedConvEltwiseRWOutOpt(); // data for second input are already in output
// Picks the per-work-item output block (width x height x depth) for the given
// output spatial size. The kernel is tuned for the classic 1x1-conv feature-map
// widths 7/14/28/56 (e.g. ResNet-style topologies).
// NOTE(review): this span is gapped — the `out_depth` initialization, the
// bodies of the X==14/28/56 branches and the fallback return are not visible.
49 static block_params get_out_block_size(const fused_conv_eltwise_params& p)
53 if (p.output.X().v == 7)
// Estimate the dispatch size this block choice would produce, so we can
// shrink the depth blocking when it would under-occupy the device.
55 auto gws0 = p.output.X().v / 7;
56 auto gws1 = p.output.Y().v / 1;
57 auto gws2 = 2 * (p.output.Feature().v * p.output.Batch().v) / 8; // process 8 output channels per Workitem
59 auto compute_units = p.engineInfo.computeUnitsCount;
60 auto total_threads = (gws0 * gws1 * gws2) / 64;
// Two halving steps: each presumably halves out_depth to double the thread
// count until every compute unit has work — TODO confirm the hidden bodies.
61 if (total_threads < compute_units)
66 if (total_threads < compute_units)
71 return { 7,1,out_depth };
73 else if (p.output.X().v == 14)
75 else if (p.output.X().v == 28)
77 else if (p.output.X().v == 56)
83 std::string fused_conv_eltwise_kernel_bfyx_1x1_opt::GetKernelName(const fused_conv_eltwise_params& params) const
85 if (params.inputs[0].GetDType() == Datatype::F32)
87 return kernelName + "_fp32";
91 return kernelName + "_fp16";
95 bool fused_conv_eltwise_kernel_bfyx_1x1_opt::Validate(const Params& p, const optional_params& o) const
97 if (!fused_conv_eltwise_kernel_base::Validate(p, o) ||
98 !FusedConvolutionEltwiseCheckInput(p, o))
103 const fused_conv_eltwise_params& cp = static_cast<const fused_conv_eltwise_params&>(p);
105 if (cp.conv.stride.x != 1 || cp.conv.stride.y != 1)
108 if (cp.conv.filterSize.x != 1 || cp.conv.filterSize.y != 1)
111 if (cp.output.Feature().v % 64 != 0)
114 if (cp.conv.padding.x != 0 || cp.conv.padding.y != 0)
117 // if block sizes are 1x1, then this algorithm is probably not the best
118 auto block = get_out_block_size(cp);
119 if (block.out_width == 1 && block.out_height == 1)
122 if (cp.output.X().v % block.out_width != 0)
124 if (cp.output.Y().v % block.out_height != 0)
130 std::vector<WeightsLayout> fused_conv_eltwise_kernel_bfyx_1x1_opt::GetSupportedWeightLayouts(const fused_conv_eltwise_params& p) const
132 auto block = get_out_block_size(p);
133 if (block.out_depth == 8)
134 return { WeightsLayout::os_iyx_osv64 };
135 if (block.out_depth == 4)
136 return { WeightsLayout::os_iyx_osv32 };
137 if (block.out_depth == 2)
138 return { WeightsLayout::os_iyx_osv16 };
140 return{ WeightsLayout::yxio };
// Computes the ND-range dispatch configuration (global/local work sizes)
// from the chosen output block.
// NOTE(review): this span is gapped — the lines between gws2 and lws2
// (presumably lws0/lws1 assignments) and the `return runInfo;` are not
// visible; do not assume lws0/lws1 values from here.
143 fused_conv_eltwise_kernel_base::DispatchData fused_conv_eltwise_kernel_bfyx_1x1_opt::SetDefault(const fused_conv_eltwise_params& arg, int) const
145 DispatchData runInfo = Parent::SetDefault(arg);
// Kernel is written for an 8-wide subgroup.
147 constexpr size_t sub_group_size = 8;
// "effiency" is the field's actual (misspelled) name in kernel_selector.
149 runInfo.effiency = FORCE_PRIORITY_3;
151 auto block = get_out_block_size(arg);
// One work-item per output block in X/Y; factor 2 in gws2 pairs with the
// doubled lws2 below so two subgroups cooperate per feature block.
153 runInfo.gws0 = arg.output.X().v / block.out_width;
154 runInfo.gws1 = arg.output.Y().v / block.out_height;
155 runInfo.gws2 = 2 * (arg.output.Feature().v * arg.output.Batch().v) / block.out_depth; // process 8 output channels per Workitem
159 runInfo.lws2 = 2 * sub_group_size;
164 JitConstants fused_conv_eltwise_kernel_bfyx_1x1_opt::GetJitConstants(const fused_conv_eltwise_params& params, const DispatchData& runInfo) const
166 auto jit = Parent::GetJitConstants(params, runInfo);
168 auto block = get_out_block_size(params);
169 jit.AddConstant(MakeJitConstant("OUT_BLOCK_WIDTH", block.out_width));
170 jit.AddConstant(MakeJitConstant("OUT_BLOCK_HEIGHT", block.out_height));
171 jit.AddConstant(MakeJitConstant("OUT_BLOCK_DEPTH", block.out_depth));
173 if (!params.eltw.stride.empty())
175 jit.AddConstant(MakeJitConstant("ELTW_STRIDE_X", params.eltw.stride[0].x));
176 jit.AddConstant(MakeJitConstant("ELTW_STRIDE_Y", params.eltw.stride[0].y));
180 jit.AddConstant(MakeJitConstant("ELTW_STRIDE_X", 1));
181 jit.AddConstant(MakeJitConstant("ELTW_STRIDE_Y", 1));
// Builds the kernel data via the common base-class path and bumps the
// selection priority of the first candidate.
// NOTE(review): the function's tail (return statement, closing braces) is
// beyond this view. kd[0] is indexed without an emptiness check here —
// confirm GetCommonKernelsData cannot return an empty vector, or that a
// guard exists in the hidden lines.
187 KernelsData fused_conv_eltwise_kernel_bfyx_1x1_opt::GetKernelsData(const Params& params, const optional_params& options) const
189 KernelsData kd = GetCommonKernelsData(params, options);
191 kd[0].estimatedTime = FORCE_PRIORITY_1;