// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
17 #include "fully_connected_kernel_mmad_batched.h"
19 namespace kernel_selector
21 ParamsKey FullyConnected_mmad_batched::GetSupportedKey() const
24 k.EnableInputDataType(Datatype::INT8);
25 k.EnableOutputDataType(Datatype::INT8);
26 k.EnableInputWeightsType(WeightsType::INT8);
27 k.EnableInputLayout(DataLayout::fs_bs_yx_bsv4_fsv32);
28 k.EnableOutputLayout(DataLayout::fs_bs_yx_bsv4_fsv32);
29 k.EnableOutputLayout(DataLayout::bf);
30 k.EnableBiasPerOutput();
31 k.EnableBiasPerFeature();
32 k.EnableNonBiasTerm();
33 k.EnableTensorOffset();
34 k.EnableTensorPitches();
36 k.EnableInt8Quantization();
37 k.EnableOutputCalibration();
41 bool FullyConnected_mmad_batched::Validate(const Params& p, const optional_params& o) const
43 if (!FullyConnectedKernelBase::Validate(p, o))
48 const auto& params = static_cast<const fully_connected_params&>(p);
50 // we do not support padded input
51 if (params.inputs[0].X().pad.Total() != 0 || params.inputs[0].Y().pad.Total() != 0)
54 size_t batch = params.inputs[0].Batch().v;
55 // batch must be a multiple of 8
64 JitConstants FullyConnected_mmad_batched::GetJitConstants(const fully_connected_params& params, const DispatchData& runInfo) const
66 auto jit = Parent::GetJitConstants(params, runInfo);
68 jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws1));
70 // pitch for special block format used in this kernel
71 const size_t ifm_32_aligned = Align(params.weights.IFM().v, 32);
72 const size_t filter_ofm_block_pitch = (ifm_32_aligned / 32) * params.weights.X().v * params.weights.Y().v * 4 * 8 * 8;
73 jit.AddConstant(MakeJitConstant("FILTER_OFM_BLOCK_PITCH", filter_ofm_block_pitch));
75 const size_t in_x_pitch = 32 * 4;
76 const size_t in_y_pitch = 32 * 4 * params.inputs[0].X().LogicalDimPadded();
77 const size_t in_b_block_pitch = in_y_pitch * params.inputs[0].Y().LogicalDimPadded();
78 const size_t in_f_block_pitch = in_b_block_pitch * ((params.inputs[0].Batch().v + 3) / 4);
79 const size_t in_offset = in_x_pitch * params.inputs[0].X().pad.before + in_y_pitch * params.inputs[0].Y().pad.before;
81 jit.AddConstant(MakeJitConstant("IN_X_PITCH", in_x_pitch));
82 jit.AddConstant(MakeJitConstant("IN_Y_PITCH", in_y_pitch));
83 jit.AddConstant(MakeJitConstant("IN_B_BLOCK_PITCH", in_b_block_pitch));
84 jit.AddConstant(MakeJitConstant("IN_F_BLOCK_PITCH", in_f_block_pitch));
85 jit.AddConstant(MakeJitConstant("IN_OFFSET", in_offset));
90 FullyConnected_mmad_batched::DispatchData FullyConnected_mmad_batched::SetDefault(const fully_connected_params& params, int) const
92 auto runInfo = Parent::SetDefault(params);
94 constexpr size_t sub_group_size = 8;
96 const auto of_maps = params.output.Feature().v;
97 const size_t of_threads_per_batch = RoundUp(of_maps, sub_group_size);
99 runInfo.gws0 = params.output.Batch().v / 8; // we process 8 batches in a single WG
100 runInfo.gws1 = of_threads_per_batch;
104 runInfo.lws1 = sub_group_size;
107 runInfo.effiency = FORCE_PRIORITY_1;
111 KernelsData FullyConnected_mmad_batched::GetKernelsData(const Params& params, const optional_params& options) const
113 KernelsData res = {};
114 for (size_t i = 0; i < autoTuneOptions.size(); i++)
116 KernelsData kd = GetTunedKernelsDataByIndex(params, options, DataLayout::fs_bs_yx_bsv4_fsv32,
117 { WeightsLayout::os_is_yx_isa8_osv8_isv4 }, FORCE_PRIORITY_1, (int)i);
120 res.emplace_back(kd[0]);