inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_MMAD.cpp (platform/upstream/dldt.git, 2019 R1)
/*
// Copyright (c) 2016 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
*/

#include "convolution_kernel_MMAD.h"

namespace kernel_selector {

    ParamsKey ConvolutionKernel_MMAD::GetSupportedKey() const
    {
        ParamsKey k;
        k.EnableInputDataType(Datatype::INT8);
        k.EnableOutputDataType(Datatype::INT8);
        k.EnableInputWeightsType(WeightsType::INT8);
        k.EnableInputLayout(DataLayout::byxf_af32);
        k.EnableOutputLayout(DataLayout::byxf_af32);
        k.EnableTensorOffset();
        k.EnableTensorPitches();
        k.EnableDilation();
        k.EnableBiasPerFeature();
        k.EnableBiasPerOutput();
        k.EnableNonBiasTerm();
        k.EnableBatching();
        k.EnableSplitSupport();
        k.EnableInt8Quantization();
        k.EnableOutputCalibration();
        k.DisableTuning();
        return k;
    }

    ConvolutionKernelBase::DispatchData ConvolutionKernel_MMAD::SetDefault(const convolution_params& arg, int) const
    {
        DispatchData runInfo = ConvolutionKernelBase::SetDefault(arg);

        constexpr size_t sub_group_size = 8;

        const auto of_maps = arg.output.Feature().v;
        const size_t of_threads_per_batch = RoundUp(of_maps, sub_group_size);

        runInfo.effiency = FORCE_PRIORITY_4;

        runInfo.gws0 = arg.output.X().v;
        runInfo.gws1 = arg.output.Y().v;
        runInfo.gws2 = of_threads_per_batch * arg.output.Batch().v;

        runInfo.lws0 = 1;
        runInfo.lws1 = 1;
        runInfo.lws2 = sub_group_size;

        return runInfo;
    }
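
    // Illustrative note (not in the original source): the dispatch above maps one
    // work-item per output (x, y) position per output feature, with the feature
    // count rounded up to the sub-group size of 8 and the batch folded into gws2.
    // Assumed example: an output of 56x56 with 64 feature maps and batch 1 gives
    // gws = {56, 56, 64} and lws = {1, 1, 8}, so each 8-wide sub-group covers
    // 8 output features at a single spatial location.
    // (DispatchData's priority field is spelled "effiency" in this codebase, so
    // the assignment above is not a typo.)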

    JitConstants ConvolutionKernel_MMAD::GetJitConstants(const convolution_params& params, const DispatchData& runInfo) const
    {
        auto jit = Parent::GetJitConstants(params, runInfo);

        jit.AddConstant(MakeJitConstant("SUB_GROUP_SIZE", runInfo.lws2));

        // pitch for special block format used in this kernel
        const size_t ifm_32_aligned = Align(params.weights.IFM().v, 32);
        const size_t filter_ofm_block_pitch = (ifm_32_aligned / 32) * params.weights.X().v * params.weights.Y().v * 4 * 8 * 8;
        jit.AddConstant(MakeJitConstant("FILTER_OFM_BLOCK_PITCH", filter_ofm_block_pitch));

        return jit;
    }
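
    // Sketch of the pitch arithmetic above, assuming the MMAD weight blocking
    // packs 4 input channels x 8 input-channel slices x 8 output channels per
    // block (hence the 4 * 8 * 8 = 256 factor):
    //   FILTER_OFM_BLOCK_PITCH = ((IFM aligned to 32) / 32) * filter_X * filter_Y * 256
    // e.g. a 3x3 filter with 64 input channels gives (64 / 32) * 3 * 3 * 256 = 4608
    // elements between consecutive blocks of 8 output feature maps.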

    KernelsData ConvolutionKernel_MMAD::GetKernelsData(const Params& params, const optional_params& options) const
    {
        KernelsData kd = GetTunedKernelsDataByIndex(params, options);
        if (!kd.empty())
            kd[0].estimatedTime = FORCE_PRIORITY_4;
        return kd;
    }
}  // namespace kernel_selector