/*******************************************************************************
 * Copyright 2017-2018 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *******************************************************************************/

#ifndef CPU_JIT_AVX512_COMMON_CONVOLUTION_WINOGRAD_HPP
#define CPU_JIT_AVX512_COMMON_CONVOLUTION_WINOGRAD_HPP

#include "c_types_map.hpp"
#include "memory_tracking.hpp"
#include "cpu_convolution_pd.hpp"
#include "cpu_engine.hpp"
#include "mkldnn_thread.hpp"

#include "jit_avx512_common_conv_winograd_kernel_f32.hpp"
namespace mkldnn {
namespace impl {
namespace cpu {

namespace winograd_avx512_common {
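/* Books the scratchpad needed by the Winograd schedules below.
 * U, V and M are the three transform buffers: U holds the transformed
 * weights (alpha x alpha x ic x oc), V the transformed source tiles and M
 * the GEMM output that is later inverse-transformed; alpha is the Winograd
 * tile size (6 for the F(4x4, 3x3) transforms declared at the end of this
 * header). The additional per-thread buffers are booked only for the
 * backward-weights WSCHED_WEI_S_D_G_W schedule: a source transpose buffer
 * used when jcp.ver == ver_4fma, a per-thread bias-reduction buffer, and a
 * padded-bias copy when oc_without_padding != oc. */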
inline void init_scratchpad(memory_tracking::registrar_t &scratchpad,
        const jit_conv_winograd_conf_t &jcp) {
    using namespace memory_tracking::names;

    size_t U_sz = (size_t)alpha * alpha * jcp.ic * jcp.oc;
    size_t V_sz = (size_t)alpha * alpha * jcp.mb * jcp.ic
        * (jcp.itiles * jcp.jtiles + jcp.tile_4fma_padding);
    size_t M_sz = (size_t)alpha * alpha * jcp.mb * jcp.oc
        * (jcp.itiles * jcp.jtiles + jcp.tile_4fma_padding);

    scratchpad.book(key_wino_U, sizeof(float) * U_sz, PAGE_2M);
    scratchpad.book(key_wino_V, sizeof(float) * V_sz, PAGE_2M);
    scratchpad.book(key_wino_M, sizeof(float) * M_sz, PAGE_2M);

    if (jcp.sched_policy == WSCHED_WEI_S_D_G_W) {
        const int nthr = mkldnn_get_max_threads();

        size_t tr_src_sz = jcp.ver != ver_4fma ? 0 : (size_t)nthr
            * alpha * alpha * jcp.tile_4fma * jcp.ic_simd_block;
        scratchpad.book(key_conv_tr_src, sizeof(float) * tr_src_sz, PAGE_2M);

        size_t br_sz = jcp.with_bias ? nthr * jcp.oc : 0;
        scratchpad.book(key_conv_bia_reduction, sizeof(float) * br_sz, PAGE_2M);

        size_t padded_bias_sz =
            jcp.with_bias && jcp.oc_without_padding != jcp.oc ? jcp.oc : 0;
        scratchpad.book(key_conv_padded_bias, sizeof(float) * padded_bias_sz);
    }
}
}

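/* Common driver for the forward and backward-by-data paths. Both run the
 * same W_S_G_D pipeline (weights transform, source transform, GEMM over the
 * alpha x alpha transform points, destination transform) implemented in
 * _execute_data_W_S_G_D(); only the roles of the input and output tensors
 * differ between the two specializations. */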
template <bool is_fwd>
struct _jit_avx512_common_convolution_winograd_t {

    _jit_avx512_common_convolution_winograd_t(
            const jit_conv_winograd_conf_t &jcp, const primitive_attr_t *attr)
        : kernel_(nullptr), attr_(attr) {
        kernel_ = new _jit_avx512_common_conv_winograd_data_kernel_f32(jcp);
    }

    ~_jit_avx512_common_convolution_winograd_t() { delete kernel_; }

protected:
    void _execute_data_W_S_G_D(const int MB, float *inp_ptr, float *out_ptr,
            float *wei_ptr, float *bias_ptr,
            const memory_tracking::grantor_t &scratchpad) const;
    _jit_avx512_common_conv_winograd_data_kernel_f32 *kernel_;
    const primitive_attr_t *attr_;
};

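/* Forward Winograd convolution. At execute() time the inputs are src
 * (index 0), weights (index 1) and, if present, bias (index 2); the output
 * is dst. All tensors are f32 in the 16-channel blocked formats chosen by
 * set_default_params(). */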
struct jit_avx512_common_convolution_winograd_fwd_t
    : _jit_avx512_common_convolution_winograd_t<true>
    , public cpu_primitive_t
{
    struct pd_t : public cpu_convolution_fwd_pd_t {
        pd_t(engine_t *engine, const convolution_desc_t *adesc,
                const primitive_attr_t *attr,
                const typename pd_t::base_class *hint_fwd_pd)
            : cpu_convolution_fwd_pd_t(engine, adesc, attr, hint_fwd_pd)
            , jcp_() {}

        DECLARE_COMMON_PD_T(
                JIT_IMPL_NAME_HELPER("jit_wino:", avx512_common, ""),
                jit_avx512_common_convolution_winograd_fwd_t);

        virtual status_t init() override
        {
            using namespace prop_kind;
            assert(this->engine()->kind() == engine_kind::cpu);
            bool ok = true && this->set_default_params() == status::success
                    && utils::one_of(this->desc()->prop_kind, forward_training,
                               forward_inference)
                    && utils::one_of(this->desc()->alg_kind,
                               alg_kind::convolution_auto,
                               alg_kind::convolution_winograd)
                    && !this->has_zero_dim_memory()
                    && utils::everyone_is(data_type::f32,
                               this->desc()->src_desc.data_type,
                               this->desc()->weights_desc.data_type,
                               this->desc()->dst_desc.data_type)
                    && IMPLICATION(this->with_bias(), data_type::f32
                                       == this->desc()->bias_desc.data_type)
                    && mkldnn_thr_syncable();

            if (!ok)
                return status::unimplemented;

            status_t status =
                jit_avx512_common_conv_winograd_fwd_kernel_f32::init_conf(
                        jcp_, *this->desc(), *this->src_pd_.desc(),
                        *this->weights_pd_.desc(), *this->dst_pd_.desc(),
                        *this->attr());
            if (status != status::success) return status;

            auto scratchpad = this->scratchpad_registry().registrar();
            winograd_avx512_common::init_scratchpad(scratchpad, jcp_);

            if (status == status::success
                    && this->desc()->alg_kind == alg_kind::convolution_auto)
                CHECK(this->set_alg_kind(alg_kind::convolution_winograd));

            return status;
        }

        jit_conv_winograd_conf_t jcp_;

    protected:
        virtual status_t set_default_params() override
        {
            using namespace memory_format;
            if (this->src_pd_.desc()->format == any)
                CHECK(this->src_pd_.set_format(nChw16c));
            if (this->dst_pd_.desc()->format == any)
                CHECK(this->dst_pd_.set_format(nChw16c));
            if (this->weights_pd_.desc()->format == any)
                CHECK(this->weights_pd_.set_format(
                        this->with_groups() ? gOIhw16i16o : OIhw16i16o));
            if (this->bias_pd_.desc()->format == any)
                CHECK(this->bias_pd_.set_format(x));
            return status::success;
        }
    };

    jit_avx512_common_convolution_winograd_fwd_t(const pd_t *apd,
            const input_vector &inputs, const output_vector &outputs)
        : _jit_avx512_common_convolution_winograd_t<true>(apd->jcp_, apd->attr())
        , cpu_primitive_t(apd, inputs, outputs, true) {}

    ~jit_avx512_common_convolution_winograd_fwd_t() {}

    typedef typename prec_traits<data_type::f32>::type data_t;

    virtual void execute(event_t *e) const
    {
        float *src = (float *)this->input_memory(0);
        float *dst = (float *)this->memory();
        float *weights = (float *)this->input_memory(1);
        float *bias = (float *)this->input_memory(2);

        this->_execute_data_W_S_G_D(pd()->MB(), src, dst, weights, bias,
                this->scratchpad());

        e->set_state(event_t::ready);
    }

private:
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
};

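/* Backward-by-data Winograd convolution. It reuses the shared data driver
 * with diff_dst as the input and diff_src as the output; there is no bias,
 * so nullptr is passed in its place. */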
struct jit_avx512_common_convolution_winograd_bwd_data_t
    : _jit_avx512_common_convolution_winograd_t<false>
    , public cpu_primitive_t {
    struct pd_t : public cpu_convolution_bwd_data_pd_t {
        pd_t(engine_t *engine, const convolution_desc_t *adesc,
                const primitive_attr_t *attr,
                const convolution_fwd_pd_t *hint_fwd_pd)
            : cpu_convolution_bwd_data_pd_t(engine, adesc, attr, hint_fwd_pd)
            , jcp_() {}

        DECLARE_COMMON_PD_T(
                JIT_IMPL_NAME_HELPER("jit_wino:", avx512_common, ""),
                jit_avx512_common_convolution_winograd_bwd_data_t);

        virtual status_t init() override
        {
            using namespace prop_kind;
            assert(this->engine()->kind() == engine_kind::cpu);
            bool ok = true && this->set_default_params() == status::success
                    && utils::one_of(this->desc()->prop_kind, backward_data)
                    && utils::one_of(this->desc()->alg_kind,
                               alg_kind::convolution_auto,
                               alg_kind::convolution_winograd)
                    && !this->has_zero_dim_memory()
                    && utils::everyone_is(data_type::f32,
                               this->desc()->diff_src_desc.data_type,
                               this->desc()->weights_desc.data_type,
                               this->desc()->diff_dst_desc.data_type)
                    && mkldnn_thr_syncable();

            if (!ok)
                return status::unimplemented;

            status_t status =
                jit_avx512_common_conv_winograd_bwd_data_kernel_f32::init_conf(
                        jcp_, *this->desc(), *this->diff_src_pd_.desc(),
                        *this->weights_pd_.desc(), *this->diff_dst_pd_.desc());
            if (status != status::success) return status;

            auto scratchpad = this->scratchpad_registry().registrar();
            winograd_avx512_common::init_scratchpad(scratchpad, jcp_);

            if (status == status::success
                    && this->desc()->alg_kind == alg_kind::convolution_auto)
                CHECK(this->set_alg_kind(alg_kind::convolution_winograd));

            return status;
        }

        jit_conv_winograd_conf_t jcp_;

    protected:
        virtual status_t set_default_params() override
        {
            using namespace memory_format;

            if (this->diff_src_pd_.desc()->format == any)
                CHECK(this->diff_src_pd_.set_format(nChw16c));
            if (this->diff_dst_pd_.desc()->format == any)
                CHECK(this->diff_dst_pd_.set_format(nChw16c));
            if (this->weights_pd_.desc()->format == any)
                CHECK(this->weights_pd_.set_format(
                        this->with_groups() ? gOIhw16i16o : OIhw16i16o));
            return status::success;
        }
    };

    jit_avx512_common_convolution_winograd_bwd_data_t(const pd_t *apd,
            const input_vector &inputs, const output_vector &outputs)
        : _jit_avx512_common_convolution_winograd_t<false>(apd->jcp_, apd->attr())
        , cpu_primitive_t(apd, inputs, outputs, true) {}

    ~jit_avx512_common_convolution_winograd_bwd_data_t() {}

    typedef typename prec_traits<data_type::f32>::type data_t;

    virtual void execute(event_t *e) const
    {
        assert(pd()->desc()->prop_kind == prop_kind::backward_data
                && "invalid prop_kind");

        float *diff_dst = (float *)this->input_memory(0);
        float *diff_src = (float *)this->memory();
        float *weights = (float *)this->input_memory(1);

        this->_execute_data_W_S_G_D(pd()->MB(), diff_dst, diff_src, weights,
                nullptr, this->scratchpad());

        e->set_state(event_t::ready);
    }

private:
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
};

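/* Backward-by-weights Winograd convolution. Unlike the data paths it owns
 * its own kernel and runs the S_D_G_W schedule (source transform, diff_dst
 * transform, GEMM, inverse weights transform), followed by an optional
 * diff_bias copy when the output channels are padded. */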
struct jit_avx512_common_convolution_winograd_bwd_weights_t
    : public cpu_primitive_t {
    struct pd_t : public cpu_convolution_bwd_weights_pd_t {
        pd_t(engine_t *engine, const convolution_desc_t *adesc,
                const primitive_attr_t *attr,
                const convolution_fwd_pd_t *hint_fwd_pd)
            : cpu_convolution_bwd_weights_pd_t(engine, adesc, attr,
                    hint_fwd_pd)
            , jcp_() {}

        DECLARE_COMMON_PD_T(
                JIT_IMPL_NAME_HELPER("jit_wino:", avx512_common, ""),
                jit_avx512_common_convolution_winograd_bwd_weights_t);

        virtual status_t init() override
        {
            using namespace prop_kind;
            assert(this->engine()->kind() == engine_kind::cpu);
            bool ok = true && this->set_default_params() == status::success
                    && utils::one_of(this->desc()->prop_kind, backward_weights)
                    && utils::one_of(this->desc()->alg_kind,
                               alg_kind::convolution_auto,
                               alg_kind::convolution_winograd)
                    && !this->has_zero_dim_memory()
                    && utils::everyone_is(data_type::f32,
                               this->desc()->src_desc.data_type,
                               this->desc()->diff_dst_desc.data_type,
                               this->desc()->diff_weights_desc.data_type)
                    && mkldnn_thr_syncable();
            if (!ok)
                return status::unimplemented;

            status_t status =
                jit_avx512_common_conv_winograd_bwd_weights_kernel_f32::
                init_conf(jcp_, *this->desc(), *this->src_pd_.desc(),
                        *this->diff_dst_pd_.desc(),
                        *this->diff_weights_pd_.desc());
            if (status != status::success) return status;

            auto scratchpad = this->scratchpad_registry().registrar();
            winograd_avx512_common::init_scratchpad(scratchpad, jcp_);

            if (status == status::success
                    && this->desc()->alg_kind == alg_kind::convolution_auto)
                CHECK(this->set_alg_kind(alg_kind::convolution_winograd));

            return status;
        }

        jit_conv_winograd_conf_t jcp_;

    protected:
        virtual status_t set_default_params() override
        {
            using namespace memory_format;

            if (this->src_pd_.desc()->format == any)
                CHECK(this->src_pd_.set_format(nChw16c));
            if (this->diff_dst_pd_.desc()->format == any)
                CHECK(this->diff_dst_pd_.set_format(nChw16c));
            if (this->diff_weights_pd_.desc()->format == any)
                CHECK(this->diff_weights_pd_.set_format(
                        this->with_groups() ? gOIhw16i16o : OIhw16i16o));
            if (diff_bias_pd_.desc()->format == any)
                CHECK(diff_bias_pd_.set_format(x));
            return status::success;
        }
    };

    jit_avx512_common_convolution_winograd_bwd_weights_t(const pd_t *apd,
            const input_vector &inputs, const output_vector &outputs)
        : cpu_primitive_t(apd, inputs, outputs, true), kernel_(nullptr)
    {
        kernel_ = new jit_avx512_common_conv_winograd_bwd_weights_kernel_f32(
                pd()->jcp_);
    }

    ~jit_avx512_common_convolution_winograd_bwd_weights_t()
    { delete kernel_; }

    typedef typename prec_traits<data_type::f32>::type data_t;

    virtual void execute(event_t *e) const
    {
        assert(pd()->desc()->prop_kind == prop_kind::backward_weights
                && "invalid prop_kind");
        _execute_backward_weights_S_D_G_W(scratchpad());
        e->set_state(event_t::ready);
    }

private:
    void _execute_backward_weights_S_D_G_W(
            const memory_tracking::grantor_t &scratchpad) const;
    void _maybe_execute_diff_bias_copy(
            const memory_tracking::grantor_t &scratchpad) const;

    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
    jit_avx512_common_conv_winograd_bwd_weights_kernel_f32 *kernel_;
};

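/* Tile transforms shared with the .cpp implementation. The array shapes
 * follow F(4x4, 3x3): 6x6 (alpha x alpha) transform tiles over 16-wide SIMD
 * channel blocks, with 16x16 blocks where both input and output channels
 * are involved; the _wu variants serve the weight-update (backward-weights)
 * pass. */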
void trans_W_4x4_3x3(float Fw_[6][6][16][16], float F[3][3][16][16]);
void trans_O_4x4_3x3(float Mw[6][6][16], float O[4][4][16]);
void trans_W_3x3_4x4(float Fw[6][6][16], float F[4][6][16]);
void trans_O_3x3_4x4(float Mw[6][6][16][16], float M[3][3][16][16]);
void trans_I_4x4_3x3(float Iw[6][6][16], float I[6][6][16]);
void trans_W_3x3_4x4_wu(float Fw[6][6][16], float F[4][6][16]);
void trans_O_3x3_4x4_wu(float Mw[6][6][16][16], float M[3][3][16][16]);

}
}
}

#endif

// vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s