Updated the README file after moving the CMake scripts to the root folder
[platform/upstream/dldt.git] / inference-engine / thirdparty / mkl-dnn / src / cpu / nspc_batch_normalization.hpp
1 /*******************************************************************************
2 * Copyright 2018 Intel Corporation
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *     http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *******************************************************************************/
16
17 #ifndef CPU_NSPC_BATCH_NORMALIZATION_HPP
18 #define CPU_NSPC_BATCH_NORMALIZATION_HPP
19
20 #include <assert.h>
21
22 #include "c_types_map.hpp"
23 #include "memory_tracking.hpp"
24 #include "type_helpers.hpp"
25 #include "utils.hpp"
26
27 #include "cpu_batch_normalization_pd.hpp"
28
29 namespace mkldnn {
30 namespace impl {
31 namespace cpu {
32
33 template <data_type_t data_type>
34 struct nspc_batch_normalization_fwd_t : public cpu_primitive_t {
35     struct pd_t : public cpu_batch_normalization_fwd_pd_t {
36         pd_t(engine_t *engine, const batch_normalization_desc_t *adesc,
37                 const primitive_attr_t *attr,
38                 const batch_normalization_fwd_pd_t *hint_fwd_pd)
39             : cpu_batch_normalization_fwd_pd_t(
40                       engine, adesc, attr, hint_fwd_pd) {}
41
42         DECLARE_COMMON_PD_T("nspc_bnorm:any", nspc_batch_normalization_fwd_t);
43
44         virtual status_t init() override {
45             using namespace data_type;
46             using namespace prop_kind;
47
48             assert(engine()->kind() == engine_kind::cpu);
49
50             bool ok = true
51                     /* the algorithm requires barriers while switching
52                      * between parallelization over N and C dimensions */
53                     && mkldnn_thr_syncable() && is_fwd()
54                     && !has_zero_dim_memory()
55                     && utils::one_of(desc()->prop_kind, forward_training,
56                                forward_inference)
57                     && desc()->data_desc.data_type == data_type
58                     && IMPLICATION(use_scaleshift(),
59                                desc()->data_scaleshift_desc.data_type
60                                        == f32)
61                     && utils::everyone_is(f32, desc()->mean_desc.data_type,
62                                desc()->variance_desc.data_type)
63                     // TODO: add ndhwc support?
64                     && utils::one_of(
65                                data_pd_.desc()->format, memory_format::nhwc)
66                     && IMPLICATION(data_type == bf16, mayiuse(avx512_core))
67                     && (attr()->has_default_values()
68                                || this->with_relu_post_op());
69             if (!ok)
70                 return status::unimplemented;
71
72             if (is_training() && fuse_bn_relu())
73                 bn_init_default_ws(this, this->workspace_pd_, 8);
74
75             if (stats_is_src() || is_training()) {
76                 memory_desc_t stats_d;
77                 dims_t stats_dims = { C() };
78                 mkldnn_memory_desc_init(
79                         &stats_d, 1, stats_dims, f32, memory_format::x);
80                 mean_pd_ = cpu_memory_t::pd_t(engine_, &stats_d);
81                 variance_pd_ = cpu_memory_t::pd_t(engine_, &stats_d);
82             }
83
84             init_scratchpad();
85
86             return status::success;
87         }
88
89     private:
90         void init_scratchpad() {
91             using namespace memory_tracking::names;
92             auto scratchpad = scratchpad_registry().registrar();
93             if (!stats_is_src()) {
94                 const size_t stats_buf_sz = sizeof(acc_data_t)
95                     * nstl::max(C(), 16) * mkldnn_get_max_threads();
96                 scratchpad.book(key_bnorm_reduction, stats_buf_sz);
97                 scratchpad.book(key_bnorm_tmp_mean, stats_buf_sz);
98                 scratchpad.book(key_bnorm_tmp_var, stats_buf_sz);
99             }
100             if (data_type == data_type::bf16) {
101                 const int simd_w = 16;
102                 const int nbufs = 2;
103                 const size_t bf16cvt_buf_sz = sizeof(acc_data_t) * nbufs
104                     * mkldnn_get_max_threads() * utils::rnd_up(C(), simd_w);
105                 scratchpad.book(key_bnorm_bf16cvt, bf16cvt_buf_sz);
106             }
107         }
108     };
109
110     typedef typename prec_traits<data_type>::type data_t;
111     typedef float acc_data_t;
112
113     nspc_batch_normalization_fwd_t(const pd_t *apd, const input_vector &inputs,
114             const output_vector &outputs)
115         : cpu_primitive_t(apd, inputs, outputs) {}
116
117     ~nspc_batch_normalization_fwd_t() {}
118
119     virtual void execute(event_t *e) const {
120         execute_forward();
121         e->set_state(event_t::ready);
122     }
123
124 private:
125     void execute_forward() const;
126     const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
127 };
128
129 template <data_type_t data_type>
130 struct nspc_batch_normalization_bwd_t : public cpu_primitive_t {
131     struct pd_t : public cpu_batch_normalization_bwd_pd_t {
132         pd_t(engine_t *engine, const batch_normalization_desc_t *adesc,
133                 const primitive_attr_t *attr,
134                 const batch_normalization_fwd_pd_t *hint_fwd_pd)
135             : cpu_batch_normalization_bwd_pd_t(
136                       engine, adesc, attr, hint_fwd_pd) {}
137
138         DECLARE_COMMON_PD_T("nspc_bnorm:any", nspc_batch_normalization_bwd_t);
139
140         virtual status_t init() override {
141             using namespace data_type;
142             using namespace prop_kind;
143
144             assert(engine()->kind() == engine_kind::cpu);
145
146             bool ok = true
147                     /* the algorithm requires barriers while switching
148                      * between parallelization over N and C dimensions */
149                     && mkldnn_thr_syncable() && is_bwd()
150                     && !has_zero_dim_memory()
151                     && utils::one_of(desc()->prop_kind, backward, backward_data)
152                     && utils::everyone_is(data_type,
153                                desc()->data_desc.data_type,
154                                desc()->diff_data_desc.data_type)
155                     && utils::everyone_is(f32, desc()->mean_desc.data_type,
156                                desc()->variance_desc.data_type)
157                     && IMPLICATION(use_scaleshift(),
158                                desc()->diff_data_scaleshift_desc.data_type
159                                                == f32
160                                        && desc()->data_scaleshift_desc.data_type
161                                                == f32)
162                     && IMPLICATION(data_type == bf16, mayiuse(avx512_core))
163                     // TODO: add ndhwc support?
164                     && utils::one_of(
165                                data_pd_.desc()->format, memory_format::nhwc)
166                     && (attr()->has_default_values()
167                                || this->with_relu_post_op())
168                     && hint_fwd_pd_ != nullptr;
169             if (!ok)
170                 return status::unimplemented;
171
172             if (fuse_bn_relu()) {
173                 bn_init_default_ws(this, this->workspace_pd_, 8);
174                 const size_t this_ws_sz
175                     = memory_desc_wrapper(this->workspace_pd()).size();
176
177                 bool ws_ok = true
178                     && hint_fwd_pd_->workspace_pd()
179                     && memory_desc_wrapper(hint_fwd_pd_->workspace_pd()).size()
180                     == this_ws_sz;
181                 if (!ws_ok) return status::unimplemented;
182             }
183
184             init_scratchpad();
185
186             return status::success;
187         }
188
189     private:
190         void init_scratchpad() {
191             using namespace memory_tracking::names;
192             auto scratchpad = scratchpad_registry().registrar();
193             scratchpad.book(key_bnorm_reduction,
194                     sizeof(acc_data_t) * 2 * C() * mkldnn_get_max_threads());
195             scratchpad.book(key_bnorm_tmp_diff_ss, sizeof(acc_data_t) * 2 * C()
196                     * (mkldnn_get_max_threads() + 1));
197             if (data_type == data_type::bf16) {
198                 const int simd_w = 16;
199                 const int nbufs = 2 + !use_global_stats();
200                 const size_t bf16cvt_buf_sz = sizeof(acc_data_t) * nbufs
201                     * mkldnn_get_max_threads() * utils::rnd_up(C(), simd_w);
202                 scratchpad.book(key_bnorm_bf16cvt, bf16cvt_buf_sz);
203             }
204         }
205     };
206
207     typedef typename prec_traits<data_type>::type data_t;
208     typedef float acc_data_t;
209
210     nspc_batch_normalization_bwd_t(const pd_t *apd, const input_vector &inputs,
211             const output_vector &outputs)
212         : cpu_primitive_t(apd, inputs, outputs) {}
213
214     ~nspc_batch_normalization_bwd_t() {}
215
216     virtual void execute(event_t *e) const {
217         execute_backward();
218         e->set_state(event_t::ready);
219     }
220
221 private:
222     void execute_backward() const;
223     const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
224 };
225
226 }
227 }
228 }
229
230 #endif
231
232 // vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s