1 /*******************************************************************************
2 * Copyright 2018 Intel Corporation
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *******************************************************************************/
17 #ifndef CPU_NSPC_BATCH_NORMALIZATION_HPP
18 #define CPU_NSPC_BATCH_NORMALIZATION_HPP
22 #include "c_types_map.hpp"
23 #include "memory_tracking.hpp"
24 #include "type_helpers.hpp"
27 #include "cpu_batch_normalization_pd.hpp"
33 template <data_type_t data_type>
// Forward batch normalization specialized for the NHWC ("nspc") memory
// layout.  `data_type` selects the src/dst element type; per the checks in
// pd_t::init() only f32 and bf16 are accepted, and statistics (mean,
// variance) are always kept in f32 (see acc_data_t below).
34 struct nspc_batch_normalization_fwd_t : public cpu_primitive_t {
// Primitive descriptor: decides whether this implementation applies and
// pre-books all scratch memory it will need.
35 struct pd_t : public cpu_batch_normalization_fwd_pd_t {
36 pd_t(engine_t *engine, const batch_normalization_desc_t *adesc,
37 const primitive_attr_t *attr,
38 const batch_normalization_fwd_pd_t *hint_fwd_pd)
39 : cpu_batch_normalization_fwd_pd_t(
40 engine, adesc, attr, hint_fwd_pd) {}
// Registers this implementation under the name "nspc_bnorm:any".
42 DECLARE_COMMON_PD_T("nspc_bnorm:any", nspc_batch_normalization_fwd_t);
// Returns success only when every applicability check below holds;
// otherwise status::unimplemented so dispatch falls through to the
// next implementation.  (NOTE(review): this chunk is elided — some
// lines of the condition list are not visible here.)
44 virtual status_t init() override {
45 using namespace data_type;
46 using namespace prop_kind;
48 assert(engine()->kind() == engine_kind::cpu);
51 /* the algorithm requires barriers while switching
52 * between parallelization over N and C dimensions */
// Hence a syncable threading runtime is mandatory (e.g. not TBB
// in non-syncable mode), and only forward propagation is handled.
53 && mkldnn_thr_syncable() && is_fwd()
54 && !has_zero_dim_memory()
55 && utils::one_of(desc()->prop_kind, forward_training,
// Source/destination must match the template data type.
57 && desc()->data_desc.data_type == data_type
// When scale/shift is used its type must satisfy the (elided)
// requirement on the following line — presumably f32; confirm
// against the full source.
58 && IMPLICATION(use_scaleshift(),
59 desc()->data_scaleshift_desc.data_type
// Mean/variance statistics must be f32 regardless of data_type.
61 && utils::everyone_is(f32, desc()->mean_desc.data_type,
62 desc()->variance_desc.data_type)
63 // TODO: add ndhwc support?
// Only the 4D channels-last (nhwc) format is supported.
65 data_pd_.desc()->format, memory_format::nhwc)
// bf16 path requires AVX-512 Core ISA.
66 && IMPLICATION(data_type == bf16, mayiuse(avx512_core))
// Only default attributes or a fused ReLU post-op are allowed.
67 && (attr()->has_default_values()
68 || this->with_relu_post_op());
70 return status::unimplemented;
// Training with fused BN+ReLU needs a workspace (8 bits per
// element) to record the ReLU mask for the backward pass.
72 if (is_training() && fuse_bn_relu())
73 bn_init_default_ws(this, this->workspace_pd_, 8);
// Mean/variance are exchanged as 1D f32 vectors of length C()
// whenever stats are an input (inference with given stats) or an
// output (training).
75 if (stats_is_src() || is_training()) {
76 memory_desc_t stats_d;
77 dims_t stats_dims = { C() };
78 mkldnn_memory_desc_init(
79 &stats_d, 1, stats_dims, f32, memory_format::x);
80 mean_pd_ = cpu_memory_t::pd_t(engine_, &stats_d);
81 variance_pd_ = cpu_memory_t::pd_t(engine_, &stats_d);
86 return status::success;
// Books all temporary buffers used by execute_forward().
90 void init_scratchpad() {
91 using namespace memory_tracking::names;
92 auto scratchpad = scratchpad_registry().registrar();
// When stats must be computed (not provided as inputs), book
// per-thread reduction buffers; at least 16 channels per thread
// are reserved (nstl::max(C(), 16)) — presumably to keep threads
// on separate cache lines; confirm against kernel code.
93 if (!stats_is_src()) {
94 const size_t stats_buf_sz = sizeof(acc_data_t)
95 * nstl::max(C(), 16) * mkldnn_get_max_threads();
96 scratchpad.book(key_bnorm_reduction, stats_buf_sz);
97 scratchpad.book(key_bnorm_tmp_mean, stats_buf_sz);
98 scratchpad.book(key_bnorm_tmp_var, stats_buf_sz);
// bf16 execution converts data through f32 staging buffers,
// one row of rnd_up(C, simd_w) floats per buffer per thread.
// (`nbufs` is defined on an elided line.)
100 if (data_type == data_type::bf16) {
101 const int simd_w = 16;
103 const size_t bf16cvt_buf_sz = sizeof(acc_data_t) * nbufs
104 * mkldnn_get_max_threads() * utils::rnd_up(C(), simd_w);
105 scratchpad.book(key_bnorm_bf16cvt, bf16cvt_buf_sz);
// data_t: element type selected by the template parameter;
// acc_data_t: f32 accumulation type used for all statistics math.
110 typedef typename prec_traits<data_type>::type data_t;
111 typedef float acc_data_t;
113 nspc_batch_normalization_fwd_t(const pd_t *apd, const input_vector &inputs,
114 const output_vector &outputs)
115 : cpu_primitive_t(apd, inputs, outputs) {}
117 ~nspc_batch_normalization_fwd_t() {}
// Runs the forward computation (body partially elided here) and then
// marks the event ready to signal completion.
119 virtual void execute(event_t *e) const {
121 e->set_state(event_t::ready);
125 void execute_forward() const;
// Narrowing accessor to this implementation's own pd_t.
126 const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
129 template <data_type_t data_type>
// Backward batch normalization specialized for the NHWC ("nspc") memory
// layout.  Computes diff_src (and, for prop_kind backward, diff_scaleshift)
// from diff_dst; accumulation is done in f32 (acc_data_t).
130 struct nspc_batch_normalization_bwd_t : public cpu_primitive_t {
// Primitive descriptor: applicability checks plus scratchpad booking.
131 struct pd_t : public cpu_batch_normalization_bwd_pd_t {
132 pd_t(engine_t *engine, const batch_normalization_desc_t *adesc,
133 const primitive_attr_t *attr,
134 const batch_normalization_fwd_pd_t *hint_fwd_pd)
135 : cpu_batch_normalization_bwd_pd_t(
136 engine, adesc, attr, hint_fwd_pd) {}
// Registers this implementation under the name "nspc_bnorm:any".
138 DECLARE_COMMON_PD_T("nspc_bnorm:any", nspc_batch_normalization_bwd_t);
// Returns success only when all checks below hold, otherwise
// status::unimplemented.  (NOTE(review): this chunk is elided —
// some lines of the condition list are not visible here.)
140 virtual status_t init() override {
141 using namespace data_type;
142 using namespace prop_kind;
144 assert(engine()->kind() == engine_kind::cpu);
147 /* the algorithm requires barriers while switching
148 * between parallelization over N and C dimensions */
// Hence a syncable threading runtime is mandatory, and only
// backward propagation is handled by this implementation.
149 && mkldnn_thr_syncable() && is_bwd()
150 && !has_zero_dim_memory()
151 && utils::one_of(desc()->prop_kind, backward, backward_data)
// src and diff tensors must both use the template data type.
152 && utils::everyone_is(data_type,
153 desc()->data_desc.data_type,
154 desc()->diff_data_desc.data_type)
// Statistics are always f32.
155 && utils::everyone_is(f32, desc()->mean_desc.data_type,
156 desc()->variance_desc.data_type)
// Scale/shift (and its diff) types must satisfy the partially
// elided requirement below — presumably f32; confirm against
// the full source.
157 && IMPLICATION(use_scaleshift(),
158 desc()->diff_data_scaleshift_desc.data_type
160 && desc()->data_scaleshift_desc.data_type
// bf16 path requires AVX-512 Core ISA.
162 && IMPLICATION(data_type == bf16, mayiuse(avx512_core))
163 // TODO: add ndhwc support?
// Only the 4D channels-last (nhwc) format is supported.
165 data_pd_.desc()->format, memory_format::nhwc)
166 && (attr()->has_default_values()
167 || this->with_relu_post_op())
// Backward needs the forward pd as a hint (e.g. for workspace).
168 && hint_fwd_pd_ != nullptr;
170 return status::unimplemented;
// Fused BN+ReLU backward consumes the ReLU mask workspace that
// forward produced; verify our 8-bit workspace layout matches the
// one the forward hint pd created (size comparison partially
// elided below).
172 if (fuse_bn_relu()) {
173 bn_init_default_ws(this, this->workspace_pd_, 8);
174 const size_t this_ws_sz
175 = memory_desc_wrapper(this->workspace_pd()).size();
178 && hint_fwd_pd_->workspace_pd()
179 && memory_desc_wrapper(hint_fwd_pd_->workspace_pd()).size()
181 if (!ws_ok) return status::unimplemented;
186 return status::success;
// Books all temporary buffers used by execute_backward().
190 void init_scratchpad() {
191 using namespace memory_tracking::names;
192 auto scratchpad = scratchpad_registry().registrar();
// Per-thread reduction space: 2 * C floats per thread —
// presumably one C-sized vector each for the two per-channel
// reductions (diff_gamma / diff_beta); confirm in the kernel.
193 scratchpad.book(key_bnorm_reduction,
194 sizeof(acc_data_t) * 2 * C() * mkldnn_get_max_threads());
// Scale/shift diff staging: 2 * C floats for each thread plus one
// extra slot for the final reduced result.
195 scratchpad.book(key_bnorm_tmp_diff_ss, sizeof(acc_data_t) * 2 * C()
196 * (mkldnn_get_max_threads() + 1));
// bf16 execution converts tensors through f32 staging rows of
// rnd_up(C, simd_w) floats; one extra buffer is needed when
// global stats are not used.
197 if (data_type == data_type::bf16) {
198 const int simd_w = 16;
199 const int nbufs = 2 + !use_global_stats();
200 const size_t bf16cvt_buf_sz = sizeof(acc_data_t) * nbufs
201 * mkldnn_get_max_threads() * utils::rnd_up(C(), simd_w);
202 scratchpad.book(key_bnorm_bf16cvt, bf16cvt_buf_sz);
// data_t: element type selected by the template parameter;
// acc_data_t: f32 accumulation type used for all statistics math.
207 typedef typename prec_traits<data_type>::type data_t;
208 typedef float acc_data_t;
210 nspc_batch_normalization_bwd_t(const pd_t *apd, const input_vector &inputs,
211 const output_vector &outputs)
212 : cpu_primitive_t(apd, inputs, outputs) {}
214 ~nspc_batch_normalization_bwd_t() {}
// Runs the backward computation (body partially elided here) and then
// marks the event ready to signal completion.
216 virtual void execute(event_t *e) const {
218 e->set_state(event_t::ready);
222 void execute_backward() const;
// Narrowing accessor to this implementation's own pd_t.
223 const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
232 // vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s