Updated the README file after moving the CMake scripts to the root folder
[platform/upstream/dldt.git] / inference-engine / thirdparty / mkl-dnn / src / cpu / jit_uni_pooling.hpp
1 /*******************************************************************************
2 * Copyright 2017-2018 Intel Corporation
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *     http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *******************************************************************************/
16
17 #ifndef CPU_JIT_UNI_POOLING_HPP
18 #define CPU_JIT_UNI_POOLING_HPP
19
20 #include <assert.h>
21
22 #include "c_types_map.hpp"
23 #include "cpu_pooling_pd.hpp"
24 #include "cpu_engine.hpp"
25 #include "jit_uni_pool_kernel.hpp"
26 #include "type_helpers.hpp"
27 #include "utils.hpp"
28
29 namespace mkldnn {
30 namespace impl {
31 namespace cpu {
32
33 template <cpu_isa_t isa, impl::data_type_t d_type>
34 struct jit_uni_pooling_fwd_t: public cpu_primitive_t {
35     struct pd_t: public cpu_pooling_fwd_pd_t {
36         pd_t(engine_t *engine, const pooling_desc_t *adesc,
37                 const primitive_attr_t *attr,
38                 const pooling_fwd_pd_t *hint_fwd_pd)
39             : cpu_pooling_fwd_pd_t(engine, adesc, attr, hint_fwd_pd) {}
40
41         DECLARE_COMMON_PD_T(
42                 JIT_IMPL_NAME_HELPER("jit:", isa, ""),
43                 jit_uni_pooling_fwd_t<isa, d_type>);
44
45         virtual status_t init() override {
46             using namespace prop_kind;
47             using namespace alg_kind;
48             using namespace utils;
49             assert(engine()->kind() == engine_kind::cpu);
50             bool ok = true
51                 && mayiuse(isa)
52                 && IMPLICATION(d_type == data_type::bf16, mayiuse(avx512_core))
53                 && set_default_params() == status::success
54                 && one_of(desc()->prop_kind, forward_training,
55                         forward_inference)
56                 && one_of(desc()->alg_kind, pooling_max,
57                         pooling_avg_include_padding,
58                         pooling_avg_exclude_padding)
59                 && !has_zero_dim_memory()
60                 && src_pd()->desc()->data_type == d_type
61                 && dst_pd()->desc()->data_type == d_type
62                 && everyone_is(desired_fmt(), src_pd()->desc()->format,
63                         dst_pd()->desc()->format)
64                 && attr()->has_default_values();
65             if (!ok) return status::unimplemented;
66
67             bool is_training = desc_.prop_kind == forward_training;
68
69             if (desc()->alg_kind == pooling_max && is_training) {
70                 auto indices_desc = *dst_pd()->desc();
71                 indices_desc.data_type = pooling_index_data_type(desc());
72                 ws_pd_ = cpu_memory_t::pd_t(engine_, &indices_desc);
73             }
74
75             return jit_uni_pool_kernel<isa>::init_conf(jpp_, desc_,
76                     src_pd_.desc(), dst_pd_.desc());
77         }
78         inline memory_format_t desired_fmt()
79         {
80             using namespace memory_format;
81             return (desc()->src_desc.ndims == 4)
82                 ? isa == avx512_common ? nChw16c : nChw8c
83                 : isa == avx512_common ? nCdhw16c : nCdhw8c;
84         }
85
86         jit_pool_conf_t jpp_;
87
88     protected:
89         virtual status_t set_default_params() override {
90             if (dst_pd_.desc()->format == memory_format::any)
91                CHECK(dst_pd_.set_format(desired_fmt()));
92             return status::success;
93         }
94     };
95
96     jit_uni_pooling_fwd_t(const pd_t *apd, const input_vector &inputs,
97             const output_vector &outputs)
98         : cpu_primitive_t(apd, inputs, outputs)
99     { kernel_ = new jit_uni_pool_kernel<isa>(pd()->jpp_); }
100
101     ~jit_uni_pooling_fwd_t() { delete kernel_; }
102
103     typedef typename prec_traits<d_type>::type data_t;
104
105     virtual void execute(event_t *e) const {
106         if (pd()->jpp_.ndims == 5) execute_forward_3d();
107         else execute_forward();
108         e->set_state(event_t::ready);
109     }
110
111 private:
112     void execute_forward() const;
113     void execute_forward_3d() const;
114     const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
115     jit_uni_pool_kernel<isa> *kernel_;
116 };
117
118 template <cpu_isa_t isa, impl::data_type_t d_type>
119 struct jit_uni_pooling_bwd_t: public cpu_primitive_t {
120     struct pd_t: public cpu_pooling_bwd_pd_t {
121         pd_t(engine_t *engine, const pooling_desc_t *adesc,
122                 const primitive_attr_t *attr,
123                 const pooling_fwd_pd_t *hint_fwd_pd)
124             : cpu_pooling_bwd_pd_t(engine, adesc, attr, hint_fwd_pd) {}
125
126         DECLARE_COMMON_PD_T(
127                 JIT_IMPL_NAME_HELPER("jit:", isa, ""),
128                 jit_uni_pooling_bwd_t<isa, d_type>);
129
130         virtual status_t init() override {
131             using namespace prop_kind;
132             using namespace alg_kind;
133             using namespace utils;
134
135             assert(engine()->kind() == engine_kind::cpu);
136             bool ok = true
137                 && mayiuse(isa)
138                 && IMPLICATION(d_type == data_type::bf16, mayiuse(avx512_core))
139                 && set_default_params() == status::success
140                 && one_of(desc()->prop_kind, backward, backward_data)
141                 && one_of(desc()->alg_kind, pooling_max,
142                         pooling_avg_include_padding,
143                         pooling_avg_exclude_padding)
144                 && !has_zero_dim_memory()
145                 && everyone_is(desired_fmt(), diff_src_pd()->desc()->format,
146                         diff_dst_pd()->desc()->format)
147                 && diff_src_pd()->desc()->data_type == d_type
148                 && diff_dst_pd()->desc()->data_type == d_type
149                 && IMPLICATION(desc()->alg_kind == pooling_max,
150                         hint_fwd_pd_ && hint_fwd_pd_->workspace_pd()
151                         && hint_fwd_pd_->workspace_pd()->desc()->format
152                                 == desired_fmt())
153                 && attr()->has_default_values();
154             if (!ok) return status::unimplemented;
155
156             if (desc()->alg_kind == pooling_max)
157                 ws_pd_ = *(cpu_memory_t::pd_t*)hint_fwd_pd_->workspace_pd();
158
159             return jit_uni_pool_kernel<isa>::init_conf(jpp_, desc_,
160                     diff_src_pd_.desc(), diff_dst_pd_.desc());
161         }
162
163         inline memory_format_t desired_fmt()
164         {
165             using namespace memory_format;
166             return (desc()->diff_src_desc.ndims == 4)
167                 ? isa == avx512_common ? nChw16c : nChw8c
168                 : isa == avx512_common ? nCdhw16c : nCdhw8c;
169         }
170
171         jit_pool_conf_t jpp_;
172
173     protected:
174         virtual status_t set_default_params() override {
175             if (diff_src_pd_.desc()->format == memory_format::any)
176                CHECK(diff_src_pd_.set_format(desired_fmt()));
177            return status::success;
178         }
179     };
180
181     jit_uni_pooling_bwd_t(const pd_t *apd, const input_vector &inputs,
182             const output_vector &outputs)
183         : cpu_primitive_t(apd, inputs, outputs)
184     { kernel_ = new jit_uni_pool_kernel<isa>(pd()->jpp_); }
185
186     ~jit_uni_pooling_bwd_t() { delete kernel_; }
187
188     typedef typename prec_traits<d_type>::type data_t;
189
190     virtual void execute(event_t *e) const {
191         if (pd()->jpp_.ndims == 5) execute_backward_3d();
192         else execute_backward();
193         e->set_state(event_t::ready);
194     }
195
196 private:
197     void execute_backward() const;
198     void execute_backward_3d() const;
199     const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
200     jit_uni_pool_kernel<isa> *kernel_;
201 };
202
203 }
204 }
205 }
206
207 #endif
208
209 // vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s