/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#ifndef CPU_JIT_UNI_PLANAR_CONVOLUTION_HPP
#define CPU_JIT_UNI_PLANAR_CONVOLUTION_HPP

#include "c_types_map.hpp"
#include "cpu_convolution_pd.hpp"
#include "cpu_engine.hpp"
#include "cpu_reducer.hpp"
#include "jit_primitive_conf.hpp"
#include "jit_uni_planar_conv_kernel_f32.hpp"
#include "mkldnn_thread.hpp"
#include "jit_uni_depthwise.hpp"

namespace mkldnn {
namespace impl {
namespace cpu {

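/* Forward f32 convolution for planar (nchw / ncdhw) layouts, implemented
 * with a JIT-generated kernel; the template parameter selects the ISA. */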
template <cpu_isa_t isa>
struct _jit_uni_planar_convolution_fwd_t: public cpu_primitive_t {
    struct pd_t: public cpu_convolution_fwd_pd_t {
        pd_t(engine_t *engine, const convolution_desc_t *adesc,
                const primitive_attr_t *attr,
                const typename pd_t::base_class *hint_fwd_pd)
            : cpu_convolution_fwd_pd_t(engine, adesc, attr, hint_fwd_pd)
            , jcp_() {}

        DECLARE_COMMON_PD_T(
                JIT_IMPL_NAME_HELPER("jit_planar:", isa, ""),
                _jit_uni_planar_convolution_fwd_t<isa>);

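        /* Accept only direct forward f32 convolution (bias, if present,
         * must also be f32) without zero-dim memory; on success the JIT
         * configuration jcp_ is filled in by the kernel's init_conf(). */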
        virtual status_t init() override {
            using namespace prop_kind;
            assert(this->engine()->kind() == engine_kind::cpu);
            bool ok = true
                && this->set_default_params() == status::success
                && utils::one_of(this->desc()->prop_kind, forward_training,
                        forward_inference)
                && this->desc()->alg_kind == alg_kind::convolution_direct
                && !this->has_zero_dim_memory()
                && utils::everyone_is(data_type::f32,
                        this->desc()->src_desc.data_type,
                        this->desc()->weights_desc.data_type,
                        this->desc()->dst_desc.data_type)
                && IMPLICATION(this->with_bias(),
                        data_type::f32 == this->desc()->bias_desc.data_type);
            if (!ok) return status::unimplemented;

            status_t sts = jit_uni_planar_conv_fwd_kernel_f32<isa>::init_conf(
                    jcp_, *this->desc(), *this->src_pd_.desc(),
                    *this->weights_pd_.desc(), *this->dst_pd_.desc(),
                    *this->attr());

            return sts;
        }

        jit_conv_conf_t jcp_;

    protected:
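        /* When the user leaves memory formats as 'any', plain planar
         * layouts are chosen: nchw / ncdhw for data, oihw / oidhw for
         * weights, and x for bias. */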
        virtual status_t set_default_params() override {
            using namespace memory_format;

            if (this->src_pd_.desc()->format == any)
                CHECK(this->src_pd_.set_format(this->ndims() == 4 ? nchw : ncdhw));
            if (this->dst_pd_.desc()->format == any)
                CHECK(this->dst_pd_.set_format(this->ndims() == 4 ? nchw : ncdhw));
            if (this->weights_pd_.desc()->format == any)
                CHECK(this->weights_pd_.set_format(this->ndims() == 4 ? oihw : oidhw));
            if (this->bias_pd_.desc()->format == any)
                CHECK(this->bias_pd_.set_format(x));
            return status::success;
        }
    };

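    /* The JIT kernel is generated from the configuration computed in
     * pd_t::init() and owned by the primitive. */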
    _jit_uni_planar_convolution_fwd_t(const pd_t *apd,
            const input_vector &inputs, const output_vector &outputs)
        : cpu_primitive_t(apd, inputs, outputs) {
        kernel_ = new jit_uni_planar_conv_fwd_kernel_f32<isa>(pd()->jcp_,
                *pd()->attr());
    }

    ~_jit_uni_planar_convolution_fwd_t() {
        delete kernel_;
    }

    typedef typename prec_traits<data_type::f32>::type data_t;

    virtual void execute(event_t *e) const {
        execute_forward();
        e->set_state(event_t::ready);
    }

private:
    void execute_forward() const;

    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
    jit_uni_planar_conv_fwd_kernel_f32<isa> *kernel_;
};

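/* Concrete instantiations for the supported ISAs. */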
using jit_avx512_common_planar_convolution_fwd_t = _jit_uni_planar_convolution_fwd_t<avx512_common>;
using jit_avx2_planar_convolution_fwd_t = _jit_uni_planar_convolution_fwd_t<avx2>;

}
}
}

#endif