Publishing 2019 R1 content
[platform/upstream/dldt.git] / inference-engine/thirdparty/mkl-dnn/src/cpu/jit_avx512_common_convolution.hpp
index 42080cc..e500218 100644
 #define CPU_JIT_AVX512_COMMON_CONVOLUTION_HPP
 
 #include "c_types_map.hpp"
+#include "memory_tracking.hpp"
+#include "mkldnn_thread.hpp"
+#include "utils.hpp"
+
+#include "cpu_barrier.hpp"
 #include "cpu_convolution_pd.hpp"
-#include "cpu_engine.hpp"
-#include "jit_avx512_common_conv_kernel.hpp"
-#include "jit_transpose_src_utils.hpp"
 #include "cpu_reducer.hpp"
-#include "cpu_barrier.hpp"
+
+#include "jit_transpose_src_utils.hpp"
+#include "jit_avx512_common_conv_kernel.hpp"
 
 namespace mkldnn {
 namespace impl {
 namespace cpu {
 
-template <bool with_relu, impl::data_type_t src_type,
+template <impl::data_type_t src_type,
          impl::data_type_t wei_type = src_type,
          impl::data_type_t dst_type = src_type>
-struct _jit_avx512_common_convolution_fwd_t : public cpu_primitive_t {
-    struct pd_t : public _cpu_convolution_fwd_pd_t<with_relu> {
-        pd_t(engine_t *engine, const typename pd_t::base_desc_t *adesc,
+struct jit_avx512_common_convolution_fwd_t : public cpu_primitive_t {
+    struct pd_t : public cpu_convolution_fwd_pd_t {
+        pd_t(engine_t *engine, const convolution_desc_t *adesc,
                 const primitive_attr_t *attr,
                 const typename pd_t::base_class *hint_fwd_pd)
-            : _cpu_convolution_fwd_pd_t<with_relu>(engine, adesc, attr,
-                    hint_fwd_pd)
+            : cpu_convolution_fwd_pd_t(engine, adesc, attr, hint_fwd_pd)
             , jcp_()
         {
         }
 
         DECLARE_COMMON_PD_T(
                 JIT_IMPL_NAME_HELPER("jit:", avx512_common, ""),
-                _jit_avx512_common_convolution_fwd_t);
+                jit_avx512_common_convolution_fwd_t);
 
         virtual status_t init() override
         {
             using namespace prop_kind;
             assert(this->engine()->kind() == engine_kind::cpu);
             bool ok = true
-                    && utils::one_of(this->cdesc_().prop_kind, forward_training,
+                    && utils::one_of(this->desc()->prop_kind, forward_training,
                                forward_inference)
-                    && this->cdesc_().alg_kind == alg_kind::convolution_direct
+                    && utils::one_of(this->desc()->alg_kind,
+                               alg_kind::convolution_auto,
+                               alg_kind::convolution_direct)
                     && !this->has_zero_dim_memory()
-                    && this->cdesc_().src_desc.data_type == src_type
-                    && this->cdesc_().weights_desc.data_type == wei_type
-                    && this->cdesc_().dst_desc.data_type == dst_type
+                    && this->desc()->src_desc.data_type == src_type
+                    && this->desc()->weights_desc.data_type == wei_type
+                    && this->desc()->dst_desc.data_type == dst_type
                     && IMPLICATION(this->with_bias(), dst_type
-                                       == this->cdesc_().bias_desc.data_type)
-                    && !(with_relu && this->negative_slope()!= 0.
-                                   && dst_type == data_type::s32
-                                   && src_type == data_type::s16
-                                   && wei_type == data_type::s16);
+                                       == this->desc()->bias_desc.data_type);
             if (!ok)
                 return status::unimplemented;
 
-            return jit_avx512_common_conv_fwd_kernel::init_conf(
-                    jcp_, this->cdesc_(), this->src_pd_, this->weights_pd_,
+            status_t status = jit_avx512_common_conv_fwd_kernel::init_conf(
+                    jcp_, *this->desc(), this->src_pd_, this->weights_pd_,
                     this->dst_pd_,this->bias_pd_, *this->attr(),
-                    mkldnn_get_max_threads(), with_relu, this->negative_slope());
-        }
+                    mkldnn_get_max_threads());
+            if (status != status::success) return status;
 
-        inline int ndims() { return this->cdesc_().src_desc.ndims; }
+            auto scratchpad = scratchpad_registry().registrar();
+            jit_avx512_common_conv_fwd_kernel::init_scratchpad(scratchpad,
+                    jcp_);
+
+            if (status == status::success
+                    && this->desc()->alg_kind == alg_kind::convolution_auto)
+                CHECK(this->set_alg_kind(alg_kind::convolution_direct));
+            return status;
+        }
 
         jit_conv_conf_t jcp_;
     };
 
-    _jit_avx512_common_convolution_fwd_t(const pd_t *pd,
+    jit_avx512_common_convolution_fwd_t(const pd_t *apd,
             const input_vector &inputs, const output_vector &outputs)
-        : cpu_primitive_t(&conf_, inputs, outputs), conf_(*pd)
-        , padded_bias_(nullptr)
+        : cpu_primitive_t(apd, inputs, outputs)
     {
-        kernel_ = new jit_avx512_common_conv_fwd_kernel(conf_.jcp_,
-                    *conf_.attr());
-
-        if (conf_.want_padded_bias()) {
-            const auto &j = conf_.jcp_;
-            assert(j.ngroups == 1);
-            padded_bias_ = (dst_data_t *)malloc(sizeof(dst_data_t) * j.oc, 64);
-            for (int oc = j.oc_without_padding; oc < j.oc; ++oc)
-                padded_bias_[oc] = 0;
-        }
+        kernel_ = new jit_avx512_common_conv_fwd_kernel(pd()->jcp_,
+                    *pd()->attr());
     }
-    ~_jit_avx512_common_convolution_fwd_t() {
-        delete kernel_;
-        free(padded_bias_);
-    };
+    ~jit_avx512_common_convolution_fwd_t() { delete kernel_; }
 
     typedef typename prec_traits<src_type>::type src_data_t;
     typedef typename prec_traits<wei_type>::type wei_data_t;
     typedef typename prec_traits<dst_type>::type dst_data_t;
 
-    virtual void execute(event_t *e)
+    virtual void execute(event_t *e) const
     {
-        if (conf_.ndims() == 3)
+        if (pd()->ndims() == 3)
             execute_forward_1d();
-        else if (conf_.ndims() == 4)
+        else if (pd()->ndims() == 4)
             execute_forward_2d();
-        else if (conf_.ndims() == 5)
+        else if (pd()->ndims() == 5)
             execute_forward_3d();
         else
             assert(false);
+
+        if (pd()->wants_zero_pad_dst())
+            output_memory_primitive(0)->zero_pad();
+
         e->set_state(event_t::ready);
     }
 
 private:
-    void execute_forward_1d();
-    void execute_forward_2d();
-    void execute_forward_3d();
-    pd_t conf_;
+    void prepare_padded_bias(const dst_data_t *&bias) const;
+    void execute_forward_1d() const;
+    void execute_forward_2d() const;
+    void execute_forward_3d() const;
+    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
+
     jit_avx512_common_conv_fwd_kernel *kernel_;
-    dst_data_t *padded_bias_;
 };
 
-template <impl::data_type_t src_type, impl::data_type_t wei_type = src_type,
-         impl::data_type_t dst_type = src_type>
-using jit_avx512_common_convolution_fwd_t =
-    _jit_avx512_common_convolution_fwd_t<false, src_type, wei_type, dst_type>;
-
-template <impl::data_type_t src_type, impl::data_type_t wei_type = src_type,
-         impl::data_type_t dst_type = src_type>
-using jit_avx512_common_convolution_relu_t =
-    _jit_avx512_common_convolution_fwd_t<true, src_type, wei_type, dst_type>;
-
 template <impl::data_type_t diff_dst_type,
           impl::data_type_t wei_type = diff_dst_type,
           impl::data_type_t diff_src_type = diff_dst_type>
@@ -159,19 +151,27 @@ struct jit_avx512_common_convolution_bwd_data_t: public cpu_primitive_t {
             bool ok = true
                 && this->set_default_params() == status::success
                 && utils::one_of(this->desc()->prop_kind, backward_data) // XXX (this->!)
+                && utils::one_of(this->desc()->alg_kind,
+                           alg_kind::convolution_auto,
+                           alg_kind::convolution_direct)
                 && !this->has_zero_dim_memory()
-                && this->desc()->alg_kind == alg_kind::convolution_direct
                 && this->desc()->diff_dst_desc.data_type == diff_dst_type
                 && this->desc()->weights_desc.data_type == wei_type
                 && this->desc()->diff_src_desc.data_type == diff_src_type;
             if (!ok) return status::unimplemented;
 
-            return jit_avx512_common_conv_bwd_data_kernel_f32::init_conf(
-                    jcp_,*this->desc(), *this->diff_src_pd_.desc(),
-                    *this->weights_pd_.desc(), *this->diff_dst_pd_.desc());
-        }
+            status_t status =
+                jit_avx512_common_conv_bwd_data_kernel_f32::init_conf(jcp_,
+                        *this->desc(), *this->diff_src_pd_.desc(),
+                        *this->weights_pd_.desc(), *this->diff_dst_pd_.desc());
+            if (status != status::success) return status;
 
-        inline int ndims() { return this->desc()->diff_src_desc.ndims; }
+            auto scratchpad = scratchpad_registry().registrar();
+            jit_avx512_common_conv_bwd_data_kernel_f32::init_scratchpad(
+                    scratchpad, jcp_);
+
+            return status::success;
+        }
 
         inline memory_format_t src_format()
         {
@@ -206,30 +206,30 @@ struct jit_avx512_common_convolution_bwd_data_t: public cpu_primitive_t {
                 CHECK(this->diff_dst_pd_.set_format(src_format()));
             if (this->weights_pd_.desc()->format == any)
                 CHECK(this->weights_pd_.set_format(wei_format()));
+            if (this->desc()->alg_kind == alg_kind::convolution_auto)
+                CHECK(this->set_alg_kind(alg_kind::convolution_direct));
             return status::success;
         }
     };
 
-    jit_avx512_common_convolution_bwd_data_t(const pd_t *pd,
+    jit_avx512_common_convolution_bwd_data_t(const pd_t *apd,
             const input_vector &inputs, const output_vector &outputs)
-        : cpu_primitive_t(&conf_, inputs, outputs), conf_(*pd)
-    {
-        kernel_ = new jit_avx512_common_conv_bwd_data_kernel_f32(conf_.jcp_);
-    }
+        : cpu_primitive_t(apd, inputs, outputs)
+    { kernel_ = new jit_avx512_common_conv_bwd_data_kernel_f32(pd()->jcp_); }
     ~jit_avx512_common_convolution_bwd_data_t() { delete kernel_; };
 
     typedef typename prec_traits<diff_dst_type>::type diff_dst_data_t;
     typedef typename prec_traits<wei_type>::type wei_data_t;
     typedef typename prec_traits<diff_src_type>::type diff_src_data_t;
 
-    virtual void execute(event_t *e) {
-        switch (conf_.desc()->prop_kind) {
+    virtual void execute(event_t *e) const {
+        switch (pd()->desc()->prop_kind) {
         case prop_kind::backward_data:
-            if (conf_.ndims() == 3)
+            if (pd()->ndims() == 3)
                 execute_backward_data_1d();
-            else if (conf_.ndims() == 4)
+            else if (pd()->ndims() == 4)
                 execute_backward_data_2d();
-            else if (conf_.ndims() == 5)
+            else if (pd()->ndims() == 5)
                 execute_backward_data_3d();
             else
                 assert(false);
@@ -241,10 +241,11 @@ struct jit_avx512_common_convolution_bwd_data_t: public cpu_primitive_t {
     }
 
 private:
-    void execute_backward_data_1d();
-    void execute_backward_data_2d();
-    void execute_backward_data_3d();
-    pd_t conf_;
+    void execute_backward_data_1d() const;
+    void execute_backward_data_2d() const;
+    void execute_backward_data_3d() const;
+    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
+
     jit_avx512_common_conv_bwd_data_kernel_f32 *kernel_;
 };
 
@@ -267,7 +268,9 @@ struct jit_avx512_common_convolution_bwd_weights_t: public cpu_primitive_t {
             assert(this->engine()->kind() == engine_kind::cpu);
             bool ok = true
                 && this->desc()->prop_kind == prop_kind::backward_weights
-                && this->desc()->alg_kind == alg_kind::convolution_direct
+                && utils::one_of(this->desc()->alg_kind,
+                           alg_kind::convolution_auto,
+                           alg_kind::convolution_direct)
                 && !this->has_zero_dim_memory()
                 && this->desc()->src_desc.data_type == src_type
                 && this->desc()->diff_dst_desc.data_type == diff_dst_type
@@ -275,12 +278,27 @@ struct jit_avx512_common_convolution_bwd_weights_t: public cpu_primitive_t {
                     == diff_weights_type;
             if (!ok) return status::unimplemented;
 
-            return jit_avx512_common_conv_bwd_weights_kernel_f32::init_conf(
-                    jcp_, *this->desc(), this->src_pd_, this->diff_weights_pd_,
-                    this->diff_bias_pd_, this->diff_dst_pd_);
-        }
+            status_t status =
+                jit_avx512_common_conv_bwd_weights_kernel_f32::init_conf(jcp_,
+                        *this->desc(), this->src_pd_, this->diff_weights_pd_,
+                        this->diff_bias_pd_, this->diff_dst_pd_);
+            if (status != status::success) return status;
+
+            init_balancers();
 
-        inline int ndims() { return this->desc()->src_desc.ndims; }
+            auto scratchpad = scratchpad_registry().registrar();
+            jit_avx512_common_conv_bwd_weights_kernel_f32::init_scratchpad(
+                    scratchpad, jcp_);
+
+            auto reducer_bia_scratchpad = memory_tracking::registrar_t(
+                    scratchpad, memory_tracking::names::prefix_reducer_bia);
+            reducer_bia_conf_.init_scratchpad(reducer_bia_scratchpad);
+
+            if (status == status::success &&
+                    this->desc()->alg_kind == alg_kind::convolution_auto)
+                CHECK(this->set_alg_kind(alg_kind::convolution_direct));
+            return status;
+        }
 
         inline memory_format_t src_format()
         {
@@ -297,29 +315,37 @@ struct jit_avx512_common_convolution_bwd_weights_t: public cpu_primitive_t {
                       OIdhw16o16i);
         }
 
-
         jit_conv_conf_t jcp_;
+        typename cpu_reducer_t<diff_weights_type>::conf_t reducer_bia_conf_;
 
-        protected:
-            virtual status_t set_default_params() override {
-                using namespace memory_format;
+    protected:
+        virtual status_t set_default_params() override {
+            using namespace memory_format;
 
-                if (this->src_pd_.desc()->format == any)
-                    CHECK(this->src_pd_.set_format(src_format()));
-                if (this->diff_weights_pd_.desc()->format == any)
-                    CHECK(this->diff_weights_pd_.set_format(wei_format()));
-                if (this->diff_dst_pd_.desc()->format == any)
-                    CHECK(this->diff_dst_pd_.set_format(src_format()));
+            if (this->src_pd_.desc()->format == any)
+                CHECK(this->src_pd_.set_format(src_format()));
+            if (this->diff_weights_pd_.desc()->format == any)
+                CHECK(this->diff_weights_pd_.set_format(wei_format()));
+            if (this->diff_dst_pd_.desc()->format == any)
+                CHECK(this->diff_dst_pd_.set_format(src_format()));
 
-                return status::success;
-            }
+            return status::success;
+        }
 
+    private:
+        void init_balancers() {
+            const size_t max_buffer_size = jcp_.nthr * 3 * 5 * 5 * 16 * 16;
+            if (with_bias()) {
+                reducer_bia_conf_.init(reduce_balancer_t(jcp_.nthr,
+                            jcp_.oc_block, jcp_.ngroups * jcp_.nb_oc, jcp_.mb,
+                            max_buffer_size));
+            }
+        }
     };
 
-    jit_avx512_common_convolution_bwd_weights_t(const pd_t *pd,
+    jit_avx512_common_convolution_bwd_weights_t(const pd_t *apd,
             const input_vector &inputs, const output_vector &outputs);
     ~jit_avx512_common_convolution_bwd_weights_t() {
-
         delete kernel_;
         if (trans_kernel_)
             delete trans_kernel_;
@@ -328,53 +354,37 @@ struct jit_avx512_common_convolution_bwd_weights_t: public cpu_primitive_t {
         if (acc_ker_)
             delete acc_ker_;
         delete reducer_bias_;
-        free(padded_bias_);
-
-        free(tr_src_);
-        free(ws_reduction_);
-
-        free(tr_src_bctx_);
-        free(tr_diff_dst_bctx_);
-
-        free(tr_diff_dst_);
     }
 
     typedef typename prec_traits<src_type>::type src_data_t;
     typedef typename prec_traits<diff_dst_type>::type diff_dst_data_t;
     typedef typename prec_traits<diff_weights_type>::type diff_weights_data_t;
 
-    virtual void execute(event_t *e) {
+    virtual void execute(event_t *e) const {
         execute_backward_weights();
         e->set_state(event_t::ready);
     }
 
 private:
-    void execute_backward_weights();
-    void balance();
-
+    void execute_backward_weights() const;
+    void prepare_scratchpad_data() const;
     struct thread_info_t;
-    void compute_diff_weights(const thread_info_t *);
-    void compute_diff_weights_3d(const thread_info_t *);
-    void reduce_diff_weights(const thread_info_t *);
-    void reduce_diff_weights_3d(const thread_info_t *);
-    void compute_diff_bias(const thread_info_t *);
-    void compute_diff_bias_3d(const thread_info_t *);
+    void compute_diff_weights(const thread_info_t *) const;
+    void compute_diff_weights_3d(const thread_info_t *) const;
+    void reduce_diff_weights(const thread_info_t *) const;
+    void reduce_diff_weights_3d(const thread_info_t *) const;
+    void compute_diff_bias(const thread_info_t *) const;
+    void compute_diff_bias_3d(const thread_info_t *) const;
 
-    pd_t conf_;
+    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
+
+    int nthr_, nthr_mb_, nthr_g_, nthr_oc_b_, nthr_ic_b_;
 
     jit_avx512_common_conv_bwd_weights_kernel_f32 *kernel_;
     jit_trans_src_t *trans_kernel_;
     jit_trans_dst_t *trans_dst_kernel_;
     cpu_accumulator_1d_t<diff_weights_type> *acc_ker_;
     cpu_reducer_t<diff_weights_type> *reducer_bias_;
-    diff_weights_data_t *padded_bias_;
-
-    src_data_t *tr_src_;
-    diff_dst_data_t *tr_diff_dst_;
-    diff_weights_data_t *ws_reduction_;
-
-    int nthr_, nthr_mb_, nthr_g_, nthr_oc_b_, nthr_ic_b_;
-    simple_barrier::ctx_t *tr_src_bctx_, *tr_diff_dst_bctx_, reduction_bctx_;
 };
 
 }