Publishing 2019 R1 content
diff --git a/inference-engine/thirdparty/mkl-dnn/src/cpu/jit_avx512_common_1x1_convolution.hpp b/inference-engine/thirdparty/mkl-dnn/src/cpu/jit_avx512_common_1x1_convolution.hpp
index 7878697..67e8dab 100644
--- a/inference-engine/thirdparty/mkl-dnn/src/cpu/jit_avx512_common_1x1_convolution.hpp
+++ b/inference-engine/thirdparty/mkl-dnn/src/cpu/jit_avx512_common_1x1_convolution.hpp
 #define CPU_JIT_AVX512_COMMON_1x1_CONVOLUTION_HPP
 
 #include "c_types_map.hpp"
+#include "memory_tracking.hpp"
+#include "mkldnn_thread.hpp"
+#include "utils.hpp"
+
 #include "cpu_convolution_pd.hpp"
 #include "cpu_engine.hpp"
 #include "cpu_reducer.hpp"
+
 #include "jit_avx512_common_1x1_conv_kernel.hpp"
 #include "jit_uni_1x1_conv_utils.hpp"
 #include "jit_transpose_src_utils.hpp"
-#include "mkldnn_thread.hpp"
-#include "utils.hpp"
 
 namespace mkldnn {
 namespace impl {
 namespace cpu {
 
-template <bool with_relu, impl::data_type_t src_type,
+template <impl::data_type_t src_type,
          impl::data_type_t wei_type = src_type,
          impl::data_type_t dst_type = src_type>
-struct _jit_avx512_common_1x1_convolution_fwd_t : public cpu_primitive_t {
+struct jit_avx512_common_1x1_convolution_fwd_t : public cpu_primitive_t {
     // TODO: (Roma) Code duplication duplication! Remove with templates
     //              (maybe...)!
-    struct pd_t: public _cpu_convolution_fwd_pd_t<with_relu> {
-        pd_t(engine_t *engine,
-                const typename pd_t::base_desc_t *adesc,
+    struct pd_t: public cpu_convolution_fwd_pd_t {
+        pd_t(engine_t *engine, const convolution_desc_t *adesc,
                 const primitive_attr_t *attr,
                 const typename pd_t::base_class *hint_fwd_pd)
-            : _cpu_convolution_fwd_pd_t<with_relu>(engine, adesc, attr,
-                    hint_fwd_pd)
+            : cpu_convolution_fwd_pd_t(engine, adesc, attr, hint_fwd_pd)
             , jcp_(), rtus_() {}
 
         DECLARE_COMMON_PD_T(
                 JIT_IMPL_NAME_HELPER("jit_1x1:", avx512_common, ""),
-                _jit_avx512_common_1x1_convolution_fwd_t);
+                jit_avx512_common_1x1_convolution_fwd_t);
 
         virtual status_t init() override {
             using namespace prop_kind;
@@ -56,37 +57,42 @@ struct _jit_avx512_common_1x1_convolution_fwd_t : public cpu_primitive_t {
             assert(this->engine()->kind() == engine_kind::cpu);
             bool ok = true
                 && this->set_default_params() == status::success
-                && utils::one_of(this->cdesc_().prop_kind, forward_training,
+                && utils::one_of(this->desc()->prop_kind, forward_training,
                         forward_inference)
-                && this->cdesc_().alg_kind == alg_kind::convolution_direct
+                && utils::one_of(this->desc()->alg_kind,
+                           alg_kind::convolution_auto,
+                           alg_kind::convolution_direct)
                 && !this->has_zero_dim_memory()
-                && this->cdesc_().src_desc.data_type == src_type
-                && this->cdesc_().weights_desc.data_type == wei_type
-                && this->cdesc_().dst_desc.data_type == dst_type
+                && this->desc()->src_desc.data_type == src_type
+                && this->desc()->weights_desc.data_type == wei_type
+                && this->desc()->dst_desc.data_type == dst_type
                 && IMPLICATION(this->with_bias(),
-                    dst_type == this->cdesc_().bias_desc.data_type)
-                && IMPLICATION(with_relu && dst_type == data_type::s32
-                    && everyone_is(data_type::s16, src_type, wei_type),
-                    this->negative_slope() == 0.);
+                    dst_type == this->desc()->bias_desc.data_type);
             if (!ok) return status::unimplemented;
 
-            const convolution_desc_t *conv_d = &this->cdesc_();
+            const convolution_desc_t *conv_d = this->desc();
             const memory_desc_t *src_d = this->src_pd_.desc();
             rtus_prepare(this, conv_d, src_d, this->dst_pd_.desc());
-            return jit_avx512_common_1x1_conv_kernel::init_conf(jcp_,
-                    *conv_d, *src_d, *this->weights_pd_.desc(),
+
+            status_t status = jit_avx512_common_1x1_conv_kernel::init_conf(
+                    jcp_, *conv_d, *src_d, *this->weights_pd_.desc(),
                     *this->dst_pd_.desc(), *this->attr(),
-                    with_relu, this->negative_slope(),
                     mkldnn_get_max_threads(), rtus_.reduce_src_);
+            if (status != status::success) return status;
+
+            auto scratchpad = scratchpad_registry().registrar();
+            jit_avx512_common_1x1_conv_kernel::init_scratchpad(scratchpad,
+                    jcp_);
+
+            rtus_prepare_space_info(this, scratchpad);
+
+            return status::success;
         }
 
         jit_1x1_conv_conf_t jcp_;
-        struct reduce_to_unit_stride_t {
-            convolution_desc_t conv_d_;
-            bool reduce_src_;
-        } rtus_;
+        reduce_to_unit_stride_t rtus_;
 
-      protected:
+    protected:
         virtual status_t set_default_params() override {
             using namespace memory_format;
             if (this->src_pd_.desc()->format == any)
@@ -110,78 +116,61 @@ struct _jit_avx512_common_1x1_convolution_fwd_t : public cpu_primitive_t {
             }
             if (this->bias_pd_.desc()->format == any)
                 CHECK(this->bias_pd_.set_format(x));
+            if (this->desc()->alg_kind == alg_kind::convolution_auto)
+                CHECK(this->set_alg_kind(alg_kind::convolution_direct));
             return status::success;
         }
     };
 
     template <cpu_isa_t isa, typename conv_t>
     friend void init_rtus_driver(conv_t *self);
-    _jit_avx512_common_1x1_convolution_fwd_t(const pd_t *pd,
-                                          const input_vector &inputs,
-                                          const output_vector &outputs)
-        : cpu_primitive_t(&conf_, inputs, outputs), conf_(*pd)
-        , kernel_(nullptr), rtus_driver_(nullptr), ws_per_thread_(0)
-        , scratch_(nullptr), padded_bias_(nullptr)
-    {
-        kernel_ = new jit_avx512_common_1x1_conv_kernel(conf_.jcp_,
-                    *conf_.attr());
 
+    jit_avx512_common_1x1_convolution_fwd_t(const pd_t *apd,
+            const input_vector &inputs, const output_vector &outputs)
+        : cpu_primitive_t(apd, inputs, outputs)
+        , kernel_(nullptr), rtus_driver_(nullptr)
+    {
+        kernel_ =
+            new jit_avx512_common_1x1_conv_kernel(pd()->jcp_, *pd()->attr());
         init_rtus_driver<avx512_common>(this);
-
-        if (conf_.want_padded_bias()) {
-            const auto &j = conf_.jcp_;
-            assert(j.ngroups == 1);
-            padded_bias_ = (dst_data_t *)malloc(sizeof(dst_data_t) * j.oc, 64);
-            for (int oc = j.oc_without_padding; oc < j.oc; ++oc)
-                padded_bias_[oc] = 0;
-        }
     }
 
-    ~_jit_avx512_common_1x1_convolution_fwd_t() {
+    ~jit_avx512_common_1x1_convolution_fwd_t() {
         delete kernel_;
         delete rtus_driver_;
-        free(scratch_);
-        free(padded_bias_);
     }
 
     typedef typename prec_traits<src_type>::type src_data_t;
     typedef typename prec_traits<wei_type>::type wei_data_t;
     typedef typename prec_traits<dst_type>::type dst_data_t;
 
-    virtual void execute(event_t *e) {
+    virtual void execute(event_t *e) const {
         execute_forward();
         e->set_state(event_t::ready);
     }
 
   private:
-    void execute_forward();
+    void execute_forward() const;
     void execute_forward_thr(const int ithr, const int nthr,
             const src_data_t *src, const wei_data_t *weights,
-            const dst_data_t *bias, dst_data_t *dst);
-    pd_t conf_;
+            const dst_data_t *bias, dst_data_t *dst,
+            const memory_tracking::grantor_t &scratchpad) const;
+    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
+
     jit_avx512_common_1x1_conv_kernel *kernel_;
-    /* reduction to unit stride */
     rtus_driver_t<avx512_common> *rtus_driver_;
-    size_t ws_per_thread_;
-    src_data_t *scratch_;
-    dst_data_t *padded_bias_;
 };
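
The forward primitive above no longer allocates per-thread workspace or a padded bias in its constructor: the pd books every run-time buffer in init() through scratchpad_registry().registrar(), and execute_forward() pulls the same buffers back through a memory_tracking::grantor_t. A minimal sketch of that book/get pairing, assuming the 0.18-era internal memory_tracking API (the key name and exact signatures below should be checked against memory_tracking.hpp in this tree):

#include "memory_tracking.hpp"
#include "jit_primitive_conf.hpp"

namespace mkldnn { namespace impl { namespace cpu {

// pd_t::init(): reserve (do not allocate) everything the kernel will touch.
inline void book_bias_example(memory_tracking::registrar_t &scratchpad,
        const jit_1x1_conv_conf_t &jcp) {
    using namespace memory_tracking::names;
    scratchpad.book(key_conv_padded_bias, sizeof(float) * jcp.oc);
}

// execute(): fetch the same buffer by key; no ownership, nothing to free.
inline float *padded_bias_example(
        const memory_tracking::grantor_t &scratchpad) {
    using namespace memory_tracking::names;
    return scratchpad.get<float>(key_conv_padded_bias);
}

}}} // namespace mkldnn::impl::cpu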
 
 using jit_avx512_common_1x1_convolution_fwd_f32_t
-        = _jit_avx512_common_1x1_convolution_fwd_t<false, data_type::f32>;
-using jit_avx512_common_1x1_convolution_relu_f32_t
-        = _jit_avx512_common_1x1_convolution_fwd_t<true, data_type::f32>;
+        = jit_avx512_common_1x1_convolution_fwd_t<data_type::f32>;
 using jit_avx512_common_1x1_convolution_fwd_s16s16s32_t
-        = _jit_avx512_common_1x1_convolution_fwd_t<false, data_type::s16,
-            data_type::s16, data_type::s32>;
-using jit_avx512_common_1x1_convolution_relu_s16s16s32_t
-        = _jit_avx512_common_1x1_convolution_fwd_t<true, data_type::s16,
+        = jit_avx512_common_1x1_convolution_fwd_t<data_type::s16,
             data_type::s16, data_type::s32>;
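
With the with_relu template parameter gone, the *_relu aliases disappear as well; ReLU fusion is instead carried by the attribute post-ops that init_conf() already receives through *this->attr(). A hedged user-side sketch of requesting the same fusion with the v0.x C++ API (treat the exact signatures as assumptions and verify against mkldnn.hpp in this tree):

#include "mkldnn.hpp"

// Sketch: express a fused ReLU through attr post-ops instead of a dedicated
// *_relu primitive type.
inline mkldnn::primitive_attr make_relu_fused_attr() {
    mkldnn::post_ops ops;
    // scale = 1.0, alpha (negative slope) = 0.0, beta unused
    ops.append_eltwise(1.0f, mkldnn::algorithm::eltwise_relu, 0.0f, 0.0f);
    mkldnn::primitive_attr attr;
    attr.set_post_ops(ops);
    return attr;  // pass to convolution_forward::primitive_desc(...)
}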
 
 template <impl::data_type_t diff_dst_type,
           impl::data_type_t wei_type = diff_dst_type,
           impl::data_type_t diff_src_type = diff_dst_type>
-struct _jit_avx512_common_1x1_convolution_bwd_data_t : public cpu_primitive_t {
+struct jit_avx512_common_1x1_convolution_bwd_data_t : public cpu_primitive_t {
     struct pd_t : public cpu_convolution_bwd_data_pd_t {
         pd_t(engine_t *engine,
                 const convolution_desc_t *adesc,
@@ -192,7 +181,7 @@ struct _jit_avx512_common_1x1_convolution_bwd_data_t : public cpu_primitive_t {
 
         DECLARE_COMMON_PD_T(
                 JIT_IMPL_NAME_HELPER("jit_1x1:", avx512_common, ""),
-                _jit_avx512_common_1x1_convolution_bwd_data_t);
+                jit_avx512_common_1x1_convolution_bwd_data_t);
 
         virtual status_t init() override {
             using namespace prop_kind;
@@ -200,7 +189,8 @@ struct _jit_avx512_common_1x1_convolution_bwd_data_t : public cpu_primitive_t {
             bool ok = true
                 && this->set_default_params() == status::success
                 && this->desc()->prop_kind == backward_data
-                && this->desc()->alg_kind == alg_kind::convolution_direct
+                && utils::one_of(this->desc()->alg_kind, alg_kind::convolution_auto,
+                           alg_kind::convolution_direct)
                 && !this->has_zero_dim_memory()
                 && this->desc()->diff_dst_desc.data_type == diff_dst_type
                 && this->desc()->weights_desc.data_type == wei_type
@@ -210,18 +200,25 @@ struct _jit_avx512_common_1x1_convolution_bwd_data_t : public cpu_primitive_t {
             const convolution_desc_t *conv_d = this->desc();
             const memory_desc_t *diff_src_d = this->diff_src_pd_.desc();
             rtus_prepare(this, conv_d, diff_src_d, this->diff_dst_pd_.desc());
-            return jit_avx512_common_1x1_conv_kernel::init_conf(jcp_,
-                            *conv_d, *diff_src_d, *this->weights_pd_.desc(),
-                            *this->diff_dst_pd_.desc(), *this->attr(),
-                            mkldnn_get_max_threads(), rtus_.reduce_src_);
+
+            status_t status = jit_avx512_common_1x1_conv_kernel::init_conf(
+                    jcp_, *conv_d, *diff_src_d, *this->weights_pd_.desc(),
+                    *this->diff_dst_pd_.desc(), *this->attr(),
+                    mkldnn_get_max_threads(), rtus_.reduce_src_);
+            if (status != status::success) return status;
+
+            auto scratchpad = scratchpad_registry().registrar();
+            jit_avx512_common_1x1_conv_kernel::init_scratchpad(scratchpad,
+                    jcp_);
+
+            rtus_prepare_space_info(this, scratchpad);
+
+            return status::success;
         }
 
         // TODO (Roma): structs conf header cleanup
         jit_1x1_conv_conf_t jcp_;
-        struct reduce_to_unit_stride_t {
-            convolution_desc_t conv_d_;
-            bool reduce_src_;
-        } rtus_;
+        reduce_to_unit_stride_t rtus_;
 
     protected:
         virtual status_t set_default_params() override {
@@ -248,6 +245,8 @@ struct _jit_avx512_common_1x1_convolution_bwd_data_t : public cpu_primitive_t {
                             ? pick(this->ndims() - 3, gOIw8o16i2o, gOIhw8o16i2o)
                             : pick(this->ndims() - 3, OIw8o16i2o, OIhw8o16i2o)));
             }
+            if (this->desc()->alg_kind == alg_kind::convolution_auto)
+                CHECK(this->set_alg_kind(alg_kind::convolution_direct));
 
             return status::success;
         }
@@ -255,30 +254,28 @@ struct _jit_avx512_common_1x1_convolution_bwd_data_t : public cpu_primitive_t {
 
     template <cpu_isa_t isa, typename conv_t>
     friend void init_rtus_driver(conv_t *self);
-    _jit_avx512_common_1x1_convolution_bwd_data_t(const pd_t *pd,
-                                              const input_vector &inputs,
-                                              const output_vector &outputs)
-        : cpu_primitive_t(&conf_, inputs, outputs), conf_(*pd)
-        , kernel_(nullptr), rtus_driver_(nullptr), ws_per_thread_(0)
-        , scratch_(nullptr)
+
+    jit_avx512_common_1x1_convolution_bwd_data_t(const pd_t *apd,
+            const input_vector &inputs, const output_vector &outputs)
+        : cpu_primitive_t(apd, inputs, outputs)
+        , kernel_(nullptr), rtus_driver_(nullptr)
     {
-        kernel_ = new jit_avx512_common_1x1_conv_kernel(conf_.jcp_,
-                    *conf_.attr());
+        kernel_ = new jit_avx512_common_1x1_conv_kernel(pd()->jcp_,
+                    *pd()->attr());
         init_rtus_driver<avx512_common>(this);
     }
-    ~_jit_avx512_common_1x1_convolution_bwd_data_t()
-    {
+
+    ~jit_avx512_common_1x1_convolution_bwd_data_t() {
         delete kernel_;
         delete rtus_driver_;
-        free(scratch_);
     }
 
     typedef typename prec_traits<diff_dst_type>::type diff_dst_data_t;
     typedef typename prec_traits<wei_type>::type wei_data_t;
     typedef typename prec_traits<diff_src_type>::type diff_src_data_t;
 
-    virtual void execute(event_t *e) {
-        switch (conf_.desc()->prop_kind) {
+    virtual void execute(event_t *e) const {
+        switch (pd()->desc()->prop_kind) {
         case prop_kind::backward_data:
             execute_backward_data();
             break;
@@ -289,19 +286,17 @@ struct _jit_avx512_common_1x1_convolution_bwd_data_t : public cpu_primitive_t {
     }
 
   private:
-    void execute_backward_data();
-    pd_t conf_;
+    void execute_backward_data() const;
+    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
+
     jit_avx512_common_1x1_conv_kernel *kernel_;
-    /* reduction to unit stride */
     rtus_driver_t<avx512_common> *rtus_driver_;
-    size_t ws_per_thread_;
-    diff_src_data_t *scratch_;
 };
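
The primitives also stop keeping a private pd_t conf_ copy: they read their descriptor through a pd() accessor that downcasts the pointer already stored in primitive_t, which is what lets execute() and the worker routines become const. A self-contained illustration of the pattern, with hypothetical names (my_pd_t and my_primitive_t are not library classes):

// Hypothetical, stand-alone illustration of the pd() accessor pattern.
struct my_pd_t {
    int jcp_param = 0;  // stands in for jcp_, attr(), ...
};

struct my_primitive_t {
    explicit my_primitive_t(const my_pd_t *apd) : apd_(apd) {}

    // execute() can be const: the primitive only reads its descriptor and
    // writes into memory (dst, scratchpad) handed in from the outside.
    int execute() const { return pd()->jcp_param; }

private:
    const my_pd_t *pd() const { return apd_; }
    const my_pd_t *apd_;
};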
 
 using jit_avx512_common_1x1_convolution_bwd_data_f32_t
-        = _jit_avx512_common_1x1_convolution_bwd_data_t<data_type::f32>;
+        = jit_avx512_common_1x1_convolution_bwd_data_t<data_type::f32>;
 using jit_avx512_common_1x1_convolution_bwd_data_s16s16s32_t
-        = _jit_avx512_common_1x1_convolution_bwd_data_t<data_type::s16,
+        = jit_avx512_common_1x1_convolution_bwd_data_t<data_type::s16,
             data_type::s16, data_type::s32>;
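
All three propagation kinds now also accept alg_kind::convolution_auto and pin it to convolution_direct inside set_default_params(), so a caller may leave the algorithm choice to the library. A user-side sketch with the v0.x C++ API and illustrative shapes (check mkldnn.hpp in this tree for the exact desc constructor and for algorithm::convolution_auto):

#include "mkldnn.hpp"
using namespace mkldnn;

// Sketch: ask for convolution_auto and let the implementation (here the
// jit 1x1 kernels) resolve it to convolution_direct.
inline convolution_forward::desc make_auto_1x1_desc() {
    memory::desc src({1, 64, 56, 56}, memory::data_type::f32, memory::format::any);
    memory::desc wei({128, 64, 1, 1}, memory::data_type::f32, memory::format::any);
    memory::desc dst({1, 128, 56, 56}, memory::data_type::f32, memory::format::any);
    return convolution_forward::desc(prop_kind::forward_inference,
            algorithm::convolution_auto, src, wei, dst,
            {1, 1} /* strides */, {0, 0} /* pad_l */, {0, 0} /* pad_r */,
            padding_kind::zero);
}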
 
 struct jit_avx512_common_1x1_convolution_bwd_weights_t : public cpu_primitive_t
@@ -324,7 +319,9 @@ struct jit_avx512_common_1x1_convolution_bwd_weights_t : public cpu_primitive_t
             bool ok = true
                 && this->set_default_params() == status::success
                 && this->desc()->prop_kind == backward_weights
-                && this->desc()->alg_kind == alg_kind::convolution_direct
+                && utils::one_of(this->desc()->alg_kind,
+                           alg_kind::convolution_auto,
+                           alg_kind::convolution_direct)
                 && !this->has_zero_dim_memory()
                 && utils::everyone_is(data_type::f32,
                         this->desc()->src_desc.data_type,
@@ -337,19 +334,32 @@ struct jit_avx512_common_1x1_convolution_bwd_weights_t : public cpu_primitive_t
             const convolution_desc_t *conv_d = this->desc();
             const memory_desc_t *src_d = this->src_pd_.desc();
             rtus_prepare(this, conv_d, src_d, this->diff_dst_pd_.desc());
-            return jit_avx512_common_1x1_conv_kernel::init_conf(jcp_,
-                            *conv_d, *src_d, *this->diff_weights_pd_.desc(),
-                            *this->diff_dst_pd_.desc(), *this->attr(),
-                            mkldnn_get_max_threads(), rtus_.reduce_src_);
+
+            status_t status = jit_avx512_common_1x1_conv_kernel::init_conf(
+                    jcp_, *conv_d, *src_d, *this->diff_weights_pd_.desc(),
+                    *this->diff_dst_pd_.desc(), *this->attr(),
+                    mkldnn_get_max_threads(), rtus_.reduce_src_);
+            if (status != status::success) return status;
+
+            init_balancers();
+
+            auto scratchpad = scratchpad_registry().registrar();
+            jit_avx512_common_1x1_conv_kernel::init_scratchpad(scratchpad,
+                    jcp_);
+
+            auto reducer_bia_scratchpad = memory_tracking::registrar_t(
+                    scratchpad, memory_tracking::names::prefix_reducer_bia);
+            reducer_bia_conf_.init_scratchpad(reducer_bia_scratchpad);
+
+            rtus_prepare_space_info(this, scratchpad);
+
+            return status::success;
         }
 
         // TODO (Roma): structs conf header cleanup
         jit_1x1_conv_conf_t jcp_;
-
-        struct reduce_to_unit_stride_t {
-            convolution_desc_t conv_d_;
-            bool reduce_src_;
-        } rtus_;
+        cpu_reducer_t<data_type::f32>::conf_t reducer_bia_conf_;
+        reduce_to_unit_stride_t rtus_;
 
     protected:
         virtual status_t set_default_params() override {
@@ -367,32 +377,40 @@ struct jit_avx512_common_1x1_convolution_bwd_weights_t : public cpu_primitive_t
                     : pick(this->ndims() - 3, OIw16i16o, OIhw16i16o)));
             if (this->diff_bias_pd_.desc()->format == any)
                 CHECK(this->diff_bias_pd_.set_format(x));
+            if (this->desc()->alg_kind == alg_kind::convolution_auto)
+                CHECK(this->set_alg_kind(alg_kind::convolution_direct));
             return status::success;
         }
+
+    private:
+        void init_balancers() {
+            const size_t max_buffer_size = jcp_.nthr * 3 * 5 * 5 * 16 * 16;
+            if (with_bias()) {
+                reducer_bia_conf_.init(reduce_balancer_t(jcp_.nthr,
+                            jcp_.oc_block, jcp_.ngroups * jcp_.nb_load,
+                            jcp_.mb, max_buffer_size));
+            }
+        }
     };
 
     template <cpu_isa_t isa, typename conv_t>
     friend void init_rtus_driver(conv_t *self);
-    jit_avx512_common_1x1_convolution_bwd_weights_t(const pd_t *pd,
-                                                 const input_vector &inputs,
-                                                 const output_vector &outputs);
+
+    jit_avx512_common_1x1_convolution_bwd_weights_t(const pd_t *apd,
+            const input_vector &inputs, const output_vector &outputs);
+
     ~jit_avx512_common_1x1_convolution_bwd_weights_t() {
         delete kernel_;
         delete acc_ker_;
         delete reducer_bias_;
         delete rtus_driver_;
         delete trans_kernel_;
-        free(bctx_);
-        free(ws_reduction_);
-        free(scratch_);
-        free(tr_src_);
-        free(padded_bias_);
     }
 
     typedef typename prec_traits<data_type::f32>::type data_t;
 
-    virtual void execute(event_t *e) {
-        switch (conf_.desc()->prop_kind) {
+    virtual void execute(event_t *e) const {
+        switch (pd()->desc()->prop_kind) {
         case prop_kind::backward_weights:
             execute_backward_weights();
             break;
@@ -403,23 +421,14 @@ struct jit_avx512_common_1x1_convolution_bwd_weights_t : public cpu_primitive_t
     }
 
   private:
-    void execute_backward_weights();
+    void execute_backward_weights() const;
+    const pd_t *pd() const { return (const pd_t *)primitive_t::pd(); }
 
-    pd_t conf_;
     jit_avx512_common_1x1_conv_kernel *kernel_;
     cpu_accumulator_1d_t<data_type::f32> *acc_ker_;
     cpu_reducer_t<data_type::f32> *reducer_bias_;
     jit_transpose4x16_src *trans_kernel_;
-
-    /* reduction to unit stride */
     rtus_driver_t<avx512_common> *rtus_driver_;
-    size_t ws_per_thread_;
-    data_t *scratch_;
-    data_t *padded_bias_;
-
-    simple_barrier::ctx_t *bctx_;
-    data_t *tr_src_;
-    data_t *ws_reduction_;
 };
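
The backward-by-weights pd now owns the bias-reducer configuration (reducer_bia_conf_) and sizes it in init_balancers(), so the reduction space is booked through the scratchpad as well rather than allocated by the primitive. An annotated restatement of the reduce_balancer_t call above, with the argument roles inferred from cpu_reducer.hpp (double-check against this tree):

// Annotated restatement of the call in init_balancers() (illustrative).
reducer_bia_conf_.init(reduce_balancer_t(
        jcp_.nthr,                    // threads available for the reduction
        jcp_.oc_block,                // job size: one output-channel block of the bias
        jcp_.ngroups * jcp_.nb_load,  // number of such jobs
        jcp_.mb,                      // reduction dimension (minibatch)
        max_buffer_size));            // cap on the intermediate buffer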
 
 }