Publishing 2019 R1 content
[platform/upstream/dldt.git] / inference-engine / thirdparty / clDNN / src / graph_optimizer / pre_optimize_bias.cpp
1 /*
2 // Copyright (c) 2018 Intel Corporation
3 //
4 // Licensed under the Apache License, Version 2.0 (the "License");
5 // you may not use this file except in compliance with the License.
6 // You may obtain a copy of the License at
7 //
8 //      http://www.apache.org/licenses/LICENSE-2.0
9 //
10 // Unless required by applicable law or agreed to in writing, software
11 // distributed under the License is distributed on an "AS IS" BASIS,
12 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 // See the License for the specific language governing permissions and
14 // limitations under the License.
15 */
16
17 ///////////////////////////////////////////////////////////////////////////////////////////////////
18
19 #include "pass_manager.h"
20 #include "program_node.h"
21 #include "layout_optimizer.h"
22 #include "program_impl.h"
23 #include "program_helpers.h"
24 #include "fully_connected_inst.h"
25
26 using namespace cldnn;
27
28 pre_optimize_bias::pre_optimize_bias(layout_optimizer& lo_ref) : base_pass("pre_optimize_bias"), _lo(lo_ref) {}
29
30 void pre_optimize_bias::run(program_impl& p) {
31     run(p, _lo);
32 }
33
34 //function which prepares given primitive for weights optimization
35 template <typename T>
36 void pre_optimize_bias::optimize_bias(T& node, layout_optimizer& lo, program_impl& p)
37 {
38     layout output_layout = node.get_output_layout();
39
40     size_t weights_offset = node.get_primitive()->input.size();
41     size_t bias_offset = weights_offset + program_helpers::wrap_if_single(node.get_primitive()->weights).size();
42     for (size_t i = bias_offset; i < node.get_dependencies().size(); ++i)
43     {
44         //find weights primitive with given pimitive_id and add it to weights_optimizer
45         const program_node& bias = node.get_dependency(i);
46         const auto bias_type = layout_optimizer::data_type::bias;
47         auto reorder = lo.get_reorder(
48             bias.get_output_layout(),
49             bias.id(),
50             bias_type,
51             node,
52             output_layout);
53
54         if (reorder.first)
55             p.add_intermediate(reorder.first, node, i, !reorder.second);
56     }
57 }
// Explicit instantiations for every primitive type that carries a bias tensor.
template void pre_optimize_bias::optimize_bias<convolution_node>(convolution_node& node, layout_optimizer& lo, program_impl& p);
template void pre_optimize_bias::optimize_bias<deconvolution_node>(deconvolution_node& node, layout_optimizer& lo, program_impl& p);
template void pre_optimize_bias::optimize_bias<fully_connected_node>(fully_connected_node& node, layout_optimizer& lo, program_impl& p);
template void pre_optimize_bias::optimize_bias<embed_node>(embed_node& node, layout_optimizer& lo, program_impl& p);
62
63
64 void pre_optimize_bias::run(program_impl& p, layout_optimizer& lo)
65 {
66     for (auto& prim : p.get_processing_order())
67     {
68         if (prim->type() == convolution::type_id())
69         {
70             if (!prim->as<convolution>().weights_quantization_term())
71                 optimize_bias(prim->as<convolution>(), lo, p);
72         }
73         else if (prim->type() == deconvolution::type_id())
74         {
75             optimize_bias(prim->as<deconvolution>(), lo, p);
76         }
77         else if (prim->type() == fully_connected::type_id())
78         {
79             if (!prim->as<fully_connected>().weights_quantization_term())
80                 optimize_bias(prim->as<fully_connected>(), lo, p);
81         }
82         else if (prim->type() == embed::type_id())
83         {
84             optimize_bias(prim->as<embed>(), lo, p);
85         }
86     }
87 }