Deprecate nGraph v0 ops and builders (#1856)
ngraph/core/src/op/add.cpp
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "ngraph/op/add.hpp"
#include "itt.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/reference/add.hpp"

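// The v0 ops referenced below are deprecated (see "Deprecate nGraph v0 ops and
// builders"); suppress the deprecation warnings their use in this file would
// otherwise trigger.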
NGRAPH_SUPPRESS_DEPRECATED_START

using namespace std;
using namespace ngraph;

// ------------------------------- v0 ------------------------------------------

constexpr NodeTypeInfo op::v0::Add::type_info;

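// v0::Add: elementwise addition of two tensors. Input broadcasting is controlled
// by the AutoBroadcastSpec forwarded to the BinaryElementwiseArithmetic base.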
op::v0::Add::Add(const Output<Node>& arg0,
                 const Output<Node>& arg1,
                 const AutoBroadcastSpec& auto_broadcast)
    : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast)
{
    constructor_validate_and_infer_types();
}

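// Makes a copy of this node attached to the given inputs, preserving the
// auto-broadcast specification.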
shared_ptr<Node> op::v0::Add::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<op::v0::Add>(new_args.at(0), new_args.at(1), this->get_autob());
}

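// Attribute visitation is delegated entirely to the BinaryElementwiseArithmetic base.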
bool op::v0::Add::visit_attributes(AttributeVisitor& visitor)
{
    BinaryElementwiseArithmetic::visit_attributes(visitor);
    return true;
}

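// Convenience overload so that `a + b` on node outputs builds an Add node with the
// default auto-broadcast behaviour.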
shared_ptr<Node> ngraph::operator+(const Output<Node>& arg0, const Output<Node>& arg1)
{
    return make_shared<op::Add>(arg0, arg1);
}

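// Reference evaluation helpers shared by the v0 and v1 evaluate() overrides:
// evaluate<ET> runs the reference add kernel for one concrete element type, and
// evaluate_add dispatches on the runtime element type via the TYPE_CASE macro,
// returning false for unsupported types.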
namespace
{
    template <element::Type_t ET>
    bool evaluate(const HostTensorPtr& arg0,
                  const HostTensorPtr& arg1,
                  const HostTensorPtr& out,
                  const op::AutoBroadcastSpec& broadcast_spec)
    {
        runtime::reference::add(arg0->get_data_ptr<ET>(),
                                arg1->get_data_ptr<ET>(),
                                out->get_data_ptr<ET>(),
                                arg0->get_shape(),
                                arg1->get_shape(),
                                broadcast_spec);
        return true;
    }

    bool evaluate_add(const HostTensorPtr& arg0,
                      const HostTensorPtr& arg1,
                      const HostTensorPtr& out,
                      const op::AutoBroadcastSpec& broadcast_spec)
    {
        bool rc = true;
        out->set_broadcast(broadcast_spec, arg0, arg1);
        switch (arg0->get_element_type())
        {
            TYPE_CASE(i8)(arg0, arg1, out, broadcast_spec);
            break;
            TYPE_CASE(i16)(arg0, arg1, out, broadcast_spec);
            break;
            TYPE_CASE(i32)(arg0, arg1, out, broadcast_spec);
            break;
            TYPE_CASE(i64)(arg0, arg1, out, broadcast_spec);
            break;
            TYPE_CASE(u8)(arg0, arg1, out, broadcast_spec);
            break;
            TYPE_CASE(u16)(arg0, arg1, out, broadcast_spec);
            break;
            TYPE_CASE(u32)(arg0, arg1, out, broadcast_spec);
            break;
            TYPE_CASE(u64)(arg0, arg1, out, broadcast_spec);
            break;
            TYPE_CASE(bf16)(arg0, arg1, out, broadcast_spec);
            break;
            TYPE_CASE(f16)(arg0, arg1, out, broadcast_spec);
            break;
            TYPE_CASE(f32)(arg0, arg1, out, broadcast_spec);
            break;
        default: rc = false; break;
        }
        return rc;
    }
}

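// Evaluates the v0 op on host tensors (used, for example, when folding constant
// subgraphs), delegating to the type-dispatched reference implementation above.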
bool op::v0::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v0::Add::evaluate");
    return evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
}

// ------------------------------- v1 ------------------------------------------

NGRAPH_RTTI_DEFINITION(op::v1::Add, "Add", 1, util::BinaryElementwiseArithmetic);

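// v1::Add is the current, non-deprecated version of the op; construction, attribute
// visitation, cloning and evaluation mirror the v0 implementation above.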
op::v1::Add::Add(const Output<Node>& arg0,
                 const Output<Node>& arg1,
                 const AutoBroadcastSpec& auto_broadcast)
    : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast)
{
    constructor_validate_and_infer_types();
}

bool op::v1::Add::visit_attributes(AttributeVisitor& visitor)
{
    BinaryElementwiseArithmetic::visit_attributes(visitor);
    return true;
}

shared_ptr<Node> op::v1::Add::clone_with_new_inputs(const OutputVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<op::v1::Add>(new_args.at(0), new_args.at(1), this->get_autob());
}

bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const
{
    OV_ITT_SCOPED_TASK(itt::domains::nGraphOp, "op::v1::Add::evaluate");
    return evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
}
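
// Usage sketch (not part of this translation unit; assumes the usual ngraph
// Parameter/Shape/element headers are included):
//
//   auto a   = make_shared<op::Parameter>(element::f32, Shape{2, 2});
//   auto b   = make_shared<op::Parameter>(element::f32, Shape{2, 2});
//   auto sum = make_shared<op::v1::Add>(a, b); // the deprecated `a + b` overload
//                                              // above builds the v0 op instead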