return false;
}
auto last_node = batch_to_space->decompose_op()[0];
- last_node->set_friendly_name(batch_to_space->get_friendly_name());
- ngraph::replace_node(batch_to_space, last_node);
+ last_node.get_node()->set_friendly_name(batch_to_space->get_friendly_name());
+ ngraph::replace_node(batch_to_space, last_node.get_node_shared_ptr());
return true;
};
return false;
}
auto last_node = space_to_batch->decompose_op()[0];
- last_node->set_friendly_name(space_to_batch->get_friendly_name());
- ngraph::replace_node(space_to_batch, last_node);
+ last_node.get_node()->set_friendly_name(space_to_batch->get_friendly_name());
+ ngraph::replace_node(space_to_batch, last_node.get_node_shared_ptr());
return true;
};
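Both hunks above apply the same pattern: decompose_op() now returns an OutputVector, and an Output<Node> is a (producer node, output index) handle rather than a node pointer, so the node itself is reached through get_node() or get_node_shared_ptr(). A minimal sketch of the accessors involved (fused_op is hypothetical):

// Sketch: reaching the producing node behind an Output<Node> handle.
Output<ngraph::Node> out = fused_op->decompose_op()[0];
ngraph::Node* raw = out.get_node();              // non-owning pointer
auto shared = out.get_node_shared_ptr();         // std::shared_ptr<ngraph::Node>
raw->set_friendly_name(fused_op->get_friendly_name());
ngraph::replace_node(fused_op, shared);          // replace_node expects a shared_ptr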
return m_inputs.at(1);
}
-NodeVector builder::MatmulFactory::make_matmul_op()
+OutputVector builder::MatmulFactory::make_matmul_op()
{
auto left = get_left();
auto right = get_right();
/// \brief Create a sub-graph representing an ONNX MatMul operation.
///
- /// \return NodeVector containing the sub-graph output node.
- virtual NodeVector make_matmul_op();
+ /// \return OutputVector containing the sub-graph output node.
+ virtual OutputVector make_matmul_op();
protected:
/// \return Output representing the left operand.
}
}
- void
- check_concat(const NodeVector& args, const NodeVector& mins, const NodeVector& maxs)
+ void check_concat(const OutputVector& args,
+ const OutputVector& mins,
+ const OutputVector& maxs)
{
auto size = args.size();
if (size != mins.size() || size != maxs.size())
{
auto min = mins[i];
auto max = maxs[i];
- auto type = min->get_element_type();
- if (type != max->get_element_type())
+ auto type = min.get_element_type();
+ if (type != max.get_element_type())
{
throw ngraph_error("check_concat: min and max must have same type");
}
- if (min->get_shape() != Shape{1} || max->get_shape() != Shape{1})
+ if (min.get_shape() != Shape{1} || max.get_shape() != Shape{1})
{
throw ngraph_error("check_concat: min/max shape not Shape{1}: " +
- vector_to_string(min->get_shape()) +
- vector_to_string(max->get_shape()));
+ vector_to_string(min.get_shape()) +
+ vector_to_string(max.get_shape()));
}
}
}
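check_concat now reads element types and shapes through the Output<Node> API, which always refers to one specific output instead of assuming a single-output node. A sketch of the same checks against a handle (some_node is hypothetical):

// Sketch: per-output metadata reads replacing node-level getters.
Output<ngraph::Node> o = some_node->output(0);
if (o.get_element_type() != element::f32 || o.get_shape() != Shape{1})
{
    throw ngraph_error("unexpected element type or shape");
}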
const ngraph::element::Type& output_type,
const bool requantize = true);
- void check_concat(const NodeVector& args,
- const NodeVector& mins,
- const NodeVector& maxs);
+ void check_concat(const OutputVector& args,
+ const OutputVector& mins,
+ const OutputVector& maxs);
}
}
}
{
namespace builder
{
- shared_ptr<Node> QuantizedConcatBuilder(const NodeVector& args,
+ shared_ptr<Node> QuantizedConcatBuilder(const OutputVector& args,
size_t concatenation_axis,
- const NodeVector& mins,
- const NodeVector& maxs)
+ const OutputVector& mins,
+ const OutputVector& maxs)
{
quantization_utils::check_concat(args, mins, maxs);
- auto quant_type = args[0]->get_element_type();
+ auto quant_type = args[0].get_element_type();
// output scale
auto min = make_shared<op::Min>(make_shared<op::Concat>(mins, 0), ngraph::AxisSet{0});
auto max = make_shared<op::Max>(make_shared<op::Concat>(maxs, 0), ngraph::AxisSet{0});
auto out_scale = quantization_utils::get_scale(min, max, quant_type);
- NodeVector rescaled_args(args.size());
+ OutputVector rescaled_args(args.size());
for (size_t i = 0; i < args.size(); ++i)
{
- auto q_type = args[i]->get_element_type();
+ auto q_type = args[i].get_element_type();
auto in_scale = make_shared<ngraph::op::Reshape>(
quantization_utils::get_scale(mins[i], maxs[i], q_type),
AxisVector{0},
Shape{});
- auto zero = make_constant(q_type, in_scale->get_shape(), 0);
+ auto zero = make_constant(q_type, in_scale->get_output_shape(0), 0);
rescaled_args[i] =
make_shared<op::Dequantize>(args[i], in_scale, zero, element::f32, AxisSet{});
AxisSet{},
op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN);
}
- OutputVector base = as_output_vector(args);
+ OutputVector base = args;
for (auto node : mins)
{
base.push_back(node);
namespace builder
{
NGRAPH_API
- std::shared_ptr<Node> QuantizedConcatBuilder(const NodeVector& args,
+ std::shared_ptr<Node> QuantizedConcatBuilder(const OutputVector& args,
size_t concatenation_axis,
- const NodeVector& mins,
- const NodeVector& maxs);
+ const OutputVector& mins,
+ const OutputVector& maxs);
}
}
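A hypothetical call site for the updated builder; since args, mins and maxs are now plain OutputVectors, single-output nodes convert implicitly and no as_output_vector() round-trip is needed:

auto a = std::make_shared<op::Parameter>(element::i8, Shape{2, 2});
auto b = std::make_shared<op::Parameter>(element::i8, Shape{2, 2});
OutputVector args{a, b};            // implicit Output<Node> conversion
OutputVector mins{min_a, min_b};    // min_a/min_b, max_a/max_b: hypothetical
OutputVector maxs{max_a, max_b};    // Shape{1} min/max bound nodes
auto concat = builder::QuantizedConcatBuilder(args, 0, mins, maxs);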
std::make_shared<op::Slice>(output, lower_bounds, upper_bounds)
->add_provenance_group_members_above({output}));
}
-
- /// \brief Return the outputs of the node as vector.
- ///
- /// \param[in] node Node with multiple outputs.
- ///
- /// \return Vector of outputs of input node.
- NodeVector get_outputs(const std::shared_ptr<ngraph::Node>& node)
- {
- const auto outputs_number = node->get_output_size();
- ngraph::NodeVector outputs(outputs_number);
- for (int i = 0; i < outputs_number; ++i)
- {
- if (node->output(i).get_node_shared_ptr()->get_output_size() == 1)
- {
- outputs[i] = node->get_output_as_single_output_node(i);
- }
- else
- {
- outputs[i] = std::make_shared<op::GetOutputElement>(node, i);
- }
- }
- return outputs;
- }
}
-NodeVector builder::split(const Output<ngraph::Node>& value,
- const std::vector<size_t>& length_parts,
- size_t axis)
+OutputVector
+ builder::split(const Output<Node>& value, const std::vector<size_t>& length_parts, size_t axis)
{
size_t start_index{0};
- NodeVector outputs;
+ OutputVector outputs;
for (const auto& length_part : length_parts)
{
size_t end_index{start_index + length_part};
return outputs;
}
-NodeVector builder::split(const Output<Node>& value, size_t split_parts, int axis)
+OutputVector builder::split(const Output<Node>& value, size_t split_parts, int axis)
{
size_t axis_to_split{static_cast<size_t>(axis)};
if (axis < 0)
return split(value, length_parts, axis_to_split);
}
-NodeVector builder::opset1::split(const Output<Node>& value,
- const std::vector<size_t>& split_lengths,
- int64_t axis)
+OutputVector builder::opset1::split(const Output<Node>& value,
+ const std::vector<size_t>& split_lengths,
+ int64_t axis)
{
const auto axis_node = ngraph::opset1::Constant::create(element::u64, Shape{}, {axis});
const auto split_lengths_node =
const auto variadic_split =
std::make_shared<ngraph::opset1::VariadicSplit>(value, axis_node, split_lengths_node);
- return get_outputs(variadic_split);
+ return variadic_split->outputs();
}
-NodeVector builder::opset1::split(const Output<Node>& value, size_t num_splits, int64_t axis)
+OutputVector builder::opset1::split(const Output<Node>& value, size_t num_splits, int64_t axis)
{
const auto axis_node = ngraph::opset1::Constant::create(element::u64, Shape{}, {axis});
const auto split = std::make_shared<ngraph::opset1::Split>(value, axis_node, num_splits);
- return get_outputs(split);
+ return split->outputs();
}
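The deleted get_outputs() helper, with its GetOutputElement fallback, is superseded by Node::outputs(), which already yields one Output<Node> handle per op output. A sketch (value and axis_node as above):

// Sketch: outputs() on a multi-output op.
auto split_op = std::make_shared<ngraph::opset1::Split>(value, axis_node, 3);
OutputVector outs = split_op->outputs();   // size() == 3
auto first = outs.at(0);                   // usable directly as an input to other ops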
///
/// \return The vector containing multiple nodes we split input node into.
///
- NodeVector split(const Output<Node>& value,
- const std::vector<size_t>& length_parts,
- size_t axis = 0);
+ OutputVector split(const Output<Node>& value,
+ const std::vector<size_t>& length_parts,
+ size_t axis = 0);
/// \brief Split node on specified axis into multiple parts.
///
/// indexing). This means that the axis to split on will be counted from
/// the back of the tensor (negative values are subtracted from its rank).
///
- /// \return The vector containing multiple nodes we split input node into.
+ /// \return The vector containing multiple outputs we split input node into.
///
- NodeVector split(const Output<Node>& value, size_t split_parts, int axis = 0);
+ OutputVector split(const Output<Node>& value, size_t split_parts, int axis = 0);
namespace opset1
{
/// indexing). This means that the axis to split on will be counted from
/// the back of the tensor (negative values are subtracted from its rank).
///
- /// \return The vector containing multiple nodes we split input node into.
+ /// \return The vector containing multiple outputs we split input node into.
/// The vector is output of Split:v1 op
///
NGRAPH_API
- NodeVector split(const Output<Node>& value,
- const std::vector<size_t>& split_lengths,
- int64_t axis = 0);
+ OutputVector split(const Output<Node>& value,
+ const std::vector<size_t>& split_lengths,
+ int64_t axis = 0);
/// \brief Split value on specified axis into multiple parts.
///
/// The vector is output of VariadicSplit:v1 op
///
NGRAPH_API
- NodeVector split(const Output<Node>& value, size_t num_splits, int64_t axis = 0);
+ OutputVector split(const Output<Node>& value, size_t num_splits, int64_t axis = 0);
}
} // namespace builder
} // namespace ngraph
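A hypothetical usage of the declarations above, splitting a statically shaped tensor into equal parts:

auto data = std::make_shared<ngraph::opset1::Parameter>(element::f32, Shape{6, 4});
OutputVector parts = ngraph::builder::opset1::split(data, /*num_splits*/ 3, /*axis*/ 0);
// parts.size() == 3; each part has shape {2, 4}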
m_nodes.emplace_back(node_proto, *this);
const Node& node{m_nodes.back()};
- NodeVector ng_nodes{node.get_ng_nodes()};
+ OutputVector ng_nodes{node.get_ng_nodes()};
// Iterate over the number of outputs for given node in graph.
// Some of them may be optional and trimmed. See:
// https://github.com/onnx/onnx/blob/master/docs/IR.md#optional-inputs-and-outputs
return m_cache->contains(name);
}
- std::shared_ptr<ngraph::Node> Graph::get_ng_node_from_cache(const std::string& name) const
+ Output<ngraph::Node> Graph::get_ng_node_from_cache(const std::string& name) const
{
return m_cache->get_node(name);
}
- NodeVector Graph::get_ng_outputs() const
+ OutputVector Graph::get_ng_outputs() const
{
- NodeVector results;
+ OutputVector results;
for (const auto& output : m_graph_proto->output())
{
results.emplace_back(get_ng_node_from_cache(output.name()));
return results;
}
- NodeVector Graph::make_ng_nodes(const Node& onnx_node) const
+ OutputVector Graph::make_ng_nodes(const Node& onnx_node) const
{
const auto ng_node_factory =
m_model->get_operator(onnx_node.op_type(), onnx_node.domain());
- NodeVector ng_node_vector;
+ OutputVector ng_node_vector;
try
{
ng_node_vector = ng_node_factory(onnx_node);
}
void Graph::set_friendly_names(const Node& onnx_node,
- const NodeVector& ng_node_vector) const
+ const OutputVector& ng_node_vector) const
{
for (int i = 0; i < ng_node_vector.size(); ++i)
{
break;
}
- ng_node_vector[i]->set_friendly_name(onnx_node.output(i));
+ ng_node_vector[i].get_node()->set_friendly_name(onnx_node.output(i));
}
}
}
void Graph::add_provenance_tags(const Node& onnx_node,
- const NodeVector& ng_node_vector) const
+ const OutputVector& ng_node_vector) const
{
if (!ngraph::get_provenance_enabled())
{
const auto ng_inputs = onnx_node.get_ng_inputs();
ngraph::traverse_nodes(
- ng_node_vector,
+ as_node_vector(ng_node_vector),
[&tag](std::shared_ptr<ngraph::Node> ng_node) { ng_node->add_provenance_tag(tag); },
- ng_inputs);
+ as_node_vector(ng_inputs));
}
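traverse_nodes still takes NodeVectors, so the OutputVectors are bridged with as_node_vector(), which maps every Output<Node> back to its producing node. A sketch:

// Sketch: bridging OutputVector-based code to a NodeVector-based API.
OutputVector outs = onnx_node.get_ng_inputs();   // onnx_node: hypothetical
NodeVector nodes = as_node_vector(outs);         // one shared_ptr<Node> per output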
Subgraph::Subgraph(const ONNX_NAMESPACE::GraphProto& proto,
const std::vector<Node>& get_nodes() const { return m_nodes; }
const std::vector<ValueInfo>& get_inputs() const { return m_inputs; }
const std::vector<ValueInfo>& get_outputs() const { return m_outputs; }
- NodeVector get_ng_outputs() const;
+ OutputVector get_ng_outputs() const;
const ParameterVector& get_ng_parameters() const { return m_parameters; }
bool is_node_in_cache(const std::string& name) const;
- std::shared_ptr<ngraph::Node> get_ng_node_from_cache(const std::string& name) const;
+ Output<ngraph::Node> get_ng_node_from_cache(const std::string& name) const;
const std::string& get_name() const { return m_graph_proto->name(); }
- NodeVector make_ng_nodes(const Node& onnx_node) const;
+ OutputVector make_ng_nodes(const Node& onnx_node) const;
const GraphCache& get_graph_cache() const;
protected:
Model& model,
std::unique_ptr<GraphCache>&& cache);
- void set_friendly_names(const Node& onnx_node, const NodeVector& ng_node_vector) const;
+ void set_friendly_names(const Node& onnx_node,
+ const OutputVector& ng_node_vector) const;
void add_provenance_tag_to_initializer(
const Tensor& initializer, std::shared_ptr<default_opset::Constant> node) const;
void add_provenance_tag_to_input(const ValueInfo& input,
std::shared_ptr<ngraph::Node> node) const;
- void add_provenance_tags(const Node& onnx_node, const NodeVector& ng_node_vector) const;
+ void add_provenance_tags(const Node& onnx_node,
+ const OutputVector& ng_node_vector) const;
private:
const ONNX_NAMESPACE::GraphProto* m_graph_proto;
{
namespace onnx_import
{
- void GraphCache::emplace_node(const std::string& name, std::shared_ptr<ngraph::Node>&& node)
+ void GraphCache::emplace_node(const std::string& name, Output<ngraph::Node>&& node)
{
m_graph_cache_map[name] = std::move(node);
}
- std::shared_ptr<ngraph::Node> GraphCache::get_node(const std::string& name) const
+ Output<ngraph::Node> GraphCache::get_node(const std::string& name) const
{
try
{
}
}
- std::shared_ptr<ngraph::Node> SubgraphCache::get_node(const std::string& name) const
+ Output<ngraph::Node> SubgraphCache::get_node(const std::string& name) const
{
// present in subgraph scope
if (GraphCache::contains(name))
///
/// \param[in] name The name of node added to the cache.
/// \param[in] node The node added to the cache.
- void emplace_node(const std::string& name, std::shared_ptr<ngraph::Node>&& node);
+ void emplace_node(const std::string& name, Output<ngraph::Node>&& node);
/// \brief Get the node from the cache
///
/// \param[in] name The name of the node.
///
/// \return The node named `name`.
- virtual std::shared_ptr<ngraph::Node> get_node(const std::string& name) const;
+ virtual Output<ngraph::Node> get_node(const std::string& name) const;
/// \brief Return true if the node named `name` exist in the cache.
///
virtual bool contains(const std::string& name) const;
private:
- std::map<std::string, std::shared_ptr<ngraph::Node>> m_graph_cache_map;
+ std::map<std::string, Output<ngraph::Node>> m_graph_cache_map;
};
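With the map now storing Output<Node> values, a cached entry pins a specific output of its producer rather than the whole node. Hypothetical usage (parameter is any single-output node):

GraphCache cache;
cache.emplace_node("X", parameter->output(0));
Output<ngraph::Node> x = cache.get_node("X");   // no shared_ptr round-trip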
class SubgraphCache : public GraphCache
/// \param[in] name The name of the node.
///
/// \return The node named `name` from subgraph (as present) or from parent graph.
- std::shared_ptr<ngraph::Node> get_node(const std::string& name) const override;
+ Output<ngraph::Node> get_node(const std::string& name) const override;
/// \brief Return true if the node named `name` exist in the cache.
///
}
const std::vector<Attribute>& attributes() const;
- NodeVector get_ng_nodes(const Node& node) const;
- NodeVector get_ng_inputs() const;
+ OutputVector get_ng_nodes(const Node& node) const;
+ OutputVector get_ng_inputs() const;
const std::string& domain() const;
const std::string& op_type() const;
return it->get_subgraph(graph());
}
- NodeVector Node::Impl::get_ng_nodes(const Node& node) const
+ OutputVector Node::Impl::get_ng_nodes(const Node& node) const
{
return m_graph->make_ng_nodes(node);
}
- NodeVector Node::Impl::get_ng_inputs() const
+ OutputVector Node::Impl::get_ng_inputs() const
{
- NodeVector result;
+ OutputVector result;
for (const auto& name : m_node_proto->input())
{
if (!name.empty())
}
else
{
- result.push_back(std::make_shared<NullNode>());
+ result.push_back(std::make_shared<NullNode>()->output(0));
}
}
return result;
{
}
- NodeVector Node::get_ng_inputs() const { return m_pimpl->get_ng_inputs(); }
- NodeVector Node::get_ng_nodes() const { return m_pimpl->get_ng_nodes(*this); }
+ OutputVector Node::get_ng_inputs() const { return m_pimpl->get_ng_inputs(); }
+ OutputVector Node::get_ng_nodes() const { return m_pimpl->get_ng_nodes(*this); }
const std::string& Node::domain() const { return m_pimpl->domain(); }
const std::string& Node::op_type() const { return m_pimpl->op_type(); }
const std::string& Node::get_description() const { return m_pimpl->description(); }
Node& operator=(Node&&) noexcept = delete;
Node& operator=(const Node&) = delete;
- NodeVector get_ng_inputs() const;
- NodeVector get_ng_nodes() const;
+ OutputVector get_ng_inputs() const;
+ OutputVector get_ng_nodes() const;
const std::string& domain() const;
const std::string& op_type() const;
const std::string& get_name() const;
{
return is_null(node.get());
}
+
+bool ngraph::op::is_null(const Output<ngraph::Node>& output)
+{
+ return is_null(output.get_node());
+}
bool is_null(const ngraph::Node* node);
ONNX_IMPORTER_API
bool is_null(const std::shared_ptr<ngraph::Node>& node);
+ ONNX_IMPORTER_API
+ bool is_null(const Output<ngraph::Node>& output);
}
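The new overload lets optional-input checks elsewhere in this change test Output<Node> handles directly. A sketch (onnx_node is hypothetical):

OutputVector ins = onnx_node.get_ng_inputs();
if (ins.size() > 2 && !ngraph::op::is_null(ins[2]))
{
    // the optional third input was provided
}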
namespace onnx_import
{
namespace onnx_import
{
/// \brief Function which transforms single ONNX operator to nGraph sub-graph.
- using Operator = std::function<NodeVector(const Node&)>;
+ using Operator = std::function<OutputVector(const Node&)>;
/// \brief Map which contains ONNX operators accessible by std::string value as a key.
using OperatorSet = std::unordered_map<std::string, std::reference_wrapper<const Operator>>;
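Every importer function updated below matches this Operator signature; a minimal hypothetical registration:

// Sketch: an Operator is any callable with signature OutputVector(const Node&).
Operator my_abs = [](const Node& node) -> OutputVector {
    return {std::make_shared<default_opset::Abs>(node.get_ng_inputs().at(0))};
};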
{
namespace set_1
{
- inline NodeVector abs(const Node& node)
+ inline OutputVector abs(const Node& node)
{
return {std::make_shared<default_opset::Abs>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector acos(const Node& node)
+ inline OutputVector acos(const Node& node)
{
return {std::make_shared<default_opset::Acos>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector acosh(const Node& node)
+ inline OutputVector acosh(const Node& node)
{
return {std::make_shared<default_opset::Acosh>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- NodeVector add(const Node& node)
+ OutputVector add(const Node& node)
{
const Output<ngraph::Node> lhs_node = node.get_ng_inputs().at(0);
Output<ngraph::Node> rhs_node = node.get_ng_inputs().at(1);
namespace set_7
{
- NodeVector add(const Node& node)
+ OutputVector add(const Node& node)
{
return {std::make_shared<default_opset::Add>(node.get_ng_inputs().at(0),
node.get_ng_inputs().at(1))};
{
namespace set_1
{
- NodeVector add(const Node& node);
+ OutputVector add(const Node& node);
} // namespace set_1
namespace set_7
{
- NodeVector add(const Node& node);
+ OutputVector add(const Node& node);
} // namespace set_7
{
namespace set_1
{
- inline NodeVector logical_and(const Node& node)
+ inline OutputVector logical_and(const Node& node)
{
return {std::make_shared<default_opset::LogicalAnd>(
node.get_ng_inputs().at(0), node.get_ng_inputs().at(1))};
{
namespace set_1
{
- NodeVector argmax(const Node& node)
+ OutputVector argmax(const Node& node)
{
const utils::ArgMinMaxFactory arg_factory(node);
return {arg_factory.make_arg_max()};
///
/// \return The vector containing an Ngraph node which produces the output
/// of an ONNX ArgMax operation.
- NodeVector argmax(const Node& node);
+ OutputVector argmax(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector argmin(const Node& node)
+ OutputVector argmin(const Node& node)
{
const utils::ArgMinMaxFactory arg_factory(node);
return {arg_factory.make_arg_min()};
///
/// \return The vector containing an Ngraph node which produces the output
/// of an ONNX ArgMin operation.
- NodeVector argmin(const Node& node);
+ OutputVector argmin(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector asin(const Node& node)
+ inline OutputVector asin(const Node& node)
{
return {std::make_shared<default_opset::Asin>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector asinh(const Node& node)
+ inline OutputVector asinh(const Node& node)
{
return {std::make_shared<default_opset::Asinh>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector atan(const Node& node)
+ inline OutputVector atan(const Node& node)
{
return {std::make_shared<default_opset::Atan>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector atanh(const Node& node)
+ inline OutputVector atanh(const Node& node)
{
return {std::make_shared<default_opset::Atanh>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- NodeVector average_pool(const Node& node)
+ OutputVector average_pool(const Node& node)
{
return pooling::LocalPoolingFactory(node).make_avg_pool();
}
///
/// \return The vector containing Ngraph nodes producing output of ONNX AveragePool
/// operation.
- NodeVector average_pool(const Node& node);
+ OutputVector average_pool(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector batch_norm(const Node& node)
+ OutputVector batch_norm(const Node& node)
{
- NodeVector inputs{node.get_ng_inputs()};
+ OutputVector inputs{node.get_ng_inputs()};
auto x = inputs.at(0);
auto scale = inputs.at(1);
auto bias = inputs.at(2);
- std::shared_ptr<ngraph::Node> mean{nullptr};
- std::shared_ptr<ngraph::Node> var{nullptr};
+ Output<ngraph::Node> mean;
+ Output<ngraph::Node> var;
std::int64_t is_test{node.get_attribute_value<std::int64_t>("is_test", 1)};
double epsilon{node.get_attribute_value<double>("epsilon", 1e-5)};
{
namespace set_1
{
- NodeVector batch_norm(const Node& node);
+ OutputVector batch_norm(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector cast(const Node& node)
+ OutputVector cast(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
int64_t target_type = node.get_attribute_value<int64_t>("to");
{
namespace set_1
{
- NodeVector cast(const Node& node);
+ OutputVector cast(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector ceil(const Node& node)
+ inline OutputVector ceil(const Node& node)
{
return {std::make_shared<default_opset::Ceiling>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- NodeVector clip(const Node& node)
+ OutputVector clip(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
namespace set_11
{
- NodeVector clip(const Node& node)
+ OutputVector clip(const Node& node)
{
- const NodeVector inputs{node.get_ng_inputs()};
- const std::shared_ptr<ngraph::Node> data = inputs.at(0);
- const element::Type data_type = data->get_element_type();
- const Shape data_shape = data->get_shape();
- std::shared_ptr<ngraph::Node> min;
- std::shared_ptr<ngraph::Node> max;
+ const OutputVector inputs{node.get_ng_inputs()};
+ const Output<ngraph::Node> data = inputs.at(0);
+ const element::Type data_type = data.get_element_type();
+ const Shape data_shape = data.get_shape();
+ Output<ngraph::Node> min;
+ Output<ngraph::Node> max;
// If second input is provided, assign to min input, otherwise set lowest
// numeric limit of double as min input.
{
namespace set_1
{
- NodeVector clip(const Node& node);
+ OutputVector clip(const Node& node);
} // namespace set_1
namespace set_11
{
- NodeVector clip(const Node& node);
+ OutputVector clip(const Node& node);
} // namespace set_11
{
namespace set_1
{
- NodeVector concat(const Node& node)
+ OutputVector concat(const Node& node)
{
- NodeVector inputs{node.get_ng_inputs()};
+ OutputVector inputs{node.get_ng_inputs()};
std::int64_t axis = node.get_attribute_value<std::int64_t>("axis");
return {std::make_shared<default_opset::Concat>(inputs, axis)};
}
{
namespace set_1
{
- NodeVector concat(const Node& node);
+ OutputVector concat(const Node& node);
} // namespace set_1
}
}
- NodeVector constant(const onnx_import::Node& node)
+ OutputVector constant(const onnx_import::Node& node)
{
return {make_constant(node.get_attribute_value<Tensor>("value"))};
}
{
namespace set_1
{
- NodeVector constant(const Node& node);
+ OutputVector constant(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector constant_of_shape(const onnx_import::Node& node)
+ OutputVector constant_of_shape(const onnx_import::Node& node)
{
- std::shared_ptr<ngraph::Node> constant_value;
+ Output<ngraph::Node> constant_value;
if (node.has_attribute("value"))
{
auto value_tensor = node.get_attribute_value<Tensor>("value");
{
namespace set_1
{
- NodeVector constant_of_shape(const Node& node);
+ OutputVector constant_of_shape(const Node& node);
} // namespace set_1
namespace
{
std::shared_ptr<ngraph::op::Op>
- make_ng_convolution(const std::shared_ptr<ngraph::Node>& data,
- const std::shared_ptr<ngraph::Node>& filters,
+ make_ng_convolution(const Output<ngraph::Node>& data,
+ const Output<ngraph::Node>& filters,
const ngraph::Strides& strides,
const ngraph::Strides& dilations,
const ngraph::CoordinateDiff& padding_below,
{
if (groups > 1)
{
- auto filters_shape = filters->get_shape();
+ auto filters_shape = filters.get_shape();
filters_shape.at(0) = filters_shape.at(0) / groups;
filters_shape.insert(filters_shape.begin(), groups);
}
}
- std::shared_ptr<ngraph::Node>
- add_bias(const std::shared_ptr<ngraph::Node>& ng_conv,
- const std::shared_ptr<ngraph::Node>& bias)
+ std::shared_ptr<ngraph::Node> add_bias(const Output<ngraph::Node>& ng_conv,
+ const Output<ngraph::Node>& bias)
{
- const auto rank_of_conv =
- ng_conv->get_output_partial_shape(0).rank().get_length();
+ const auto rank_of_conv = ng_conv.get_partial_shape().rank().get_length();
// reshape the bias node {M} to {1, M, 1, 1, ..., 1}
// this is required by the addition operation that needs to be able
// to broadcast the bias to match the shape of the convolution node
std::vector<size_t> reshape_pattern_values(rank_of_conv, 1U);
- reshape_pattern_values[1] = bias->get_shape().front();
+ reshape_pattern_values[1] = bias.get_shape().front();
const auto reshape_pattern =
default_opset::Constant::create(element::u64,
Shape{reshape_pattern_values.size()},
}
} // namespace
- NodeVector conv(const Node& node)
+ OutputVector conv(const Node& node)
{
// in the current implementation we assume that the data input rank is static
// and only the 'batch' dimension can be dynamic
- const NodeVector& inputs = node.get_ng_inputs();
+ const OutputVector& inputs = node.get_ng_inputs();
const auto data = inputs.at(0);
const auto filters = inputs.at(1);
const auto groups = node.get_attribute_value<int64_t>("group", 1);
- NGRAPH_CHECK(data->get_output_partial_shape(0).rank().is_static(),
+ NGRAPH_CHECK(data.get_partial_shape().rank().is_static(),
"The input data tensor's rank has to be known (static)");
const auto strides = convpool::get_strides(node);
else
{
const auto bias = inputs.at(2);
- const auto bias_ps = bias->get_output_partial_shape(0);
+ const auto bias_ps = bias.get_partial_shape();
NGRAPH_CHECK(bias_ps.is_static() && is_vector(bias_ps.to_shape()),
"The bias input needs to be a static 1D vector");
///
/// \return The vector containing Ngraph nodes producing output of ONNX convolution
/// operation.
- NodeVector conv(const Node& node);
+ OutputVector conv(const Node& node);
} // namespace set_1
// limitations under the License.
//*****************************************************************************
+// Disabled in CMakeLists.txt
+// Update to higher opset required
+
#include "conv_integer.hpp"
#include "exceptions.hpp"
#include "ngraph/builder/make_constant.hpp"
{
namespace set_1
{
- NodeVector conv_integer(const Node& node)
+ OutputVector conv_integer(const Node& node)
{
- const NodeVector& inputs = node.get_ng_inputs();
+ const OutputVector& inputs = node.get_ng_inputs();
auto num_inputs = inputs.size();
auto input = inputs.at(0);
auto filters = inputs.at(1);
ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node);
auto& padding_below = paddings.first;
auto& padding_above = paddings.second;
- convpool::calculate_auto_pads(input->get_shape(),
- filters->get_shape(),
+ convpool::calculate_auto_pads(input.get_shape(),
+ filters.get_shape(),
window_movement_strides,
window_dilation_strides,
auto_pad_type,
padding_below,
padding_above);
- const Strides default_data_dilation_strides(input->get_shape().size() - 2, 1);
+ const Strides default_data_dilation_strides(input.get_shape().size() - 2, 1);
auto scale_one = make_constant(ngraph::element::f32, Shape{}, 1);
- auto input_zero_point = make_constant(input->get_element_type(), Shape{}, 0);
- auto filters_zero_point =
- make_constant(filters->get_element_type(), Shape{}, 0);
+ auto input_zero_point = make_constant(input.get_element_type(), Shape{}, 0);
+ auto filters_zero_point = make_constant(filters.get_element_type(), Shape{}, 0);
auto output_zero_point = make_constant(ngraph::element::i32, Shape{}, 0);
if (num_inputs == 2)
// limitations under the License.
//*****************************************************************************
+// Disabled in CMakeLists.txt
+// Update to higher opset required
+
#pragma once
#include "core/node.hpp"
///
/// \return The vector containing Ngraph nodes producing output of quantized ONNX
/// convolution operation.
- NodeVector conv_integer(const Node& node);
+ OutputVector conv_integer(const Node& node);
} // namespace set_1
{
namespace
{
- std::shared_ptr<ngraph::Node>
- make_group_conv_backprop(const std::shared_ptr<ngraph::Node>& data,
- const std::shared_ptr<ngraph::Node>& filters,
+ Output<ngraph::Node>
+ make_group_conv_backprop(const Output<ngraph::Node>& data,
+ const Output<ngraph::Node>& filters,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
}
}
- std::shared_ptr<ngraph::Node>
- make_conv_backprop(const std::shared_ptr<ngraph::Node>& data,
- const std::shared_ptr<ngraph::Node>& filters,
+ Output<ngraph::Node>
+ make_conv_backprop(const Output<ngraph::Node>& data,
+ const Output<ngraph::Node>& filters,
const Strides& strides,
const Strides& dilations,
const CoordinateDiff& pads_begin,
}
}
- std::shared_ptr<ngraph::Node>
- get_reshaped_filters(const std::shared_ptr<ngraph::Node>& filters,
- const PartialShape& filters_pshape,
- int64_t groups)
+ Output<ngraph::Node> get_reshaped_filters(const Output<ngraph::Node>& filters,
+ const PartialShape& filters_pshape,
+ int64_t groups)
{
if (filters_pshape.is_static())
{
}
}
- std::shared_ptr<ngraph::Node>
- get_prepared_bias(const std::shared_ptr<ngraph::Node>& bias,
- const std::shared_ptr<ngraph::Node>& conv)
+ Output<ngraph::Node> get_prepared_bias(const Output<ngraph::Node>& bias,
+ const Output<ngraph::Node>& conv)
{
// Prepare bias shape [1, C, 1, 1]
- const auto& conv_pshape = conv->get_output_partial_shape(0);
+ const auto& conv_pshape = conv.get_partial_shape();
std::shared_ptr<ngraph::Node> bias_shape_node;
if (conv_pshape.rank().is_static() && conv_pshape[1].is_static())
}
}
- NodeVector conv_transpose(const Node& node)
+ OutputVector conv_transpose(const Node& node)
{
- const NodeVector& inputs = node.get_ng_inputs();
+ const OutputVector& inputs = node.get_ng_inputs();
CHECK_VALID_NODE(node,
inputs.size() == 2 || inputs.size() == 3,
auto data = inputs[0];
auto filters = inputs[1];
- const auto& data_pshape = data->get_output_partial_shape(0);
- const auto& filters_pshape = filters->get_output_partial_shape(0);
+ const auto& data_pshape = data.get_partial_shape();
+ const auto& filters_pshape = filters.get_partial_shape();
std::size_t num_spatial_dims = 0;
Strides strides, dilations;
CHECK_VALID_NODE(
node, groups >= 0, "Incorrect value of 'group' attribute: ", groups);
- std::shared_ptr<ngraph::Node> conv_node;
+ Output<ngraph::Node> conv_node;
// reshape filters to match desired shape:
// [GROUPS, C_INPUT, C_OUTPUT, K_D, ..., K_1]
///
/// \return The vector containing Ngraph nodes producing output of ONNX convolution
/// operation.
- NodeVector conv_transpose(const Node& node);
+ OutputVector conv_transpose(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector cos(const Node& node)
+ OutputVector cos(const Node& node)
{
return {std::make_shared<default_opset::Cos>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- NodeVector cos(const Node& node);
+ OutputVector cos(const Node& node);
}
}
{
namespace set_1
{
- NodeVector cosh(const Node& node)
+ OutputVector cosh(const Node& node)
{
return {std::make_shared<default_opset::Cosh>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- NodeVector cosh(const Node& node);
+ OutputVector cosh(const Node& node);
}
}
} // namespace onnx_import
{
namespace set_1
{
- NodeVector cum_sum(const Node& node)
+ OutputVector cum_sum(const Node& node)
{
auto inputs = node.get_ng_inputs();
auto data = inputs.at(0);
bool exclusive = node.get_attribute_value<std::int64_t>("exclusive", 0);
bool reverse = node.get_attribute_value<std::int64_t>("reverse", 0);
- std::shared_ptr<ngraph::Node> axis;
+ Output<ngraph::Node> axis;
if (inputs.size() > 1)
{
axis =
default_opset::Constant::create(element::i64, Shape{}, {0}); // default
}
- return NodeVector{
+ return OutputVector{
std::make_shared<default_opset::CumSum>(data, axis, exclusive, reverse)};
}
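As cum_sum shows, Output<ngraph::Node> is default-constructible, so an optional input can be declared first and bound conditionally. A condensed sketch (inputs as above):

Output<ngraph::Node> axis;   // empty handle until assigned
if (inputs.size() > 1)
    axis = inputs.at(1);
else
    axis = default_opset::Constant::create(element::i64, Shape{}, {0});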
{
namespace set_1
{
- NodeVector cum_sum(const Node& node);
+ OutputVector cum_sum(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector depth_to_space(const Node& node)
+ OutputVector depth_to_space(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
const auto mode = node.get_attribute_value<std::string>("mode", "DCR");
? default_opset::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST
: default_opset::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST;
const auto block_size = node.get_attribute_value<std::int64_t>("blocksize");
- return NodeVector{std::make_shared<default_opset::DepthToSpace>(
+ return OutputVector{std::make_shared<default_opset::DepthToSpace>(
data, ngraph_mode, block_size)};
}
} // namespace set_1
///
/// \param[in] node The ONNX input node describing operation.
///
- /// \return NodeVector containing Tensor with shape:
+ /// \return OutputVector containing Tensor with shape:
/// [N, C/(blocksize * blocksize), H * blocksize, W * blocksize]
- NodeVector depth_to_space(const Node& node);
+ OutputVector depth_to_space(const Node& node);
} // namespace set_1
} // namespace op
{
namespace
{
- std::shared_ptr<ngraph::Node> get_zero_point(const NodeVector& inputs)
+ Output<ngraph::Node> get_zero_point(const OutputVector& inputs)
{
if (inputs.size() == 3 && !ngraph::op::is_null(inputs[2]))
{
auto zero_point = inputs[2];
- if (zero_point->get_element_type() != element::f32)
+ if (zero_point.get_element_type() != element::f32)
{
zero_point =
std::make_shared<default_opset::Convert>(zero_point, element::f32);
}
namespace set_1
{
- NodeVector dequantize_linear(const Node& node)
+ OutputVector dequantize_linear(const Node& node)
{
- const NodeVector inputs{node.get_ng_inputs()};
+ const OutputVector inputs{node.get_ng_inputs()};
NGRAPH_CHECK(
2 <= inputs.size() && inputs.size() <= 3,
const auto scale = inputs[1];
const auto zero_point = get_zero_point(inputs);
- common::validate_scalar_input("Dequantization scale", scale, {element::f32});
- common::validate_scalar_input("Zero point", zero_point);
+ common::validate_scalar_input(
+ "Dequantization scale", scale.get_node_shared_ptr(), {element::f32});
+ common::validate_scalar_input("Zero point", zero_point.get_node_shared_ptr());
const auto converted_x =
std::make_shared<default_opset::Convert>(x, element::f32);
{
namespace
{
- void validate_scale(const std::shared_ptr<ngraph::Node> scale,
- const std::shared_ptr<ngraph::Node> x,
+ void validate_scale(const Output<ngraph::Node> scale,
+ const Output<ngraph::Node> x,
const int64_t axis)
{
- const auto& scale_shape = scale->get_output_partial_shape(0);
+ const auto& scale_shape = scale.get_partial_shape();
NGRAPH_CHECK(scale_shape.rank().get_length() == 0 ||
scale_shape.rank().get_length() == 1,
"Dequantization scale needs to be a scalar or a vector.");
if (scale_shape.rank().get_length() == 1)
{
const auto& scale_dim = scale_shape[0];
- const auto& x_shape = x->get_output_partial_shape(0);
+ const auto& x_shape = x.get_partial_shape();
const auto& x_dim_at_axis = x_shape[axis];
NGRAPH_CHECK(scale_dim.same_scheme(x_dim_at_axis),
}
}
- void validate_zero_point(const std::shared_ptr<ngraph::Node> zero_point,
- const std::shared_ptr<ngraph::Node> x,
+ void validate_zero_point(const Output<ngraph::Node> zero_point,
+ const Output<ngraph::Node> x,
const int64_t axis)
{
- const auto& zero_point_shape = zero_point->get_output_partial_shape(0);
+ const auto& zero_point_shape = zero_point.get_partial_shape();
NGRAPH_CHECK(zero_point_shape.rank().get_length() == 0 ||
zero_point_shape.rank().get_length() == 1,
"Zero point needs to be a scalar or a vector.");
if (zero_point_shape.rank().get_length() == 1)
{
const auto& zero_point_dim = zero_point_shape[0];
- const auto& x_shape = x->get_output_partial_shape(0);
+ const auto& x_shape = x.get_partial_shape();
const auto& x_dim_at_axis = x_shape[axis];
NGRAPH_CHECK(zero_point_dim.same_scheme(x_dim_at_axis),
}
}
- std::shared_ptr<ngraph::Node>
- reshape_input(const std::shared_ptr<ngraph::Node> input,
- const int64_t axis,
- const PartialShape& x_shape)
+ std::shared_ptr<ngraph::Node> reshape_input(const Output<ngraph::Node> input,
+ const int64_t axis,
+ const PartialShape& x_shape)
{
std::vector<int64_t> target_dims;
}
}
- NodeVector dequantize_linear(const Node& node)
+ OutputVector dequantize_linear(const Node& node)
{
- const NodeVector inputs{node.get_ng_inputs()};
+ const OutputVector inputs{node.get_ng_inputs()};
NGRAPH_CHECK(2 <= inputs.size() && inputs.size() <= 3,
"The DequantizeLinear op expects 2 required and one optional "
auto scale = inputs[1];
auto zero_point = get_zero_point(inputs);
- const auto x_shape = x->get_output_partial_shape(0);
+ const auto x_shape = x.get_partial_shape();
NGRAPH_CHECK(x_shape.rank().is_static(),
"Rank of the input data tensor has to be known (static).");
{
namespace set_1
{
- NodeVector dequantize_linear(const Node& node);
+ OutputVector dequantize_linear(const Node& node);
} // namespace set_1
namespace set_13
{
- NodeVector dequantize_linear(const Node& node);
+ OutputVector dequantize_linear(const Node& node);
}
} // namespace op
{
namespace set_1
{
- inline NodeVector div(const Node& node)
+ inline OutputVector div(const Node& node)
{
const Output<ngraph::Node> lhs_node = node.get_ng_inputs().at(0);
Output<ngraph::Node> rhs_node = node.get_ng_inputs().at(1);
namespace set_7
{
- inline NodeVector div(const Node& node)
+ inline OutputVector div(const Node& node)
{
return {std::make_shared<default_opset::Divide>(node.get_ng_inputs().at(0),
node.get_ng_inputs().at(1))};
{
namespace set_1
{
- inline NodeVector dropout(const Node& node)
+ inline OutputVector dropout(const Node& node)
{
                // The first value is the actual output of Dropout;
                // the second is just a placeholder for the optional trailing output.
- return {node.get_ng_inputs().at(0), std::make_shared<NullNode>()};
+ return {node.get_ng_inputs().at(0).get_node_shared_ptr(),
+ std::make_shared<NullNode>()};
}
} // namespace set_1
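NullNode is the importer's placeholder for trimmed optional ONNX values, and the is_null() overload added earlier in this change detects it on Output<Node> handles too. A sketch (node is hypothetical):

OutputVector outs = dropout(node);
if (ngraph::op::is_null(outs.at(1)))
{
    // the optional mask output was not requested
}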
{
namespace set_1
{
- NodeVector elu(const Node& node)
+ OutputVector elu(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
double alpha = node.get_attribute_value<double>("alpha", 1);
- return NodeVector{std::make_shared<default_opset::Elu>(data, alpha)};
+ return OutputVector{std::make_shared<default_opset::Elu>(data, alpha)};
}
} // namespace set_1
{
namespace set_1
{
- NodeVector elu(const Node& node);
+ OutputVector elu(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector equal(const Node& node)
+ inline OutputVector equal(const Node& node)
{
return {std::make_shared<default_opset::Equal>(node.get_ng_inputs().at(0),
node.get_ng_inputs().at(1))};
{
namespace set_1
{
- inline NodeVector erf(const Node& node)
+ inline OutputVector erf(const Node& node)
{
return {std::make_shared<default_opset::Erf>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector exp(const Node& node)
+ inline OutputVector exp(const Node& node)
{
return {std::make_shared<default_opset::Exp>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- NodeVector expand(const Node& node)
+ OutputVector expand(const Node& node)
{
- const std::shared_ptr<ngraph::Node> data{node.get_ng_inputs().at(0)};
- const std::shared_ptr<ngraph::Node> shape{node.get_ng_inputs().at(1)};
+ const Output<ngraph::Node> data{node.get_ng_inputs().at(0)};
+ const Output<ngraph::Node> shape{node.get_ng_inputs().at(1)};
return {std::make_shared<default_opset::Broadcast>(
data, shape, ngraph::op::BroadcastType::BIDIRECTIONAL)};
// Expand operator has been available since version 8 of the default ONNX operator set.
// Currently, Expand is assigned to version 1 as a temporary measure.
{
- NodeVector expand(const Node& node);
+ OutputVector expand(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector eye_like(const Node& node)
+ OutputVector eye_like(const Node& node)
{
const auto input = node.get_ng_inputs().at(0);
- const auto& input_shape = input->get_shape();
+ const auto& input_shape = input.get_shape();
std::int64_t dtype;
element::Type target_type;
}
else
{
- target_type = input->get_element_type();
+ target_type = input.get_element_type();
}
CHECK_VALID_NODE(node,
{
namespace set_1
{
- NodeVector eye_like(const Node& node);
+ OutputVector eye_like(const Node& node);
} // namespace set_1
} // namespace op
{
namespace set_1
{
- NodeVector fake_quantize(const onnx_import::Node& node)
+ OutputVector fake_quantize(const onnx_import::Node& node)
{
const auto inputs = node.get_ng_inputs();
const auto X = inputs.at(0);
{
namespace set_1
{
- NodeVector fake_quantize(const Node& node);
+ OutputVector fake_quantize(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector flatten(const Node& node)
+ OutputVector flatten(const Node& node)
{
- NodeVector inputs{node.get_ng_inputs()};
+ OutputVector inputs{node.get_ng_inputs()};
auto data = inputs.at(0);
auto axis = node.get_attribute_value<std::int64_t>("axis", 1);
- const auto data_rank = data->get_output_partial_shape(0).rank();
+ const auto data_rank = data.get_partial_shape().rank();
if (data_rank.is_static())
{
{
namespace set_1
{
- NodeVector flatten(const Node& node);
+ OutputVector flatten(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector floor(const Node& node)
+ inline OutputVector floor(const Node& node)
{
return {std::make_shared<default_opset::Floor>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector gather(const Node& node)
+ inline OutputVector gather(const Node& node)
{
- NodeVector ng_inputs{node.get_ng_inputs()};
+ OutputVector ng_inputs{node.get_ng_inputs()};
auto data = ng_inputs.at(0);
auto indices = ng_inputs.at(1);
auto axis = node.get_attribute_value<int64_t>("axis", 0);
const auto valid_axis = ngraph::normalize_axis(
- node.get_description(), axis, data->get_output_partial_shape(0).rank());
+ node.get_description(), axis, data.get_partial_shape().rank());
return {std::make_shared<default_opset::Gather>(
data,
// limitations under the License.
//*****************************************************************************
+// Disabled in CMakeLists.txt
+// Update to higher opset required
+
#include "ngraph/opsets/opset0.hpp"
#include "utils/common.hpp"
{
namespace set_1
{
- NodeVector gather_nd(const Node& node)
+ OutputVector gather_nd(const Node& node)
{
- NodeVector ng_inputs{node.get_ng_inputs()};
+ OutputVector ng_inputs{node.get_ng_inputs()};
auto data = ng_inputs.at(0);
auto indices = ng_inputs.at(1);
// limitations under the License.
//*****************************************************************************
+// Disabled in CMakeLists.txt
+// Update to higher opset required
+
#pragma once
#include "core/node.hpp"
{
namespace set_1
{
- NodeVector gather_nd(const Node& node);
+ OutputVector gather_nd(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector gemm(const Node& node)
+ OutputVector gemm(const Node& node)
{
- NodeVector inputs{node.get_ng_inputs()};
- std::shared_ptr<ngraph::Node> input_a = inputs.at(0);
- std::shared_ptr<ngraph::Node> input_b = inputs.at(1);
- std::shared_ptr<ngraph::Node> input_c;
+ OutputVector inputs{node.get_ng_inputs()};
+ Output<ngraph::Node> input_a = inputs.at(0);
+ Output<ngraph::Node> input_b = inputs.at(1);
+ Output<ngraph::Node> input_c;
if (inputs.size() == 3)
{
else
{
input_c = default_opset::Constant::create(
- input_b->get_element_type(), ngraph::Shape{}, {0});
+ input_b.get_element_type(), ngraph::Shape{}, {0});
}
const auto alpha = node.get_attribute_value<float>("alpha", 1);
const auto beta = node.get_attribute_value<float>("beta", 1);
const auto alpha_node = default_opset::Constant::create(
- input_b->get_element_type(), Shape{}, {alpha});
+ input_b.get_element_type(), Shape{}, {alpha});
const auto beta_node = default_opset::Constant::create(
- input_c->get_element_type(), Shape{}, {beta});
+ input_c.get_element_type(), Shape{}, {beta});
const bool trans_a = node.get_attribute_value<int64_t>("transA", 0);
const bool trans_b = node.get_attribute_value<int64_t>("transB", 0);
auto beta_times_input_c =
std::make_shared<default_opset::Multiply>(beta_node, input_c);
- return NodeVector{
+ return OutputVector{
std::make_shared<default_opset::Add>(matmul_node, beta_times_input_c)};
}
namespace set_6
{
- NodeVector gemm(const Node& node)
+ OutputVector gemm(const Node& node)
{
- NodeVector inputs{node.get_ng_inputs()};
- std::shared_ptr<ngraph::Node> input_a = inputs.at(0);
- std::shared_ptr<ngraph::Node> input_b = inputs.at(1);
- std::shared_ptr<ngraph::Node> input_c;
+ OutputVector inputs{node.get_ng_inputs()};
+ Output<ngraph::Node> input_a = inputs.at(0);
+ Output<ngraph::Node> input_b = inputs.at(1);
+ Output<ngraph::Node> input_c;
if (inputs.size() == 3)
{
else
{
input_c = default_opset::Constant::create(
- input_b->get_element_type(), ngraph::Shape{}, {0});
+ input_b.get_element_type(), ngraph::Shape{}, {0});
}
const auto alpha = node.get_attribute_value<float>("alpha", 1);
const auto beta = node.get_attribute_value<float>("beta", 1);
const auto alpha_node = default_opset::Constant::create(
- input_b->get_element_type(), Shape{}, {alpha});
+ input_b.get_element_type(), Shape{}, {alpha});
const auto beta_node = default_opset::Constant::create(
- input_c->get_element_type(), Shape{}, {beta});
+ input_c.get_element_type(), Shape{}, {beta});
const bool trans_a = node.get_attribute_value<int64_t>("transA", 0);
const bool trans_b = node.get_attribute_value<int64_t>("transB", 0);
auto beta_times_input_c =
std::make_shared<default_opset::Multiply>(beta_node, input_c);
- return NodeVector{
+ return OutputVector{
std::make_shared<default_opset::Add>(matmul_node, beta_times_input_c)};
}
{
namespace set_1
{
- NodeVector gemm(const Node& node);
+ OutputVector gemm(const Node& node);
} // namespace set_1
namespace set_6
{
- NodeVector gemm(const Node& node);
+ OutputVector gemm(const Node& node);
} // namespace set_6
{
namespace set_1
{
- NodeVector global_average_pool(const Node& node)
+ OutputVector global_average_pool(const Node& node)
{
return pooling::GlobalPoolingFactory(node).make_avg_pool();
}
///
/// \return The vector containing Ngraph nodes producing output of ONNX
/// GlobalAveragePool operation.
- NodeVector global_average_pool(const Node& node);
+ OutputVector global_average_pool(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector global_max_pool(const Node& node)
+ OutputVector global_max_pool(const Node& node)
{
return pooling::GlobalPoolingFactory(node).make_max_pool();
}
///
/// \return The vector containing Ngraph nodes producing output of ONNX
/// GlobalMaxPool operation.
- NodeVector global_max_pool(const Node& node);
+ OutputVector global_max_pool(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector greater(const Node& node)
+ inline OutputVector greater(const Node& node)
{
return {std::make_shared<default_opset::Greater>(node.get_ng_inputs().at(0),
node.get_ng_inputs().at(1))};
if (linear_before_reset)
{
const auto& ng_inputs = node.get_ng_inputs();
- const auto el_type = ng_inputs.at(0)->get_output_element_type(0);
+ const auto el_type = ng_inputs.at(0).get_element_type();
if (ng_inputs.size() > 3 && !ngraph::op::is_null(ng_inputs.at(3)))
{
// ]
m_map[recurrent::OpInput::B] =
std::make_shared<default_opset::Concat>(
- NodeVector{wr_z_bias,
- wr_r_bias,
- split_bias.at(2),
- split_bias.at(5)},
+ OutputVector{wr_z_bias,
+ wr_r_bias,
+ split_bias.at(2),
+ split_bias.at(5)},
1);
}
else
{
const std::size_t hidden_size =
- m_map[recurrent::OpInput::R]->get_shape().back();
+ m_map[recurrent::OpInput::R].get_shape().back();
const std::size_t num_directions =
- m_map[recurrent::OpInput::W]->get_shape().front();
+ m_map[recurrent::OpInput::W].get_shape().front();
m_map[recurrent::OpInput::B] =
std::make_shared<default_opset::Constant>(
};
}
- NodeVector gru(const Node& node)
+ OutputVector gru(const Node& node)
{
constexpr std::size_t gates_count = 3;
GRUInputMap input_map{node, gates_count};
{
namespace set_1
{
- NodeVector gru(const Node& node);
+ OutputVector gru(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector hard_sigmoid(const Node& node)
+ OutputVector hard_sigmoid(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
const auto alpha = default_opset::Constant::create<double>(
- data->get_element_type(),
+ data.get_element_type(),
Shape{},
std::vector<double>{node.get_attribute_value<double>("alpha", 0.2)});
const auto beta = default_opset::Constant::create<double>(
- data->get_element_type(),
+ data.get_element_type(),
Shape{},
std::vector<double>{node.get_attribute_value<double>("beta", 0.5)});
{
namespace set_1
{
- NodeVector hard_sigmoid(const Node& node);
+ OutputVector hard_sigmoid(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector hardmax(const Node& node)
+ OutputVector hardmax(const Node& node)
{
const auto input = node.get_ng_inputs().at(0);
- const auto& input_shape = input->get_output_partial_shape(0);
+ const auto& input_shape = input.get_partial_shape();
auto axis = node.get_attribute_value<std::int64_t>("axis", 1);
if (input_shape.rank().is_static())
const auto coerced_tensor_shape =
std::make_shared<default_opset::ShapeOf>(coerced_tensor);
- std::shared_ptr<ngraph::Node> row_size =
- std::make_shared<default_opset::Gather>(
- coerced_tensor_shape,
- default_opset::Constant::create(element::i64, {1}, {1}),
- default_opset::Constant::create(element::i64, {}, {0}));
+ Output<ngraph::Node> row_size = std::make_shared<default_opset::Gather>(
+ coerced_tensor_shape,
+ default_opset::Constant::create(element::i64, {1}, {1}),
+ default_opset::Constant::create(element::i64, {}, {0}));
row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size);
const auto indices_axis = 1;
const auto results = std::make_shared<default_opset::OneHot>(
topk->output(1), row_size, on_value, off_value, indices_axis);
- const auto converted_results = std::make_shared<default_opset::Convert>(
- results, input->get_element_type());
+ const auto converted_results =
+ std::make_shared<default_opset::Convert>(results, input.get_element_type());
if (input_shape.is_static())
{
{
namespace set_1
{
- NodeVector hardmax(const Node& node);
+ OutputVector hardmax(const Node& node);
} // namespace set_1
} // namespace op
{
namespace set_1
{
- inline NodeVector identity(const Node& node)
+ inline OutputVector identity(const Node& node)
{
auto input = node.get_ng_inputs().at(0);
- if (input->get_element_type() == ngraph::element::boolean)
+ if (input.get_element_type() == ngraph::element::boolean)
{
const auto logic_zero =
default_opset::Constant::create(ngraph::element::boolean, {}, {false});
return {std::make_shared<default_opset::LogicalOr>(input, logic_zero)};
}
const auto zero =
- default_opset::Constant::create(input->get_element_type(), {}, {0});
+ default_opset::Constant::create(input.get_element_type(), {}, {0});
return {std::make_shared<default_opset::Add>(input, zero)};
}
} // namespace set_1
{
namespace set_1
{
- NodeVector image_scaler(const Node& node)
+ OutputVector image_scaler(const Node& node)
{
const auto inputs = node.get_ng_inputs();
NGRAPH_CHECK(
                    inputs.size() == 1, "ImageScaler expects 1 input tensor. Got: ", inputs.size());
const auto data = inputs[0];
- const auto& data_shape = data->get_output_partial_shape(0);
+ const auto& data_shape = data.get_partial_shape();
NGRAPH_CHECK(data_shape.rank().same_scheme({4}),
"ImageScaler expects a 4D tensor with NCHW format. Got: ",
data_shape);
{
namespace set_1
{
- NodeVector image_scaler(const Node& node);
+ OutputVector image_scaler(const Node& node);
}
}
}
{
namespace set_1
{
- NodeVector instance_norm(const Node& node)
+ OutputVector instance_norm(const Node& node)
{
Output<ngraph::Node> data(node.get_ng_inputs().at(0));
Output<ngraph::Node> scale(node.get_ng_inputs().at(1));
///
            /// \return Vector containing the resulting nGraph nodes.
///
- NodeVector instance_norm(const Node& node);
+ OutputVector instance_norm(const Node& node);
} // namespace set_1
} // namespace op
{
namespace set_1
{
- NodeVector leaky_relu(const Node& node)
+ OutputVector leaky_relu(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
double alpha = node.get_attribute_value<double>("alpha", 0.01);
                    node, alpha >= 0 && alpha <= 1, "alpha value should be in range [0, 1]");
std::shared_ptr<ngraph::Node> alpha_node =
- default_opset::Constant::create(data->get_element_type(), Shape{}, {alpha});
+ default_opset::Constant::create(data.get_element_type(), Shape{}, {alpha});
return {std::make_shared<default_opset::PRelu>(data, alpha_node)};
}
{
namespace set_1
{
- NodeVector leaky_relu(const Node& node);
+ OutputVector leaky_relu(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector less(const Node& node)
+ inline OutputVector less(const Node& node)
{
return {std::make_shared<default_opset::Less>(node.get_ng_inputs().at(0),
node.get_ng_inputs().at(1))};
{
namespace set_1
{
- NodeVector log(const Node& node)
+ OutputVector log(const Node& node)
{
return {std::make_shared<default_opset::Log>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- NodeVector log(const Node& node);
+ OutputVector log(const Node& node);
}
}
{
namespace set_1
{
- NodeVector log_softmax(const Node& node)
+ OutputVector log_softmax(const Node& node)
{
- NodeVector inputs{node.get_ng_inputs()};
+ OutputVector inputs{node.get_ng_inputs()};
const auto data = inputs.at(0);
- const auto data_rank = data->get_output_partial_shape(0).rank();
+ const auto data_rank = data.get_partial_shape().rank();
const auto axis = node.get_attribute_value<int64_t>("axis", 1);
const auto normalized_axis =
{
namespace set_1
{
- NodeVector log_softmax(const Node& node);
+ OutputVector log_softmax(const Node& node);
} // namespace set_1
///
/// \return true if termination condition is true and it cannot be changed
/// during Loop iterations, false otherwise.
- bool is_termination_condition_always_true(
- const std::shared_ptr<ngraph::Node>& loop_cond,
- const std::shared_ptr<ngraph::Node>& body_cond)
+ bool is_termination_condition_always_true(const Output<ngraph::Node>& loop_cond,
+ const Output<ngraph::Node>& body_cond)
{
bool loop_cond_value = false;
- if (ngraph::op::is_constant(loop_cond) &&
- loop_cond->get_element_type() == element::boolean)
+ if (ngraph::op::is_constant(loop_cond.get_node()) &&
+ loop_cond.get_element_type() == element::boolean)
{
- loop_cond_value = as_type_ptr<default_opset::Constant>(loop_cond)
+ loop_cond_value = as_type_ptr<default_opset::Constant>(
+ loop_cond.get_node_shared_ptr())
->cast_vector<bool>()
.at(0);
}
// value of loop_cond - true
// Identity op for boolean value is represented by LogicalOr op whose second
// input is always false
- if (is_type<default_opset::LogicalOr>(body_cond))
+ if (is_type<default_opset::LogicalOr>(body_cond.get_node_shared_ptr()))
{
- const auto second_input =
- body_cond->input_value(1).get_node_shared_ptr();
+ const auto second_input = body_cond.get_node_shared_ptr()
+ ->input_value(1)
+ .get_node_shared_ptr();
if (ngraph::op::is_constant(second_input) &&
second_input->get_element_type() == element::boolean &&
as_type_ptr<default_opset::Constant>(second_input)
}
}
- NodeVector loop(const Node& node)
+ OutputVector loop(const Node& node)
{
const auto& ng_inputs = node.get_ng_inputs();
// optional inputs
- const std::shared_ptr<ngraph::Node> trip_count = ng_inputs.at(0);
- const std::shared_ptr<ngraph::Node> loop_cond = ng_inputs.at(1);
+ const Output<ngraph::Node> trip_count = ng_inputs.at(0);
+ const Output<ngraph::Node> loop_cond = ng_inputs.at(1);
// At this moment nGraph TensorIterator doesn't have support for conditional
// termination of iterations.
// required
const Subgraph& body_graph{node.get_attribute_value<Subgraph>("body")};
- const auto& graph_outputs =
- ngraph::as_output_vector(body_graph.get_ng_outputs());
+ const auto& graph_outputs = body_graph.get_ng_outputs();
const auto& graph_inputs = body_graph.get_ng_parameters();
CHECK_VALID_NODE(
// input.
const auto loop_trip_count = std::make_shared<default_opset::Range>(
default_opset::Constant::create(
- trip_count->get_element_type(), Shape{}, {0}),
+ trip_count.get_element_type(), Shape{}, {0}),
ngraph::onnx_import::reshape::interpret_as_scalar(trip_count),
default_opset::Constant::create(
- trip_count->get_element_type(), Shape{}, {1}));
+ trip_count.get_element_type(), Shape{}, {1}));
// We iterate over trip_count input.
// start=0, stride=1, part_size=1, end=-1, axis=0
*graph_outputs_it, 0, 1, 1, -1, 0));
}
- NodeVector node_outputs;
+ OutputVector node_outputs;
for (const auto& v : final_values)
{
node_outputs.push_back(v.as_single_output_node());
///
            /// \return Vector containing the resulting nGraph nodes.
///
- NodeVector loop(const Node& node);
+ OutputVector loop(const Node& node);
} // namespace set_1
} // namespace op
{
namespace set_1
{
- NodeVector lp_norm(const Node& node)
+ OutputVector lp_norm(const Node& node)
{
- const std::shared_ptr<ngraph::Node> data{node.get_ng_inputs().at(0)};
- const auto data_shape = data->get_output_partial_shape(0);
+ const Output<ngraph::Node> data{node.get_ng_inputs().at(0)};
+ const auto data_shape = data.get_partial_shape();
const auto data_rank = data_shape.rank();
CHECK_VALID_NODE(
///
/// \return Vector containing the resulting nGraph nodes.
///
- NodeVector lp_norm(const Node& node);
+ OutputVector lp_norm(const Node& node);
} // namespace set_1
} // namespace op
{
namespace set_1
{
- NodeVector global_lp_pool(const Node& node)
+ OutputVector global_lp_pool(const Node& node)
{
- const std::shared_ptr<ngraph::Node> data{node.get_ng_inputs().at(0)};
+ const Output<ngraph::Node> data{node.get_ng_inputs().at(0)};
const std::size_t channel_axis{1};
- const auto data_shape = data->get_output_partial_shape(0);
+ const auto data_shape = data.get_partial_shape();
NGRAPH_CHECK(data_shape.rank().is_static(),
"Rank of input data must be static");
NGRAPH_CHECK(data_shape.rank().get_length() >= 2,
p_norm >= 0,
"Only positive (including zero) values are supported for 'p' attribute.");
- NodeVector slices =
+ OutputVector slices =
ngraph::builder::opset1::split(data, channels_count, channel_axis);
for (auto& slice : slices)
///
/// \return Vector containing the resulting nGraph nodes.
///
- NodeVector global_lp_pool(const Node& node);
+ OutputVector global_lp_pool(const Node& node);
} // namespace set_1
} // namespace op
{
namespace set_1
{
- NodeVector lrn(const Node& node)
+ OutputVector lrn(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
double alpha = node.get_attribute_value<double>("alpha", 1e-4);
{
namespace set_1
{
- NodeVector lrn(const Node& node);
+ OutputVector lrn(const Node& node);
} // namespace set_1
} // namespace op
struct LSTMNgInputMap
{
- using container_type = std::map<LSTMInput, std::shared_ptr<ngraph::Node>>;
+ using container_type = std::map<LSTMInput, Output<ngraph::Node>>;
using iterator = typename container_type::iterator;
explicit LSTMNgInputMap(const Node& node)
m_map[LSTMInput::LSTM_INPUT_R] = ng_inputs.at(2);
const std::size_t hidden_size =
- m_map[LSTMInput::LSTM_INPUT_R]->get_shape().back();
+ m_map[LSTMInput::LSTM_INPUT_R].get_shape().back();
const std::size_t batch_size =
- m_map[LSTMInput::LSTM_INPUT_X]->get_shape().at(0);
+ m_map[LSTMInput::LSTM_INPUT_X].get_shape().at(0);
const std::size_t num_directions =
- m_map[LSTMInput::LSTM_INPUT_W]->get_shape().front();
+ m_map[LSTMInput::LSTM_INPUT_W].get_shape().front();
// ------ Optional inputs ------
// The bias tensor for input gate. Shape [num_directions, 4*hidden_size]
Shape{batch_size},
std::vector<std::int32_t>(
batch_size,
- m_map[LSTMInput::LSTM_INPUT_X]->get_shape().at(1)));
+ m_map[LSTMInput::LSTM_INPUT_X].get_shape().at(1)));
}
// The initial value of the hidden.
// Shape [num_directions, batch_size, hidden_size]
}
}
- std::shared_ptr<ngraph::Node>& at(const LSTMInput& key)
- {
- return m_map.at(key);
- }
+ Output<ngraph::Node>& at(const LSTMInput& key) { return m_map.at(key); }
container_type m_map;
};
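// Sketch: with the container now holding Output<ngraph::Node>, shape queries
// switch from pointer dereference ('->') to value semantics ('.'):
LSTMNgInputMap input_map{node};
const auto& x = input_map.at(LSTMInput::LSTM_INPUT_X);
const std::size_t batch_size = x.get_shape().at(0); // was x->get_shape().at(0)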
namespace set_1
{
- NodeVector lstm(const Node& node)
+ OutputVector lstm(const Node& node)
{
LSTMNgInputMap input_map{node};
LSTMAttributes attributes{node};
{
namespace set_1
{
- NodeVector lstm(const Node& node);
+ OutputVector lstm(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector matmul(const Node& node)
+ OutputVector matmul(const Node& node)
{
return {std::make_shared<default_opset::MatMul>(node.get_ng_inputs().at(0),
node.get_ng_inputs().at(1))};
{
namespace set_1
{
- NodeVector matmul_integer(const Node& node)
+ OutputVector matmul_integer(const Node& node)
{
auto ng_inputs = node.get_ng_inputs();
auto factory = builder::MatmulIntegerFactory(
OutputVector(std::begin(ng_inputs), std::end(ng_inputs)));
- std::size_t left_rank{ng_inputs.at(0)->get_shape().size()};
- std::size_t right_rank{ng_inputs.at(1)->get_shape().size()};
+ std::size_t left_rank{ng_inputs.at(0).get_shape().size()};
+ std::size_t right_rank{ng_inputs.at(1).get_shape().size()};
if (left_rank == 0 || right_rank == 0)
{
///
/// \return The vector containing nGraph nodes producing the output of the
/// quantized ONNX matrix multiplication operation.
- NodeVector matmul_integer(const Node& node);
+ OutputVector matmul_integer(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector max(const Node& node)
+ inline OutputVector max(const Node& node)
{
return variadic::make_ng_variadic_op<default_opset::Maximum>(
node, ngraph::op::AutoBroadcastSpec::NONE);
namespace set_8
{
- inline NodeVector max(const Node& node)
+ inline OutputVector max(const Node& node)
{
return variadic::make_ng_variadic_op<default_opset::Maximum>(node);
}
{
namespace set_1
{
- NodeVector max_pool(const Node& node)
+ OutputVector max_pool(const Node& node)
{
auto max_pool = pooling::LocalPoolingFactory(node).make_max_pool();
max_pool.emplace_back(std::make_shared<NullNode>()); // Indices (optional)
/// \return The vector containing nGraph nodes producing the output of the
/// ONNX MaxPool operation.
///
- NodeVector max_pool(const Node& node);
+ OutputVector max_pool(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector mean(const Node& node)
+ OutputVector mean(const Node& node)
{
auto sum = variadic::make_ng_variadic_op<default_opset::Add>(node).front();
auto count = default_opset::Constant::create(
- sum->get_element_type(), Shape{}, {node.get_ng_inputs().size()});
+ sum.get_element_type(), Shape{}, {node.get_ng_inputs().size()});
return {std::make_shared<default_opset::Divide>(sum, count)};
}
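// Note: Mean over k inputs is lowered as (in_0 + ... + in_{k-1}) / k; the
// divisor constant is created with sum.get_element_type() so that both
// Divide operands share the same element type.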
{
namespace set_1
{
- NodeVector mean(const Node& node);
+ OutputVector mean(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector mean_variance_normalization(const Node& node)
+ OutputVector mean_variance_normalization(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
bool across_channels =
namespace set_9
{
- NodeVector mean_variance_normalization(const Node& node)
+ OutputVector mean_variance_normalization(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
auto axes = node.get_attribute_value<std::vector<int64_t>>("axes", {0, 2, 3});
const std::vector<std::size_t> normalized_axes = ngraph::normalize_axes(
- node.get_description(), axes, data->get_output_partial_shape(0).rank());
+ node.get_description(), axes, data.get_partial_shape().rank());
return {std::make_shared<default_opset::MVN>(data, AxisSet(normalized_axes))};
}
{
namespace set_1
{
- NodeVector mean_variance_normalization(const Node& node);
+ OutputVector mean_variance_normalization(const Node& node);
} // namespace set_1
namespace set_9
{
- NodeVector mean_variance_normalization(const Node& node);
+ OutputVector mean_variance_normalization(const Node& node);
} // namespace set_9
} // namespace op
{
namespace set_1
{
- inline NodeVector min(const Node& node)
+ inline OutputVector min(const Node& node)
{
return variadic::make_ng_variadic_op<default_opset::Minimum>(
node, ngraph::op::AutoBroadcastSpec::NONE);
namespace set_8
{
- inline NodeVector min(const Node& node)
+ inline OutputVector min(const Node& node)
{
return variadic::make_ng_variadic_op<default_opset::Minimum>(node);
}
{
namespace set_1
{
- NodeVector mod(const Node& node)
+ OutputVector mod(const Node& node)
{
- std::shared_ptr<ngraph::Node> dividend{node.get_ng_inputs().at(0)};
- std::shared_ptr<ngraph::Node> divisor{node.get_ng_inputs().at(1)};
+ Output<ngraph::Node> dividend{node.get_ng_inputs().at(0)};
+ Output<ngraph::Node> divisor{node.get_ng_inputs().at(1)};
std::int64_t fmod = node.get_attribute_value<std::int64_t>("fmod", 0);
CHECK_VALID_NODE(
{
namespace set_1
{
- NodeVector mod(const Node& node);
+ OutputVector mod(const Node& node);
} // namespace set_1
} // namespace op
{
namespace set_1
{
- inline NodeVector mul(const Node& node)
+ inline OutputVector mul(const Node& node)
{
const Output<ngraph::Node> lhs_node = node.get_ng_inputs().at(0);
Output<ngraph::Node> rhs_node = node.get_ng_inputs().at(1);
namespace set_7
{
- inline NodeVector mul(const Node& node)
+ inline OutputVector mul(const Node& node)
{
return {std::make_shared<default_opset::Multiply>(node.get_ng_inputs().at(0),
node.get_ng_inputs().at(1))};
{
namespace set_1
{
- inline NodeVector neg(const Node& node) { return {-node.get_ng_inputs().at(0)}; }
+ inline OutputVector neg(const Node& node) { return {-node.get_ng_inputs().at(0)}; }
} // namespace set_1
} // namespace op
{
namespace set_1
{
- NodeVector non_max_suppression(const Node& node)
+ OutputVector non_max_suppression(const Node& node)
{
// TODO: this op will not be tested until at least
// a reference implementation is added
const auto ng_inputs = node.get_ng_inputs();
- const std::shared_ptr<ngraph::Node> boxes = ng_inputs.at(0);
- const std::shared_ptr<ngraph::Node> scores = ng_inputs.at(1);
+ const Output<ngraph::Node> boxes = ng_inputs.at(0);
+ const Output<ngraph::Node> scores = ng_inputs.at(1);
- std::shared_ptr<ngraph::Node> max_output_boxes_per_class;
+ Output<ngraph::Node> max_output_boxes_per_class;
if (ng_inputs.size() > 2)
{
max_output_boxes_per_class =
default_opset::Constant::create(element::i64, Shape{}, {0});
}
- std::shared_ptr<ngraph::Node> iou_threshold;
+ Output<ngraph::Node> iou_threshold;
if (ng_inputs.size() > 3)
{
iou_threshold =
default_opset::Constant::create(element::f32, Shape{}, {.0f});
}
- std::shared_ptr<ngraph::Node> score_threshold;
+ Output<ngraph::Node> score_threshold;
if (ng_inputs.size() > 4)
{
score_threshold =
{
namespace set_1
{
- NodeVector non_max_suppression(const Node& node);
+ OutputVector non_max_suppression(const Node& node);
} // namespace set_1
} // namespace op
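// Sketch of the optional-input pattern above: a default-constructed
// Output<ngraph::Node> references no node, so presence can be tested with
// get_node() == nullptr; get_optional_input is a hypothetical helper:
Output<ngraph::Node> get_optional_input(const OutputVector& inputs,
                                        std::size_t index)
{
    Output<ngraph::Node> result; // empty: result.get_node() == nullptr
    if (inputs.size() > index)
    {
        result = inputs.at(index);
    }
    return result;
}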
{
namespace set_1
{
- NodeVector non_zero(const Node& node)
+ OutputVector non_zero(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
return {std::make_shared<default_opset::NonZero>(data, element::i64)};
///
/// \return The vector containing nGraph nodes producing the output of the
/// ONNX NonZero operation.
- NodeVector non_zero(const Node& node);
+ OutputVector non_zero(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector logical_not(const Node& node)
+ inline OutputVector logical_not(const Node& node)
{
return {
std::make_shared<default_opset::LogicalNot>(node.get_ng_inputs().at(0))};
{
namespace set_1
{
- NodeVector onehot(const Node& node)
+ OutputVector onehot(const Node& node)
{
- NodeVector inputs{node.get_ng_inputs()};
+ OutputVector inputs{node.get_ng_inputs()};
auto indices =
std::make_shared<default_opset::Convert>(inputs.at(0), element::i64);
auto depth = reshape::interpret_as_scalar(inputs.at(1));
{
namespace set_1
{
- NodeVector onehot(const Node& node);
+ OutputVector onehot(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector logical_or(const Node& node)
+ inline OutputVector logical_or(const Node& node)
{
return {std::make_shared<default_opset::LogicalOr>(node.get_ng_inputs().at(0),
node.get_ng_inputs().at(1))};
{
namespace set_1
{
- NodeVector pad(const Node& node)
+ OutputVector pad(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
- const auto data_rank =
- node.get_ng_inputs().at(0)->get_output_partial_shape(0).rank();
+ const auto data_rank = node.get_ng_inputs().at(0).get_partial_shape().rank();
CHECK_VALID_NODE(
node, data_rank.is_static(), "Data rank must be static for pad op");
const auto data_rank_value = data_rank.get_length();
std::make_shared<default_opset::Constant>(
element::i64, ngraph::Shape{padding_above.size()}, padding_above),
std::make_shared<default_opset::Constant>(
- data->get_element_type(), ngraph::Shape{}, std::vector<double>{value}),
+ data.get_element_type(), ngraph::Shape{}, std::vector<double>{value}),
pad_mode)};
}
} // namespace set_1
namespace set_11
{
- NodeVector pad(const Node& node)
+ OutputVector pad(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
auto pads = node.get_ng_inputs().at(1);
- std::shared_ptr<ngraph::Node> values;
- std::shared_ptr<ngraph::Node> padding_begin;
- std::shared_ptr<ngraph::Node> padding_end;
+ Output<ngraph::Node> values;
+ Output<ngraph::Node> padding_begin;
+ Output<ngraph::Node> padding_end;
if (node.get_ng_inputs().size() == 3)
{
else
{
values = default_opset::Constant::create(
- data->get_element_type(), ngraph::Shape{}, {0});
+ data.get_element_type(), ngraph::Shape{}, {0});
}
- if (ngraph::op::is_constant(pads))
+ if (ngraph::op::is_constant(pads.get_node()))
{
std::vector<std::int64_t> pads_vector =
- ngraph::as_type_ptr<default_opset::Constant>(pads)
+ ngraph::as_type_ptr<default_opset::Constant>(pads.get_node_shared_ptr())
->get_vector<std::int64_t>();
std::size_t const half_size = pads_vector.size() / 2;
{
auto axis =
default_opset::Constant::create(element::i64, ngraph::Shape{}, {0});
- NodeVector padding = builder::opset1::split(pads, 2, 0);
+ OutputVector padding = builder::opset1::split(pads, 2, 0);
padding_begin =
std::make_shared<default_opset::Convert>(padding.at(0), element::i64);
{
namespace set_1
{
- NodeVector pad(const Node& node);
+ OutputVector pad(const Node& node);
} // namespace set_1
namespace set_11
{
- NodeVector pad(const Node& node);
+ OutputVector pad(const Node& node);
} // namespace set_11
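// Sketch: ONNX 'pads' is laid out as [begin_0, ..., begin_n, end_0, ..., end_n].
// When 'pads' is a Constant it is split in half directly (half_size above);
// otherwise it is split at runtime with builder::opset1::split(pads, 2, 0).
// The values below are illustrative only:
std::vector<std::int64_t> pads_vector{1, 2, 3, 4}; // begins: {1, 2}, ends: {3, 4}
std::size_t const half_size = pads_vector.size() / 2;
std::vector<std::int64_t> begin_values(pads_vector.begin(),
                                       pads_vector.begin() + half_size);
std::vector<std::int64_t> end_values(pads_vector.begin() + half_size,
                                     pads_vector.end());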
{
namespace set_1
{
- inline NodeVector pow(const Node& node)
+ inline OutputVector pow(const Node& node)
{
return {std::make_shared<default_opset::Power>(node.get_ng_inputs().at(0),
node.get_ng_inputs().at(1))};
{
namespace set_1
{
- NodeVector prelu(const Node& node)
+ OutputVector prelu(const Node& node)
{
- NodeVector ng_inputs{node.get_ng_inputs()};
+ OutputVector ng_inputs{node.get_ng_inputs()};
const auto& data = ng_inputs.at(0);
const auto& slope = ng_inputs.at(1);
return {std::make_shared<default_opset::PRelu>(data, slope)};
{
namespace set_1
{
- NodeVector prelu(const Node& node);
+ OutputVector prelu(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector qlinear_matmul(const Node& node)
+ OutputVector qlinear_matmul(const Node& node)
{
auto ng_inputs = node.get_ng_inputs();
auto factory = builder::QLinearMatmulFactory(
(OutputVector(std::begin(ng_inputs), std::end(ng_inputs))));
- std::size_t left_rank{ng_inputs.at(0)->get_shape().size()};
- std::size_t right_rank{ng_inputs.at(1)->get_shape().size()};
+ std::size_t left_rank{ng_inputs.at(0).get_shape().size()};
+ std::size_t right_rank{ng_inputs.at(1).get_shape().size()};
if (left_rank == 0 || right_rank == 0)
{
{
namespace set_1
{
- NodeVector qlinear_matmul(const Node& node);
+ OutputVector qlinear_matmul(const Node& node);
} // namespace set_1
} // namespace op
// limitations under the License.
//*****************************************************************************
+// Disabled in CMakeLists.txt
+// Update to higher opset required
+
#include <cstddef>
#include <memory>
#include <vector>
{
struct OpScale
{
- std::shared_ptr<ngraph::Node> data_scale;
- std::shared_ptr<ngraph::Node> filter_scale;
- std::shared_ptr<ngraph::Node> output_scale;
+ Output<ngraph::Node> data_scale;
+ Output<ngraph::Node> filter_scale;
+ Output<ngraph::Node> output_scale;
};
struct OpZeroPoint
{
- std::shared_ptr<ngraph::Node> data_zero_point;
- std::shared_ptr<ngraph::Node> filter_zero_point;
- std::shared_ptr<ngraph::Node> output_zero_point;
+ Output<ngraph::Node> data_zero_point;
+ Output<ngraph::Node> filter_zero_point;
+ Output<ngraph::Node> output_zero_point;
};
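// Sketch: with Output<ngraph::Node> members both structs aggregate-initialize
// directly from the importer inputs; the indices below follow the ONNX
// QLinearConv input order and are shown for illustration only:
const OpScale op_scale{inputs.at(1),   // x_scale
                       inputs.at(4),   // w_scale
                       inputs.at(6)};  // y_scale
const OpZeroPoint op_zero_point{inputs.at(2),  // x_zero_point
                                inputs.at(5),  // w_zero_point
                                inputs.at(7)}; // y_zero_point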
std::shared_ptr<ngraph::Node>
- make_ng_quant_conv(const std::shared_ptr<ngraph::Node>& data,
- const std::shared_ptr<ngraph::Node>& filters,
+ make_ng_quant_conv(const Output<ngraph::Node>& data,
+ const Output<ngraph::Node>& filters,
const Strides& strides,
const Strides& filter_dilations,
const CoordinateDiff& padding_below,
int groups,
const OpScale& op_scale,
const OpZeroPoint& op_zero_point,
- const std::shared_ptr<ngraph::Node>& bias = nullptr)
+ const Output<ngraph::Node>& bias = nullptr)
{
ngraph::element::Type output_type;
- if (data->get_element_type() == ngraph::element::u8 &&
- filters->get_element_type() == ngraph::element::i8)
+ if (data.get_element_type() == ngraph::element::u8 &&
+ filters.get_element_type() == ngraph::element::i8)
{
output_type = ngraph::element::i8;
}
- else if (data->get_element_type() == ngraph::element::u8 &&
- filters->get_element_type() == ngraph::element::u8)
+ else if (data.get_element_type() == ngraph::element::u8 &&
+ filters.get_element_type() == ngraph::element::u8)
{
output_type = ngraph::element::u8;
}
{
// Split one convolution op into N ops, where N is the number of groups,
// and concatenate the results after computation.
- std::size_t n_data_channels{data->get_shape().at(1)};
- std::size_t n_filters_channels{filters->get_shape().at(0)};
+ std::size_t n_data_channels{data.get_shape().at(1)};
+ std::size_t n_filters_channels{filters.get_shape().at(0)};
std::size_t data_group_size{n_data_channels / groups};
std::size_t filters_group_size{n_filters_channels / groups};
- NodeVector convolution_nodes;
+ OutputVector convolution_nodes;
// initial bounds for slice
- std::vector<std::size_t> data_lower_bounds(data->get_shape().size());
- std::vector<std::size_t> data_upper_bounds{data->get_shape()};
+ std::vector<std::size_t> data_lower_bounds(data.get_shape().size());
+ std::vector<std::size_t> data_upper_bounds{data.get_shape()};
std::vector<std::size_t> filters_lower_bounds(
-                            filters->get_shape().size());
+                            filters.get_shape().size());
- std::vector<std::size_t> filters_upper_bounds{filters->get_shape()};
+ std::vector<std::size_t> filters_upper_bounds{filters.get_shape()};
for (int64_t group{0}; group < groups; ++group)
{
auto sliced_filters = std::make_shared<ngraph::opset0::Slice>(
filters, filters_lower_bounds, filters_upper_bounds);
- if (bias)
+ if (bias.get_node())
{
throw ngraph_error(
"Groups != 1 not supported for Quantized Convolution with "
}
else
{
- if (bias)
+ if (bias.get_node())
{
return ngraph::builder::quantization::
QuantizedLinearConvolutionBias(data,
} // namespace
- NodeVector quant_conv(const Node& node)
+ OutputVector quant_conv(const Node& node)
{
- const NodeVector& inputs = node.get_ng_inputs();
+ const OutputVector& inputs = node.get_ng_inputs();
auto data = inputs.at(0);
auto filters = inputs.at(3);
CHECK_VALID_NODE(node,
((groups >= 0) &&
- (groups <= static_cast<int64_t>(data->get_shape().at(1))) &&
- (groups <= static_cast<int64_t>(filters->get_shape().at(0)))),
+ (groups <= static_cast<int64_t>(data.get_shape().at(1))) &&
+ (groups <= static_cast<int64_t>(filters.get_shape().at(0)))),
"incorrect value of 'group' attribute: ",
groups);
- std::size_t n_data_channels{data->get_shape().at(1)};
- std::size_t n_filters_channels{filters->get_shape().at(0)};
+ std::size_t n_data_channels{data.get_shape().at(1)};
+ std::size_t n_filters_channels{filters.get_shape().at(0)};
CHECK_VALID_NODE(
node,
ngraph::op::PadType auto_pad_type = convpool::get_auto_pad(node);
CoordinateDiff& padding_below = paddings.first;
CoordinateDiff& padding_above = paddings.second;
- convpool::calculate_auto_pads(data->get_shape(),
- filters->get_shape(),
+ convpool::calculate_auto_pads(data.get_shape(),
+ filters.get_shape(),
strides,
filter_dilations,
auto_pad_type,
std::shared_ptr<ngraph::Node> conv_node = nullptr;
// no bias param
- if (inputs.size() == 9 && !inputs.at(8)->is_null())
+ if (inputs.size() == 9 && !ngraph::op::is_null(inputs.at(8)))
{
auto bias = inputs.at(8);
conv_node = make_ng_quant_conv(
// limitations under the License.
//*****************************************************************************
+// Disabled in CMakeLists.txt
+// Update to higher opset required
+
#pragma once
#include "core/node.hpp"
///
/// \return The vector containing nGraph nodes producing the output of the
/// quantized ONNX convolution operation.
- NodeVector quant_conv(const Node& node);
+ OutputVector quant_conv(const Node& node);
} // namespace set_1
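// Sketch: NullNode detection moves from a member call on the shared_ptr
// (inputs.at(8)->is_null()) to the free function ngraph::op::is_null(),
// which takes an Output<ngraph::Node> directly:
const bool has_bias = inputs.size() == 9 && !ngraph::op::is_null(inputs.at(8));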
{
namespace
{
- std::shared_ptr<ngraph::Node> get_zero_point(const NodeVector& inputs)
+ std::shared_ptr<ngraph::Node> get_zero_point(const OutputVector& inputs)
{
if (inputs.size() > 2)
{
- return inputs.at(2);
+ return inputs.at(2).get_node_shared_ptr();
}
else
{
namespace set_1
{
- NodeVector quantize_linear(const Node& node)
+ OutputVector quantize_linear(const Node& node)
{
- NodeVector inputs{node.get_ng_inputs()};
- auto x = inputs.at(0);
- auto y_scale = inputs.at(1);
+ OutputVector inputs{node.get_ng_inputs()};
+ auto x = inputs.at(0).get_node_shared_ptr();
+ auto y_scale = inputs.at(1).get_node_shared_ptr();
auto y_zero_point = detail::get_zero_point(inputs);
x = detail::validate_data(node, x);
namespace set_13
{
- NodeVector quantize_linear(const Node& node)
+ OutputVector quantize_linear(const Node& node)
{
- NodeVector inputs{node.get_ng_inputs()};
- auto x = inputs.at(0);
- auto y_scale = inputs.at(1);
+ OutputVector inputs{node.get_ng_inputs()};
+ auto x = inputs.at(0).get_node_shared_ptr();
+ auto y_scale = inputs.at(1).get_node_shared_ptr();
auto y_zero_point = detail::get_zero_point(inputs);
x = detail::validate_data(node, x);
{
namespace set_1
{
- NodeVector quantize_linear(const Node& node);
+ OutputVector quantize_linear(const Node& node);
} // namespace set_1
namespace set_13
{
- NodeVector quantize_linear(const Node& node);
+ OutputVector quantize_linear(const Node& node);
} // namespace set_13
{
namespace set_1
{
- NodeVector range(const Node& node)
+ OutputVector range(const Node& node)
{
- const std::shared_ptr<ngraph::Node> start{node.get_ng_inputs().at(0)};
- const std::shared_ptr<ngraph::Node> stop{node.get_ng_inputs().at(1)};
- const std::shared_ptr<ngraph::Node> step{node.get_ng_inputs().at(2)};
+ const Output<ngraph::Node> start{node.get_ng_inputs().at(0)};
+ const Output<ngraph::Node> stop{node.get_ng_inputs().at(1)};
+ const Output<ngraph::Node> step{node.get_ng_inputs().at(2)};
return {std::make_shared<default_opset::Range>(start, stop, step)};
}
} // namespace set_1
{
namespace set_1
{
- NodeVector range(const Node& node);
+ OutputVector range(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector reciprocal(const Node& node)
+ OutputVector reciprocal(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
auto one_node =
- default_opset::Constant::create(data->get_element_type(), Shape{}, {1});
+ default_opset::Constant::create(data.get_element_type(), Shape{}, {1});
return {std::make_shared<default_opset::Divide>(one_node, data)};
}
{
namespace set_1
{
- NodeVector reciprocal(const Node& node);
+ OutputVector reciprocal(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector reduce_log_sum(const Node& node)
+ OutputVector reduce_log_sum(const Node& node)
{
- std::shared_ptr<ngraph::Node> sum_node{reduction::make_ng_reduction_op(
+ Output<ngraph::Node> sum_node{reduction::make_ng_reduction_op(
node,
node.get_ng_inputs().at(0),
std::make_shared<default_opset::ReduceSum,
- const std::shared_ptr<ngraph::Node>&,
- const std::shared_ptr<ngraph::Node>&,
+ const Output<ngraph::Node>&,
+ const Output<ngraph::Node>&,
bool>)};
return {std::make_shared<default_opset::Log>(sum_node)};
}
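// Note: reduce_log_sum computes log(sum(x)); reduce_log_sum_exp below
// computes log(sum(exp(x))). Both reuse the same ReduceSum factory and differ
// only in the Exp applied to the input first.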
- NodeVector reduce_log_sum_exp(const Node& node)
+ OutputVector reduce_log_sum_exp(const Node& node)
{
auto exp_node =
std::make_shared<default_opset::Exp>(node.get_ng_inputs().at(0));
- std::shared_ptr<ngraph::Node> sum_node{reduction::make_ng_reduction_op(
+ Output<ngraph::Node> sum_node{reduction::make_ng_reduction_op(
node,
exp_node,
std::make_shared<default_opset::ReduceSum,
- const std::shared_ptr<ngraph::Node>&,
- const std::shared_ptr<ngraph::Node>&,
+ const Output<ngraph::Node>&,
+ const Output<ngraph::Node>&,
bool>)};
return {std::make_shared<default_opset::Log>(sum_node)};
}
- NodeVector reduce_l1(const Node& node)
+ OutputVector reduce_l1(const Node& node)
{
- auto l1_norm_reduction = [](const std::shared_ptr<ngraph::Node>& node,
+ auto l1_norm_reduction = [](const Output<ngraph::Node>& node,
const ngraph::AxisSet& axis_set) {
const auto axis_set_const = default_opset::Constant::create(
element::i64, {axis_set.size()}, axis_set.to_vector());
node, node.get_ng_inputs().at(0), l1_norm_reduction)};
}
- NodeVector reduce_l2(const Node& node)
+ OutputVector reduce_l2(const Node& node)
{
- auto l2_norm_reduction = [](const std::shared_ptr<ngraph::Node>& node,
+ auto l2_norm_reduction = [](const Output<ngraph::Node>& node,
const ngraph::AxisSet& axis_set) {
const auto axis_set_const = default_opset::Constant::create(
element::i64, {axis_set.size()}, axis_set.to_vector());
node, node.get_ng_inputs().at(0), l2_norm_reduction)};
}
- NodeVector reduce_max(const Node& node)
+ OutputVector reduce_max(const Node& node)
{
return {reduction::make_ng_reduction_op(
node,
node.get_ng_inputs().at(0),
std::make_shared<default_opset::ReduceMax,
- const std::shared_ptr<ngraph::Node>&,
- const std::shared_ptr<ngraph::Node>&,
+ const Output<ngraph::Node>&,
+ const Output<ngraph::Node>&,
bool>)};
}
- NodeVector reduce_mean(const Node& node)
+ OutputVector reduce_mean(const Node& node)
{
return {reduction::make_ng_reduction_op(
node,
node.get_ng_inputs().at(0),
std::make_shared<default_opset::ReduceMean,
- const std::shared_ptr<ngraph::Node>&,
- const std::shared_ptr<ngraph::Node>&,
+ const Output<ngraph::Node>&,
+ const Output<ngraph::Node>&,
bool>)};
}
- NodeVector reduce_min(const Node& node)
+ OutputVector reduce_min(const Node& node)
{
return {reduction::make_ng_reduction_op(
node,
node.get_ng_inputs().at(0),
std::make_shared<default_opset::ReduceMin,
- const std::shared_ptr<ngraph::Node>&,
- const std::shared_ptr<ngraph::Node>&,
+ const Output<ngraph::Node>&,
+ const Output<ngraph::Node>&,
bool>)};
}
- NodeVector reduce_prod(const Node& node)
+ OutputVector reduce_prod(const Node& node)
{
return {reduction::make_ng_reduction_op(
node,
node.get_ng_inputs().at(0),
std::make_shared<default_opset::ReduceProd,
- const std::shared_ptr<ngraph::Node>&,
- const std::shared_ptr<ngraph::Node>&,
+ const Output<ngraph::Node>&,
+ const Output<ngraph::Node>&,
bool>)};
}
- NodeVector reduce_sum(const Node& node)
+ OutputVector reduce_sum(const Node& node)
{
return {reduction::make_ng_reduction_op(
node,
node.get_ng_inputs().at(0),
std::make_shared<default_opset::ReduceSum,
- const std::shared_ptr<ngraph::Node>&,
- const std::shared_ptr<ngraph::Node>&,
+ const Output<ngraph::Node>&,
+ const Output<ngraph::Node>&,
bool>)};
}
- NodeVector reduce_sum_square(const Node& node)
+ OutputVector reduce_sum_square(const Node& node)
{
- auto input = std::shared_ptr<ngraph::Node>{node.get_ng_inputs().at(0)};
+ auto input = Output<ngraph::Node>{node.get_ng_inputs().at(0)};
auto square_node = std::make_shared<default_opset::Multiply>(input, input);
return {reduction::make_ng_reduction_op(
node,
square_node,
std::make_shared<default_opset::ReduceSum,
- const std::shared_ptr<ngraph::Node>&,
- const std::shared_ptr<ngraph::Node>&,
+ const Output<ngraph::Node>&,
+ const Output<ngraph::Node>&,
bool>)};
}
///
/// \return The nGraph node equivalent of the ONNX operation.
///
- NodeVector reduce_log_sum(const Node& node);
+ OutputVector reduce_log_sum(const Node& node);
/// \brief Compute the log sum exponent of the input tensor's elements along
/// the provided axes.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
- NodeVector reduce_log_sum_exp(const Node& node);
+ OutputVector reduce_log_sum_exp(const Node& node);
/// \brief Compute the L1 norm of the input tensor's elements along the provided
/// axes.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
- NodeVector reduce_l1(const Node& node);
+ OutputVector reduce_l1(const Node& node);
/// \brief Compute the L2 norm of the input tensor's elements along the provided
/// axes.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
- NodeVector reduce_l2(const Node& node);
+ OutputVector reduce_l2(const Node& node);
/// \brief Compute the maximum value of the input tensor's elements along the
/// provided axes.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
- NodeVector reduce_max(const Node& node);
+ OutputVector reduce_max(const Node& node);
/// \brief Compute the mean value of the input tensor's elements along the
/// provided axes.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
- NodeVector reduce_mean(const Node& node);
+ OutputVector reduce_mean(const Node& node);
/// \brief Compute the minimum value of the input tensor's elements along the
/// provided axes.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
- NodeVector reduce_min(const Node& node);
+ OutputVector reduce_min(const Node& node);
/// \brief Compute the product of the input tensor's elements along the
/// provided axes.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
- NodeVector reduce_prod(const Node& node);
+ OutputVector reduce_prod(const Node& node);
/// \brief Compute the sum of the input tensor's elements along the provided
/// axes.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
- NodeVector reduce_sum(const Node& node);
+ OutputVector reduce_sum(const Node& node);
/// \brief Compute the sum square of the input tensor's elements along the
/// provided axes.
///
/// \return The nGraph node equivalent of the ONNX operation.
///
- NodeVector reduce_sum_square(const Node& node);
+ OutputVector reduce_sum_square(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector relu(const Node& node)
+ inline OutputVector relu(const Node& node)
{
- NodeVector ng_inputs{node.get_ng_inputs()};
+ OutputVector ng_inputs{node.get_ng_inputs()};
return {std::make_shared<default_opset::Relu>(ng_inputs.at(0))};
}
{
namespace set_1
{
- NodeVector reshape(const Node& node)
+ OutputVector reshape(const Node& node)
{
- NodeVector ng_inputs{node.get_ng_inputs()};
+ OutputVector ng_inputs{node.get_ng_inputs()};
const auto data = ng_inputs.at(0);
- std::shared_ptr<ngraph::Node> pattern;
+ Output<ngraph::Node> pattern;
// Since opset 5 the target shape is provided as input
if (ng_inputs.size() == 2)
///
/// \return The nGraph node representing this operation.
///
- NodeVector reshape(const Node& node);
+ OutputVector reshape(const Node& node);
} // namespace set_1
{
namespace
{
- std::shared_ptr<ngraph::Node> calculate_output_shape_based_on_scales(
- const std::shared_ptr<ngraph::Node>& data,
- const std::shared_ptr<ngraph::Node>& scales)
+ std::shared_ptr<ngraph::Node>
+ calculate_output_shape_based_on_scales(const Output<ngraph::Node>& data,
+ const Output<ngraph::Node>& scales)
{
- const auto& data_shape = data->get_output_partial_shape(0);
- const auto& scales_shape = scales->get_output_partial_shape(0);
+ const auto& data_shape = data.get_partial_shape();
+ const auto& scales_shape = scales.get_partial_shape();
- if (ngraph::op::is_constant(scales) && data_shape.is_static())
+ if (ngraph::op::is_constant(scales.get_node()) && data_shape.is_static())
{
const auto scales_const =
- as_type_ptr<default_opset::Constant>(scales->shared_from_this());
+ as_type_ptr<default_opset::Constant>(scales.get_node_shared_ptr());
const auto scales_vector = scales_const->cast_vector<float>();
const auto data_static_shape = data_shape.to_shape();
}
const auto shape_of_data = std::make_shared<default_opset::Convert>(
- std::make_shared<default_opset::ShapeOf>(data), scales->get_element_type());
+ std::make_shared<default_opset::ShapeOf>(data), scales.get_element_type());
const auto multiply =
std::make_shared<default_opset::Multiply>(shape_of_data, scales);
const auto output_shape =
return output_shape;
}
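// Note: when 'scales' is a Constant and the data shape is static, the target
// shape can be computed once at import time; otherwise the same product is
// built into the graph dynamically as Multiply(Convert(ShapeOf(data)), scales).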
- NodeVector build_resize(const Node& node,
- const std::shared_ptr<ngraph::Node>& output_shape,
- const AxisSet& axes)
+ OutputVector build_resize(const Node& node,
+ const std::shared_ptr<ngraph::Node>& output_shape,
+ const AxisSet& axes)
{
const auto mode = node.get_attribute_value<std::string>("mode", "nearest");
namespace set_11
{
- NodeVector resize(const onnx_import::Node& node)
+ OutputVector resize(const onnx_import::Node& node)
{
// cubic_coeff_a, extrapolation_value attributes are ignored
// (they have no influence on the supported modes)
// in "tf_crop_and_resize" which is not handled now
const auto inputs = node.get_ng_inputs();
const auto& data = inputs.at(0);
- const auto& data_shape = data->get_output_partial_shape(0);
+ const auto& data_shape = data.get_partial_shape();
if (inputs.size() == 4) // sizes input is provided
{
const auto& sizes = inputs.at(3);
- const auto& sizes_shape = sizes->get_output_partial_shape(0);
+ const auto& sizes_shape = sizes.get_partial_shape();
CHECK_VALID_NODE(
node,
size_t axes_size = sizes_shape.is_static() ? sizes_shape[0].get_length()
: data_shape.rank().get_length();
- return build_resize(
- node, sizes, AxisSet(common::get_monotonic_range(axes_size)));
+ return build_resize(node,
+ sizes.get_node_shared_ptr(),
+ AxisSet(common::get_monotonic_range(axes_size)));
}
const auto& scales = inputs.at(2);
- const auto& scales_shape = scales->get_output_partial_shape(0);
+ const auto& scales_shape = scales.get_partial_shape();
CHECK_VALID_NODE(
node,
namespace set_1
{
- NodeVector resize(const onnx_import::Node& node)
+ OutputVector resize(const onnx_import::Node& node)
{
const auto inputs = node.get_ng_inputs();
const auto& data = inputs.at(0);
const auto& scales = inputs.at(1);
- const auto& data_shape = data->get_output_partial_shape(0);
- const auto& scales_shape = scales->get_output_partial_shape(0);
+ const auto& data_shape = data.get_partial_shape();
+ const auto& scales_shape = scales.get_partial_shape();
CHECK_VALID_NODE(
node,
{
namespace set_1
{
- NodeVector resize(const Node& node);
+ OutputVector resize(const Node& node);
} // namespace set_1
namespace set_11
{
- NodeVector resize(const Node& node);
+ OutputVector resize(const Node& node);
}
} // namespace op
{
namespace set_1
{
- NodeVector reverse_sequence(const Node& node)
+ OutputVector reverse_sequence(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
// nGraph supports only the int32 type for sequence_lengths
const auto sequence_lengths_i32 = std::make_shared<default_opset::Convert>(
node.get_ng_inputs().at(1), element::i32);
- const auto data_rank = data->get_output_partial_shape(0).rank();
+ const auto data_rank = data.get_partial_shape().rank();
const auto batch_axis = node.get_attribute_value<int64_t>("batch_axis", 1);
const auto normalized_batch_axis =
{
namespace set_1
{
- NodeVector reverse_sequence(const Node& node);
+ OutputVector reverse_sequence(const Node& node);
} // namespace set_1
};
}
- NodeVector rnn(const Node& node)
+ OutputVector rnn(const Node& node)
{
constexpr std::size_t gates_count = 1;
RNNInputMap input_map{node, gates_count};
{
namespace set_1
{
- NodeVector rnn(const Node& node);
+ OutputVector rnn(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector roi_align(const Node& node)
+ OutputVector roi_align(const Node& node)
{
const auto inputs = node.get_ng_inputs();
{
namespace set_1
{
- NodeVector roi_align(const Node& node);
+ OutputVector roi_align(const Node& node);
} // namespace set_1
// limitations under the License.
//*****************************************************************************
+// Disabled in CMakeLists.txt
+// Update to higher opset required
+
#include <memory>
#include "ngraph/opsets/opset0.hpp"
{
namespace set_1
{
- NodeVector round(const Node& node)
+ OutputVector round(const Node& node)
{
- const std::shared_ptr<ngraph::Node> data{node.get_ng_inputs().at(0)};
+ const Output<ngraph::Node> data{node.get_ng_inputs().at(0)};
return {std::make_shared<ngraph::opset0::Round>(data)};
}
} // namespace set_1
// limitations under the License.
//*****************************************************************************
+// Disabled in CMakeLists.txt
+// Update to higher opset required
+
#pragma once
#include "core/node.hpp"
{
namespace set_1
{
- NodeVector round(const Node& node);
+ OutputVector round(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector scatter_elements(const Node& node)
+ OutputVector scatter_elements(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
const auto indices = node.get_ng_inputs().at(1);
{
namespace set_1
{
- NodeVector scatter_elements(const Node& node);
+ OutputVector scatter_elements(const Node& node);
} // namespace set_1
// limitations under the License.
//*****************************************************************************
+// Disabled in CMakeLists.txt
+// Update to higher opset required
+
#include <memory>
#include "ngraph/opsets/opset0.hpp"
{
namespace set_1
{
- NodeVector scatter_nd(const Node& node)
+ OutputVector scatter_nd(const Node& node)
{
- NodeVector ng_inputs{node.get_ng_inputs()};
+ OutputVector ng_inputs{node.get_ng_inputs()};
auto data = ng_inputs.at(0);
auto indices = ng_inputs.at(1);
auto updates = ng_inputs.at(2);
// limitations under the License.
//*****************************************************************************
+// Disabled in CMakeLists.txt
+// Update to higher opset required
+
#pragma once
#include "core/node.hpp"
{
namespace set_1
{
- NodeVector scatter_nd(const Node& node);
+ OutputVector scatter_nd(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector selu(const Node& node)
+ OutputVector selu(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
auto alpha =
node.get_attribute_value<double>("alpha", 1.67326319217681884765625);
auto gamma =
node.get_attribute_value<double>("gamma", 1.05070102214813232421875);
auto alpha_node =
- default_opset::Constant::create(data->get_element_type(), Shape{}, {alpha});
+ default_opset::Constant::create(data.get_element_type(), Shape{}, {alpha});
auto gamma_node =
- default_opset::Constant::create(data->get_element_type(), Shape{}, {gamma});
+ default_opset::Constant::create(data.get_element_type(), Shape{}, {gamma});
return {std::make_shared<default_opset::Selu>(data, alpha_node, gamma_node)};
}
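// Sketch: Selu(x) = gamma * (x if x > 0 else alpha * (exp(x) - 1)), with
// 'alpha' and 'gamma' read from the node attributes above.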
{
namespace set_1
{
- NodeVector selu(const Node& node);
+ OutputVector selu(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector shape(const Node& node)
+ OutputVector shape(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
- const auto data_shape = data->get_output_partial_shape(0);
+ const auto data_shape = data.get_partial_shape();
if (data_shape.is_static())
{
{
namespace set_1
{
- NodeVector shape(const Node& node);
+ OutputVector shape(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector shrink(const Node& node)
+ OutputVector shrink(const Node& node)
{
const auto input = node.get_ng_inputs().at(0);
const float bias = node.get_attribute_value<float>("bias", 0.0f);
" must not be negative.");
std::shared_ptr<default_opset::Constant> negative_lambd;
- const auto input_element_type = input->get_element_type();
+ const auto input_element_type = input.get_element_type();
if (input_element_type.is_signed())
{
negative_lambd =
/// Input values greater than or equal to '-lambd' and less than or equal to
/// 'lambd' are zeroed out. 'bias' is added to values less than '-lambd'
/// and subtracted from values greater than 'lambd'.
- NodeVector shrink(const Node& node);
+ OutputVector shrink(const Node& node);
} // namespace set_1
} // namespace op
{
namespace set_1
{
- inline NodeVector sigmoid(const Node& node)
+ inline OutputVector sigmoid(const Node& node)
{
return {std::make_shared<default_opset::Sigmoid>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector sign(const Node& node)
+ inline OutputVector sign(const Node& node)
{
return {std::make_shared<default_opset::Sign>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector sin(const Node& node)
+ inline OutputVector sin(const Node& node)
{
return {std::make_shared<default_opset::Sin>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector sinh(const Node& node)
+ inline OutputVector sinh(const Node& node)
{
return {std::make_shared<default_opset::Sinh>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- NodeVector size(const Node& node)
+ OutputVector size(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
std::int64_t tensor_elements_count{
- static_cast<std::int64_t>(shape_size(data->get_shape()))};
+ static_cast<std::int64_t>(shape_size(data.get_shape()))};
return {std::make_shared<default_opset::Constant>(
ngraph::element::i64,
{
namespace set_1
{
- NodeVector size(const Node& node);
+ OutputVector size(const Node& node);
} // namespace set_1
///
/// \return Sub-graph representing the adjusted indices, or the input indices
///         if no transformation was needed.
- std::shared_ptr<ngraph::Node>
- adjust_indices_if_needed(const std::shared_ptr<ngraph::Node>& indices,
- const std::vector<uint64_t>& axes,
- uint64_t slice_indices_length,
- int64_t fill_in_value)
+ Output<ngraph::Node> adjust_indices_if_needed(const Output<ngraph::Node>& indices,
+ const std::vector<uint64_t>& axes,
+ uint64_t slice_indices_length,
+ int64_t fill_in_value)
{
const bool are_axes_sorted = std::is_sorted(axes.begin(), axes.end());
- const auto indices_shape = indices->get_output_partial_shape(0);
+ const auto indices_shape = indices.get_partial_shape();
// if length of slice indices vector is known
if (indices_shape.rank().is_static() &&
indices_shape.rank().get_length() == 1 && indices_shape[0].is_static())
namespace set_10
{
- NodeVector slice(const Node& node)
+ OutputVector slice(const Node& node)
{
- NodeVector inputs{node.get_ng_inputs()};
+ OutputVector inputs{node.get_ng_inputs()};
const auto data = inputs.at(0);
- const auto data_rank = data->get_output_partial_shape(0).rank();
+ const auto data_rank = data.get_partial_shape().rank();
auto starts = inputs.at(1);
auto ends = inputs.at(2);
// By default, Slice is calculated over all axes
- std::shared_ptr<ngraph::Node> axes;
+ Output<ngraph::Node> axes;
if (inputs.size() >= 4) // axes input provided
{
axes = inputs.at(3);
- CHECK_VALID_NODE(
- node, ngraph::op::is_constant(axes), "Axes input must be constant");
+ CHECK_VALID_NODE(node,
+ ngraph::op::is_constant(axes.get_node()),
+ "Axes input must be constant");
}
else
{
common::get_monotonic_range<int64_t>(data_rank_value));
}
- const auto axes_const = as_type_ptr<default_opset::Constant>(axes);
+ const auto axes_const =
+ as_type_ptr<default_opset::Constant>(axes.get_node_shared_ptr());
auto raw_axes_vec = axes_const->cast_vector<int64_t>();
std::vector<uint64_t> axes_vec =
get_normalized_axes_vector(node, data_rank, raw_axes_vec);
*std::max_element(std::begin(axes_vec), std::end(axes_vec)) + 1;
const auto begin_end_mask = axes_to_mask(axes_vec, slice_indices_length);
- std::shared_ptr<ngraph::Node> steps = nullptr;
+ Output<ngraph::Node> steps;
if (inputs.size() == 5) // steps input provided
{
steps = inputs.at(4);
namespace set_1
{
- NodeVector slice(const Node& node)
+ OutputVector slice(const Node& node)
{
- std::shared_ptr<ngraph::Node> data = node.get_ng_inputs().at(0);
- const auto data_rank = data->get_output_partial_shape(0).rank();
+ Output<ngraph::Node> data = node.get_ng_inputs().at(0);
+ const auto data_rank = data.get_partial_shape().rank();
const auto starts_atr =
node.get_attribute_value<std::vector<int64_t>>("starts");
std::vector<int64_t>(slice_indices_length, 1));
starts =
- adjust_indices_if_needed(starts, normalized_axes, slice_indices_length, 0);
- ends = adjust_indices_if_needed(ends, normalized_axes, slice_indices_length, 0);
+ adjust_indices_if_needed(starts, normalized_axes, slice_indices_length, 0)
+ .get_node_shared_ptr();
+ ends = adjust_indices_if_needed(ends, normalized_axes, slice_indices_length, 0)
+ .get_node_shared_ptr();
strides =
- adjust_indices_if_needed(strides, normalized_axes, slice_indices_length, 1);
+ adjust_indices_if_needed(strides, normalized_axes, slice_indices_length, 1)
+ .get_node_shared_ptr();
return {std::make_shared<default_opset::StridedSlice>(
data, starts, ends, strides, begin_end_mask, begin_end_mask)};
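// Sketch of the bridging pattern above: while starts/ends/strides are still
// declared as std::shared_ptr<ngraph::Node>, the Output<Node> returned by
// adjust_indices_if_needed() is converted back with get_node_shared_ptr();
// these conversions disappear once the callers migrate to Output<Node> too.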
{
namespace set_10
{
- NodeVector slice(const Node& node);
+ OutputVector slice(const Node& node);
} // namespace set_10
namespace set_1
{
- NodeVector slice(const Node& node);
+ OutputVector slice(const Node& node);
} // namespace set_1
{
namespace
{
- std::shared_ptr<ngraph::Node> onnx_softmax(const std::shared_ptr<ngraph::Node> data,
+ std::shared_ptr<ngraph::Node> onnx_softmax(const Output<ngraph::Node> data,
const int64_t axis)
{
const auto coerced_data = ngraph::builder::opset1::flatten(data, axis);
std::make_shared<default_opset::Subtract>(coerced_data, reshaped_max);
const auto result = std::make_shared<default_opset::Softmax>(data_minus_max, 1);
- if (data->get_output_partial_shape(0).is_static())
+ if (data.get_partial_shape().is_static())
{
- return ngraph::builder::opset1::reshape(result, data->get_output_shape(0));
+ return ngraph::builder::opset1::reshape(result, data.get_shape());
}
else
{
{
namespace set_1
{
- NodeVector softmax(const Node& node)
+ OutputVector softmax(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
- const auto data_rank = data->get_output_partial_shape(0).rank();
+ const auto data_rank = data.get_partial_shape().rank();
NGRAPH_CHECK(data_rank.is_static(),
"ONNX Softmax data rank needs to be known (static)");
case 0:
{
result =
- default_opset::Constant::create(data->get_element_type(), Shape{}, {1});
+ default_opset::Constant::create(data.get_element_type(), Shape{}, {1});
break;
}
case 1:
{
// checks if the axis belongs to the allowed values set (-1 and 0 for 1D)
ngraph::normalize_axis(
- node.get_description(), axis, data->get_output_partial_shape(0).rank());
+ node.get_description(), axis, data.get_partial_shape().rank());
result = std::make_shared<default_opset::Softmax>(data, 0);
break;
}
default:
{
const auto normalized_axis = ngraph::normalize_axis(
- node.get_description(), axis, data->get_output_partial_shape(0).rank());
+ node.get_description(), axis, data.get_partial_shape().rank());
result = onnx_softmax(data, normalized_axis);
break;
{
namespace set_1
{
- NodeVector softmax(const Node& node);
+ OutputVector softmax(const Node& node);
} // namespace set_1
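// Note: onnx_softmax above subtracts the row-wise maximum before applying
// Softmax; mathematically softmax(x) == softmax(x - max(x)), but the shifted
// form avoids overflow in exp() for large input values.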
{
namespace set_1
{
- NodeVector softplus(const Node& node)
+ OutputVector softplus(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
const std::shared_ptr<ngraph::Node> zero_node =
- default_opset::Constant::create(data->get_element_type(), Shape{}, {0.f});
+ default_opset::Constant::create(data.get_element_type(), Shape{}, {0.f});
const std::shared_ptr<ngraph::Node> one_node =
- default_opset::Constant::create(data->get_element_type(), Shape{}, {1.f});
+ default_opset::Constant::create(data.get_element_type(), Shape{}, {1.f});
// data + log(exp(-data) + 1)
const std::shared_ptr<ngraph::Node> positive_val_node =
{
namespace set_1
{
- NodeVector softplus(const Node& node);
+ OutputVector softplus(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector softsign(const Node& node)
+ OutputVector softsign(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
std::shared_ptr<ngraph::Node> one_node =
- default_opset::Constant::create(data->get_element_type(), Shape{}, {1});
+ default_opset::Constant::create(data.get_element_type(), Shape{}, {1});
auto abs_data = std::make_shared<default_opset::Abs>(data);
auto data_plus_one_node =
std::make_shared<default_opset::Add>(abs_data, one_node);
{
namespace set_1
{
- NodeVector softsign(const Node& node);
+ OutputVector softsign(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector space_to_depth(const Node& node)
+ OutputVector space_to_depth(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
std::size_t block_size = node.get_attribute_value<std::int64_t>("blocksize");
const auto mode = default_opset::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST;
- return NodeVector{
+ return OutputVector{
std::make_shared<default_opset::SpaceToDepth>(data, mode, block_size)};
}
} // namespace set_1
///
/// \param[in] node The ONNX input node describing operation.
///
- /// \return NodeVector containing Tensor with shape:
+ /// \return OutputVector containing Tensor with shape:
/// [N, C * blocksize * blocksize, H / blocksize, W / blocksize]
- NodeVector space_to_depth(const Node& node);
+ OutputVector space_to_depth(const Node& node);
} // namespace set_1
} // namespace op
{
namespace set_1
{
- NodeVector split(const Node& node)
+ OutputVector split(const Node& node)
{
const auto input = node.get_ng_inputs().at(0);
const auto axis = node.get_attribute_value<int64_t>("axis", 0);
{
namespace set_1
{
- NodeVector split(const Node& node);
+ OutputVector split(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector sqrt(const Node& node)
+ inline OutputVector sqrt(const Node& node)
{
return {std::make_shared<default_opset::Sqrt>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- NodeVector squeeze(const Node& node)
+ OutputVector squeeze(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
std::vector<std::int64_t> axes =
node.get_attribute_value<std::vector<std::int64_t>>("axes", {});
- const auto data_rank = data->get_output_partial_shape(0).rank();
+ const auto data_rank = data.get_partial_shape().rank();
std::vector<std::size_t> normalized_axes =
ngraph::normalize_axes(node.get_description(), axes, data_rank);
{
namespace set_1
{
- NodeVector squeeze(const Node& node);
+ OutputVector squeeze(const Node& node);
} // namespace set_1
{
namespace set_1
{
- inline NodeVector sub(const Node& node)
+ inline OutputVector sub(const Node& node)
{
const Output<ngraph::Node> lhs_node = node.get_ng_inputs().at(0);
Output<ngraph::Node> rhs_node = node.get_ng_inputs().at(1);
namespace set_7
{
- inline NodeVector sub(const Node& node)
+ inline OutputVector sub(const Node& node)
{
return {std::make_shared<default_opset::Subtract>(node.get_ng_inputs().at(0),
node.get_ng_inputs().at(1))};
{
namespace set_1
{
- inline NodeVector sum(const Node& node)
+ inline OutputVector sum(const Node& node)
{
return variadic::make_ng_variadic_op<default_opset::Add>(
node, ngraph::op::AutoBroadcastSpec::NONE);
namespace set_8
{
- inline NodeVector sum(const Node& node)
+ inline OutputVector sum(const Node& node)
{
return variadic::make_ng_variadic_op<default_opset::Add>(node);
}
| Identity | 1- | 0,1 |
| InstanceNormalization | 1- | 0, (1) | Have to change to only v1 ops (NGONNX-1015)
| LRN | 1- | 0,1 |
-| LSTM | 1-7- | 0,(1) | (NGONNX-1015), there is `GetOutputElement` (3 outputs), LSTMSequence and LSTMCell uses some v0 ops which aren't used in v1
+| LSTM | 1-7- | 0,(1) |
| LeakyRelu | 1-6- | 0,(1) | (NGONNX-1015)
| Less | 1-7-9 | 0,1 |
| Log | 1-6- | 0,1 |
{
namespace set_1
{
- inline NodeVector tan(const Node& node)
+ inline OutputVector tan(const Node& node)
{
return {std::make_shared<default_opset::Tan>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- inline NodeVector tanh(const Node& node)
+ inline OutputVector tanh(const Node& node)
{
return {std::make_shared<default_opset::Tanh>(node.get_ng_inputs().at(0))};
}
{
namespace set_1
{
- NodeVector thresholded_relu(const Node& node)
+ OutputVector thresholded_relu(const Node& node)
{
const auto data = node.get_ng_inputs().at(0);
const double alpha = node.get_attribute_value<double>("alpha", 1.0);
const auto alpha_node =
- default_opset::Constant::create(data->get_element_type(), Shape{}, {alpha});
+ default_opset::Constant::create(data.get_element_type(), Shape{}, {alpha});
const auto data_map = std::make_shared<default_opset::Convert>(
std::make_shared<default_opset::Greater>(data, alpha_node),
- data->get_element_type());
+ data.get_element_type());
return {std::make_shared<default_opset::Multiply>(data, data_map)};
}
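// Sketch: ThresholdedRelu(x) = x * 1[x > alpha]; the boolean Greater result
// is Convert-ed back to x's element type so the Multiply operands match.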
{
namespace set_1
{
- NodeVector thresholded_relu(const Node& node);
+ OutputVector thresholded_relu(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector tile(const Node& node)
+ OutputVector tile(const Node& node)
{
auto input = node.get_ng_inputs().at(0);
auto repeats = node.get_ng_inputs().at(1);
///
/// \param node The ONNX node object representing this operation.
/// \return The vector containing an nGraph node producing the output of the Tile op.
- NodeVector tile(const Node& node);
+ OutputVector tile(const Node& node);
} // namespace set_1
#include "default_opset.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
-#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/topk.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
std::int64_t axis{node.get_attribute_value<std::int64_t>("axis", -1)};
const auto data = node.get_ng_inputs().at(0);
- const auto data_rank = data->get_output_partial_shape(0).rank();
+ const auto data_rank = data.get_partial_shape().rank();
return ngraph::normalize_axis(node.get_description(), axis, data_rank);
}
/// \return The second input to the TopK node, reshaped to a scalar.
- std::shared_ptr<ngraph::Node> get_k(const ngraph::onnx_import::Node& node)
+ ngraph::Output<ngraph::Node> get_k(const ngraph::onnx_import::Node& node)
{
auto k_node = node.get_ng_inputs().at(1);
- NGRAPH_CHECK(shape_size(k_node->get_shape()) == 1,
+ NGRAPH_CHECK(shape_size(k_node.get_shape()) == 1,
"ONNX TopK operator: 'K' parameter must contain a single positive value.",
node);
return ngraph::onnx_import::reshape::interpret_as_scalar(k_node);
}
-
- /// \return Return the outputs of the TopK node.
- ngraph::NodeVector get_outputs(const std::shared_ptr<ngraph::Node>& node)
- {
- std::shared_ptr<ngraph::Node> values =
- std::make_shared<ngraph::op::v0::GetOutputElement>(node, 0);
- std::shared_ptr<ngraph::Node> indices =
- std::make_shared<ngraph::op::v0::GetOutputElement>(node, 1);
-
- return {values, indices};
- }
}
namespace ngraph
{
namespace set_1
{
- NodeVector topk(const Node& node)
+ OutputVector topk(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
std::int64_t k{node.get_attribute_value<std::int64_t>("k")};
default_opset::TopK::SortType::SORT_VALUES,
element::i64);
- return get_outputs(top_k);
+ return {top_k->output(0), top_k->output(1)};
}
}
namespace set_10
{
- NodeVector topk(const Node& node)
+ OutputVector topk(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
auto k = get_k(node);
default_opset::TopK::SortType::SORT_VALUES,
element::i64);
- return get_outputs(top_k);
+ return {top_k->output(0), top_k->output(1)};
}
}
namespace set_11
{
- NodeVector topk(const Node& node)
+ OutputVector topk(const Node& node)
{
// Process inputs
auto data = node.get_ng_inputs().at(0);
std::shared_ptr<ngraph::Node> top_k = std::make_shared<default_opset::TopK>(
data, k, axis, mode, sort_type, element::i64);
- return get_outputs(top_k);
+ return {top_k->output(0), top_k->output(1)};
}
}
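// Sketch: multi-output nodes are now unpacked with Node::output(i) instead of
// wrapping each result in a v0::GetOutputElement node (helper removed above):
auto top_k = std::make_shared<default_opset::TopK>(
    data, k, axis, mode, sort_type, element::i64);
OutputVector results{top_k->output(0),  // values
                     top_k->output(1)}; // indices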
/// \param node The ONNX node object representing this operation.
/// \return The vector containing nGraph nodes producing the output of the
/// ONNX TopK operation (both values and indices).
- NodeVector topk(const Node& node);
+ OutputVector topk(const Node& node);
}
/// \brief Performs TopK operation from ONNX version 1.5
/// attribute.
namespace set_10
{
- NodeVector topk(const Node& node);
+ OutputVector topk(const Node& node);
}
/// \brief Performs TopK operation from ONNX version 1.6
/// \details ONNX op set 11 added support for `largest` and `sorted` attributes.
namespace set_11
{
- NodeVector topk(const Node& node);
+ OutputVector topk(const Node& node);
}
} // namespace op
{
namespace set_1
{
- NodeVector transpose(const Node& node)
+ OutputVector transpose(const Node& node)
{
- std::shared_ptr<ngraph::Node> data = node.get_ng_inputs().at(0);
+ Output<ngraph::Node> data = node.get_ng_inputs().at(0);
auto permute_axes =
node.get_attribute_value<std::vector<std::size_t>>("perm", {});
{
namespace set_1
{
- NodeVector transpose(const Node& node);
+ OutputVector transpose(const Node& node);
} // namespace set_1
{
namespace set_1
{
- NodeVector unsqueeze(const Node& node)
+ OutputVector unsqueeze(const Node& node)
{
auto data = node.get_ng_inputs().at(0);
auto axes = node.get_attribute_value<std::vector<std::int64_t>>("axes", {});
{
namespace set_1
{
- NodeVector unsqueeze(const Node& node);
+ OutputVector unsqueeze(const Node& node);
} // namespace set_1
namespace set_1
{
- NodeVector upsample(const onnx_import::Node& node)
+ OutputVector upsample(const onnx_import::Node& node)
{
const auto inputs = node.get_ng_inputs();
const auto data = inputs.at(0);
- const auto data_shape = data->get_output_partial_shape(0);
+ const auto data_shape = data.get_partial_shape();
const auto scales = node.get_attribute_value<std::vector<float>>("scales");
const auto mode = node.get_attribute_value<std::string>("mode", "nearest");
namespace set_9
{
- NodeVector upsample(const onnx_import::Node& node)
+ OutputVector upsample(const onnx_import::Node& node)
{
const auto inputs = node.get_ng_inputs();
const auto data = inputs.at(0);
const auto scales = inputs.at(1);
- const auto data_shape = data->get_output_partial_shape(0);
- const auto scales_shape = scales->get_output_partial_shape(0);
+ const auto data_shape = data.get_partial_shape();
+ const auto scales_shape = scales.get_partial_shape();
const auto mode = node.get_attribute_value<std::string>("mode", "nearest");
check_mode_support(node, mode);
attrs.axes.insert(ax);
}
- if (ngraph::op::is_constant(scales) && data_shape.is_static())
+ if (ngraph::op::is_constant(scales.get_node()) && data_shape.is_static())
{
const auto scales_const =
- as_type_ptr<default_opset::Constant>(scales->shared_from_this());
+ as_type_ptr<default_opset::Constant>(scales.get_node_shared_ptr());
auto scales_vector = scales_const->cast_vector<float>();
auto data_static_shape = data_shape.to_shape();
{
namespace set_1
{
- NodeVector upsample(const Node& node);
+ OutputVector upsample(const Node& node);
} // namespace set_1
namespace set_9
{
- NodeVector upsample(const Node& node);
+ OutputVector upsample(const Node& node);
} // namespace set_9
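A recurring accessor pattern in these hunks: shape and type queries move from the producing node onto the Output<ngraph::Node> handle itself. A small self-contained sketch (describe_output is a hypothetical helper used only for illustration):

#include <iostream>
#include "ngraph/ngraph.hpp"
#include "ngraph/op/util/op_types.hpp"

void describe_output(const ngraph::Output<ngraph::Node>& value)
{
    // Shorthand for value.get_node()->get_output_partial_shape(value.get_index()).
    const auto& pshape = value.get_partial_shape();
    std::cout << "shape: " << pshape << ", type: " << value.get_element_type() << "\n";
    // is_constant takes a raw Node*, hence get_node() rather than a shared_ptr.
    if (ngraph::op::is_constant(value.get_node()))
    {
        std::cout << "produced by a Constant node\n";
    }
}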
{
namespace set_1
{
- inline NodeVector where(const Node& node)
+ inline OutputVector where(const Node& node)
{
- NodeVector ng_inputs{node.get_ng_inputs()};
+ OutputVector ng_inputs{node.get_ng_inputs()};
return {std::make_shared<default_opset::Select>(
ng_inputs.at(0), ng_inputs.at(1), ng_inputs.at(2))};
{
namespace set_1
{
- inline NodeVector logical_xor(const Node& node)
+ inline OutputVector logical_xor(const Node& node)
{
return {std::make_shared<default_opset::LogicalXor>(
node.get_ng_inputs().at(0),
make_topk_subgraph(default_opset::TopK::Mode mode) const;
const std::int64_t m_keep_dims;
- std::shared_ptr<ngraph::Node> m_input_node;
+ Output<ngraph::Node> m_input_node;
std::int64_t m_axis;
};
{
Shape get_kernel_shape(const Node& node)
{
- const auto& data_shape = node.get_ng_inputs().at(0)->get_output_partial_shape(0);
+ const auto& data_shape = node.get_ng_inputs().at(0).get_partial_shape();
const size_t input_spatial_dims = data_shape.rank().get_length() - 2;
return node.get_attribute_value<std::vector<size_t>>(
"kernel_shape", std::vector<size_t>(input_spatial_dims, 1UL));
std::vector<std::size_t> get_attr_default_value(const Node& node,
const std::string& attr_name)
{
- const auto data_rank =
- node.get_ng_inputs().at(0)->get_output_partial_shape(0).rank();
+ const auto data_rank = node.get_ng_inputs().at(0).get_partial_shape().rank();
CHECK_VALID_NODE(node,
data_rank.is_static(),
"If '",
std::pair<CoordinateDiff, CoordinateDiff> get_pads(const Node& node)
{
- const auto data_rank =
- node.get_ng_inputs().at(0)->get_output_partial_shape(0).rank();
+ const auto data_rank = node.get_ng_inputs().at(0).get_partial_shape().rank();
CHECK_VALID_NODE(node,
data_rank.is_static(),
"The rank of node must be static in order to calculate pads");
m_padding_above = Shape{std::begin(padding_above), std::end(padding_above)};
}
- NodeVector PoolingFactory::make_avg_pool() const
+ OutputVector PoolingFactory::make_avg_pool() const
{
const bool count_include_pad =
m_onnx_node.get_attribute_value<std::int64_t>("count_include_pad", 0);
m_auto_pad)};
}
- NodeVector PoolingFactory::make_max_pool() const
+ OutputVector PoolingFactory::make_max_pool() const
{
return {std::make_shared<default_opset::MaxPool>(m_inputs.at(0),
m_strides,
GlobalPoolingFactory::GlobalPoolingFactory(const Node& node)
: PoolingFactory(node)
{
- const auto data_shape = node.get_ng_inputs().at(0)->get_output_partial_shape(0);
+ const auto data_shape = node.get_ng_inputs().at(0).get_partial_shape();
const auto data_rank = data_shape.rank();
CHECK_VALID_NODE(
node, data_rank.is_static(), "Data rank must be static for global pooling ops");
/// \brief Creates average pooling ONNX operation.
/// \return Vector of outputs.
///
- NodeVector make_avg_pool() const;
+ OutputVector make_avg_pool() const;
///
/// \brief Creates max pooling ONNX operation.
/// \return Vector of outputs.
///
- NodeVector make_max_pool() const;
+ OutputVector make_max_pool() const;
protected:
explicit PoolingFactory(const Node& node);
Node m_onnx_node;
- const NodeVector m_inputs;
+ const OutputVector m_inputs;
Shape m_kernel_shape;
Strides m_strides;
Strides m_dilations;
m_map[OpInput::W] = ng_inputs.at(1);
m_map[OpInput::R] = ng_inputs.at(2);
- const auto el_type = ng_inputs.at(0)->get_output_element_type(0);
+ const auto el_type = ng_inputs.at(0).get_element_type();
- const auto x_pshape = m_map[OpInput::X]->get_output_partial_shape(0);
- const auto w_pshape = m_map[OpInput::W]->get_output_partial_shape(0);
- const auto r_pshape = m_map[OpInput::R]->get_output_partial_shape(0);
+ const auto x_pshape = m_map[OpInput::X].get_partial_shape();
+ const auto w_pshape = m_map[OpInput::W].get_partial_shape();
+ const auto r_pshape = m_map[OpInput::R].get_partial_shape();
NGRAPH_CHECK(x_pshape.rank().is_static() && x_pshape[0].is_static() &&
x_pshape[1].is_static(),
"RecurrentSequence input X must have static \"seq_length\" and "
"RecurrentSequence input R must have static \"hidden_size\" "
"(innermost) dimension.");
- const std::size_t hidden_size = m_map[OpInput::R]->get_shape().back();
- const std::size_t batch_size = m_map[OpInput::X]->get_shape().at(1);
- const std::size_t num_directions = m_map[OpInput::W]->get_shape().front();
+ const std::size_t hidden_size = m_map[OpInput::R].get_shape().back();
+ const std::size_t batch_size = m_map[OpInput::X].get_shape().at(1);
+ const std::size_t num_directions = m_map[OpInput::W].get_shape().front();
if (ng_inputs.size() > 3 && !ngraph::op::is_null(ng_inputs.at(3)))
{
else
{
m_map[OpInput::SEQ_LENGTHS] = std::make_shared<default_opset::Constant>(
- element::i32, Shape{batch_size}, m_map[OpInput::X]->get_shape().at(0));
+ element::i32, Shape{batch_size}, m_map[OpInput::X].get_shape().at(0));
}
// The initial value of the hidden.
if (ng_inputs.size() > 5 && !ngraph::op::is_null(ng_inputs.at(5)))
{
}
- std::shared_ptr<ngraph::Node>& OpInputMap::at(const OpInput& key)
- {
- return m_map.at(key);
- }
-
- const std::shared_ptr<ngraph::Node>& OpInputMap::at(const OpInput& key) const
+ Output<ngraph::Node>& OpInputMap::at(const OpInput& key) { return m_map.at(key); }
+ const Output<ngraph::Node>& OpInputMap::at(const OpInput& key) const
{
return m_map.at(key);
}
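A reduced stand-in for the container change, with a trimmed OpInput enum for illustration only: because Output<Node> is a cheap value type, map entries can be rebound to new producers without touching node ownership.

#include <map>
#include "ngraph/ngraph.hpp"

enum class OpInput { X, W, R }; // illustrative subset

std::map<OpInput, ngraph::Output<ngraph::Node>> make_input_map(
    const ngraph::OutputVector& ng_inputs)
{
    std::map<OpInput, ngraph::Output<ngraph::Node>> map;
    map[OpInput::X] = ng_inputs.at(0);
    map[OpInput::W] = ng_inputs.at(1);
    map[OpInput::R] = ng_inputs.at(2);
    return map;
}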
{
}
- NodeVector RecurrentSequence::run_sequence(const RecurrentCellFunction& kernel)
+ OutputVector RecurrentSequence::run_sequence(const RecurrentCellFunction& kernel)
{
- NodeVector results;
+ OutputVector results;
if (m_direction == ngraph::op::RecurrentSequenceDirection::FORWARD ||
m_direction == ngraph::op::RecurrentSequenceDirection::REVERSE)
{
}
else if (m_direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL)
{
- NodeVector fwd_results{recurrent_sequence_pass(kernel)};
- NodeVector rev_results{recurrent_sequence_pass(kernel, true)};
+ OutputVector fwd_results{recurrent_sequence_pass(kernel)};
+ OutputVector rev_results{recurrent_sequence_pass(kernel, true)};
// Stack together respective outputs from both forward and reverse passes.
std::shared_ptr<ngraph::Node> Y{std::make_shared<default_opset::Concat>(
- NodeVector{fwd_results.at(0), rev_results.at(0)}, 1)};
+ OutputVector{fwd_results.at(0), rev_results.at(0)}, 1)};
results.push_back(Y);
std::shared_ptr<ngraph::Node> Y_h{std::make_shared<default_opset::Concat>(
- NodeVector{fwd_results.at(1), rev_results.at(1)}, 0)};
+ OutputVector{fwd_results.at(1), rev_results.at(1)}, 0)};
results.push_back(Y_h);
}
else
return results;
}
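The BIDIRECTIONAL branch above pairs up the forward and reverse results and concatenates each pair along the num_directions axis. Roughly, per pair (stack_directions is hypothetical; the code above uses axis 1 for Y and axis 0 for Y_h):

#include <cstdint>
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset1.hpp"

ngraph::Output<ngraph::Node> stack_directions(const ngraph::Output<ngraph::Node>& fwd,
                                              const ngraph::Output<ngraph::Node>& rev,
                                              std::int64_t axis)
{
    // Concat consumes the OutputVector directly; no NodeVector round-trip.
    return std::make_shared<ngraph::opset1::Concat>(ngraph::OutputVector{fwd, rev}, axis)
        ->output(0);
}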
- NodeVector
+ OutputVector
RecurrentSequence::recurrent_sequence_pass(const RecurrentCellFunction& kernel,
bool is_reverse)
{
OutputVector h_list;
// back-up nodes which we may later modify.
- std::shared_ptr<ngraph::Node> orig_W = m_args.at(OpInput::W);
- std::shared_ptr<ngraph::Node> orig_R = m_args.at(OpInput::R);
- std::shared_ptr<ngraph::Node> orig_B = m_args.at(OpInput::B);
-
- std::shared_ptr<ngraph::Node> X = m_args.at(OpInput::X);
- std::shared_ptr<ngraph::Node> H_t =
- prepare_input(m_args.at(OpInput::INIT_H), is_reverse);
- std::shared_ptr<ngraph::Node> W = prepare_input(m_args.at(OpInput::W), is_reverse);
- std::shared_ptr<ngraph::Node> R = prepare_input(m_args.at(OpInput::R), is_reverse);
- std::shared_ptr<ngraph::Node> B = prepare_input(m_args.at(OpInput::B), is_reverse);
- std::shared_ptr<ngraph::Node> seq_lengths = m_args.at(OpInput::SEQ_LENGTHS);
+ Output<ngraph::Node> orig_W = m_args.at(OpInput::W);
+ Output<ngraph::Node> orig_R = m_args.at(OpInput::R);
+ Output<ngraph::Node> orig_B = m_args.at(OpInput::B);
+
+ Output<ngraph::Node> X = m_args.at(OpInput::X);
+ Output<ngraph::Node> H_t = prepare_input(m_args.at(OpInput::INIT_H), is_reverse);
+ Output<ngraph::Node> W = prepare_input(m_args.at(OpInput::W), is_reverse);
+ Output<ngraph::Node> R = prepare_input(m_args.at(OpInput::R), is_reverse);
+ Output<ngraph::Node> B = prepare_input(m_args.at(OpInput::B), is_reverse);
+ Output<ngraph::Node> seq_lengths = m_args.at(OpInput::SEQ_LENGTHS);
m_args.at(OpInput::W) = W;
m_args.at(OpInput::R) = R;
X, seq_lengths, 1 /*batch_axis*/, 0 /*seq_axis*/);
}
- OutputVector in_seq_steps =
- as_output_vector(builder::opset1::split(X, X->get_shape().at(0)));
+ OutputVector in_seq_steps = builder::opset1::split(X, X.get_shape().at(0));
for (auto& in_x : in_seq_steps)
{
RecurrentSequence::prepare_input(Output<ngraph::Node> node, bool is_reverse) const
{
// In bidirectional mode inputs are stacked together, so we must split them.
- std::shared_ptr<ngraph::Node> tmp = node.get_node_shared_ptr();
+ Output<ngraph::Node> tmp = node;
if (m_direction == ngraph::op::RecurrentSequenceDirection::BIDIRECTIONAL)
{
tmp = builder::opset1::split(node, 2).at(is_reverse ? 1 : 0);
///
struct OpInputMap
{
- using container_type = std::map<OpInput, std::shared_ptr<ngraph::Node>>;
+ using container_type = std::map<OpInput, Output<ngraph::Node>>;
explicit OpInputMap(const onnx_import::Node& node, std::size_t gates_count);
OpInputMap(container_type&& map);
virtual ~OpInputMap() = default;
- std::shared_ptr<ngraph::Node>& at(const OpInput& key);
- const std::shared_ptr<ngraph::Node>& at(const OpInput& key) const;
+ Output<ngraph::Node>& at(const OpInput& key);
+ const Output<ngraph::Node>& at(const OpInput& key) const;
container_type m_map;
};
///
/// \return The output vector containing results from all sequence steps.
///
- NodeVector run_sequence(const RecurrentCellFunction& kernel);
+ OutputVector run_sequence(const RecurrentCellFunction& kernel);
private:
///
///
/// \return The output vector with pass results.
///
- NodeVector recurrent_sequence_pass(const RecurrentCellFunction& kernel,
- bool is_reverse = false);
+ OutputVector recurrent_sequence_pass(const RecurrentCellFunction& kernel,
+ bool is_reverse = false);
OpInputMap& m_args;
ngraph::op::RecurrentSequenceDirection m_direction;
auto reduction_axes =
node.get_attribute_value<std::vector<std::int64_t>>("axes", {});
- const auto input_rank =
- node.get_ng_inputs().at(0)->get_output_partial_shape(0).rank();
+ const auto input_rank = node.get_ng_inputs().at(0).get_partial_shape().rank();
std::vector<std::size_t> normalized_axes =
ngraph::normalize_axes(node.get_description(), reduction_axes, input_rank);
}
} // namespace detail
- std::shared_ptr<ngraph::Node>
- make_ng_reduction_op(const Node& node,
- const std::shared_ptr<ngraph::Node>& ng_input,
- ReductionFunction reduction_function)
+ std::shared_ptr<ngraph::Node> make_ng_reduction_op(const Node& node,
+ const Output<ngraph::Node>& ng_input,
+ ReductionFunction reduction_function)
{
- auto data_shape = ng_input->get_shape();
+ auto data_shape = ng_input.get_shape();
auto reduction_axes = detail::get_reduction_axes(node);
std::shared_ptr<ngraph::Node>
make_ng_reduction_op(const Node& node,
- const std::shared_ptr<ngraph::Node>& ng_input,
+ const Output<ngraph::Node>& ng_input,
RuntimeReductionFunction reduction_function)
{
- const auto data_ps = node.get_ng_inputs().at(0)->get_output_partial_shape(0);
+ const auto data_ps = node.get_ng_inputs().at(0).get_partial_shape();
NGRAPH_CHECK(data_ps.rank().is_static(),
"Reduction operations input rank is required to be static");
// An overload for reduction operators that take reduction axes as input
using RuntimeReductionFunction = std::function<std::shared_ptr<ngraph::Node>(
- const std::shared_ptr<ngraph::Node>&, const std::shared_ptr<ngraph::Node>&, bool)>;
+ const Output<ngraph::Node>&, const std::shared_ptr<ngraph::Node>&, bool)>;
// An overload for reduction operators that take reduction axes as an attribute
using ReductionFunction = std::function<std::shared_ptr<ngraph::Node>(
- const std::shared_ptr<ngraph::Node>&, const ngraph::AxisSet&)>;
+ const Output<ngraph::Node>&, const ngraph::AxisSet&)>;
///
/// \brief Create an nGraph version of an ONNX reduction operation.
///
std::shared_ptr<ngraph::Node>
make_ng_reduction_op(const Node& node,
- const std::shared_ptr<ngraph::Node>& ng_input,
+ const Output<ngraph::Node>& ng_input,
ReductionFunction reduction_function);
///
///
std::shared_ptr<ngraph::Node>
make_ng_reduction_op(const Node& node,
- const std::shared_ptr<ngraph::Node>& ng_input,
+ const Output<ngraph::Node>& ng_input,
RuntimeReductionFunction reduction_function);
} // namespace reduction
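For reference, a callable compatible with the updated ReductionFunction alias; opset1::ReduceSum stands in for whichever reduction the importer actually binds:

#include <cstdint>
#include <vector>
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset1.hpp"

std::shared_ptr<ngraph::Node> reduce_sum(const ngraph::Output<ngraph::Node>& data,
                                         const ngraph::AxisSet& axes)
{
    // AxisSet is a std::set<size_t>; materialize it as an i64 constant input.
    const std::vector<std::int64_t> axes_vec{axes.begin(), axes.end()};
    const auto axes_node = ngraph::opset1::Constant::create(
        ngraph::element::i64, ngraph::Shape{axes_vec.size()}, axes_vec);
    return std::make_shared<ngraph::opset1::ReduceSum>(data, axes_node, false);
}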
return inferred_dims;
}
- std::shared_ptr<ngraph::Node>
- interpret_as_scalar(const std::shared_ptr<ngraph::Node>& node)
+ Output<ngraph::Node> interpret_as_scalar(const Output<ngraph::Node>& node)
{
- Shape node_shape = node->get_shape();
+ Shape node_shape = node.get_shape();
// If node is already a scalar, return original
if (is_scalar(node_shape))
node_shape);
// If node is a Constant, recreate as Constant with Shape{}
- if (ngraph::op::is_constant(node))
+ if (ngraph::op::is_constant(node.get_node()))
{
const auto value =
- ngraph::as_type_ptr<default_opset::Constant>(node)->get_data_ptr();
+ ngraph::as_type_ptr<default_opset::Constant>(node.get_node_shared_ptr())
+ ->get_data_ptr();
return std::make_shared<default_opset::Constant>(
- node->get_element_type(), ngraph::Shape{}, value);
+ node.get_element_type(), ngraph::Shape{}, value);
}
return builder::opset1::reshape(node, Shape{});
///
/// \return The original output or an output representing a reshape of the original.
///
- std::shared_ptr<ngraph::Node>
- interpret_as_scalar(const std::shared_ptr<ngraph::Node>& node);
+ Output<ngraph::Node> interpret_as_scalar(const Output<ngraph::Node>& node);
} // namespace reshape
} // namespace onnx_import
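The scalar-reinterpretation trick above in isolation, assuming the value holds exactly one element: a Constant is rebuilt with Shape{} from its raw buffer, and anything else falls back to a runtime reshape (as_scalar is a hypothetical helper):

#include "ngraph/ngraph.hpp"
#include "ngraph/builder/reshape.hpp"
#include "ngraph/op/util/op_types.hpp"
#include "ngraph/opsets/opset1.hpp"

ngraph::Output<ngraph::Node> as_scalar(const ngraph::Output<ngraph::Node>& value)
{
    if (ngraph::op::is_constant(value.get_node()))
    {
        const auto constant = ngraph::as_type_ptr<ngraph::opset1::Constant>(
            value.get_node_shared_ptr());
        // Recreate the constant with an empty shape; no runtime reshape needed.
        return std::make_shared<ngraph::opset1::Constant>(
                   constant->get_element_type(), ngraph::Shape{}, constant->get_data_ptr())
            ->output(0);
    }
    return ngraph::builder::opset1::reshape(value, ngraph::Shape{})->output(0);
}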
///
/// \return nGraph node equivalent of the ONNX operation
template <class T>
- inline NodeVector
+ inline OutputVector
make_ng_variadic_op(const Node& node,
const ngraph::op::AutoBroadcastSpec& auto_broadcast =
ngraph::op::AutoBroadcastSpec::NUMPY)
{
- const NodeVector ng_inputs{node.get_ng_inputs()};
+ const OutputVector ng_inputs{node.get_ng_inputs()};
// Templated binary operation - Creates Add, Minimum, Maximum, etc.
- const auto binary_operation =
- [&auto_broadcast](const std::shared_ptr<ngraph::Node>& arg0,
- const std::shared_ptr<ngraph::Node>& arg1) {
- return std::make_shared<T>(arg0, arg1, auto_broadcast);
- };
+ const auto binary_operation = [&auto_broadcast](const Output<ngraph::Node>& arg0,
+ const Output<ngraph::Node>& arg1) {
+ return std::make_shared<T>(arg0, arg1, auto_broadcast);
+ };
// Create a result node as a series of binary operations
const auto result = std::accumulate(
NodeVector node_vector;
for (auto& value : values)
{
- node_vector.push_back(value.as_single_output_node());
+ node_vector.emplace_back(value.get_node_shared_ptr());
}
return node_vector;
}
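Under the new value types, the std::accumulate fold in make_ng_variadic_op reduces to something like the following (fold_add is hypothetical, assumes a non-empty input list, and uses opset1::Add for illustration):

#include <iterator>
#include <numeric>
#include "ngraph/ngraph.hpp"
#include "ngraph/opsets/opset1.hpp"

ngraph::Output<ngraph::Node> fold_add(const ngraph::OutputVector& inputs)
{
    return std::accumulate(std::next(std::begin(inputs)),
                           std::end(inputs),
                           inputs.front(),
                           [](const ngraph::Output<ngraph::Node>& lhs,
                              const ngraph::Output<ngraph::Node>& rhs) {
                               // Each partial result is itself an Output<Node>.
                               return std::make_shared<ngraph::opset1::Add>(lhs, rhs)
                                   ->output(0);
                           });
}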
///
/// \return A vector of nodes comprising the sub-graph. The order of output
/// tensors must match the output tensors of the FusedOp
- virtual NodeVector decompose_op() const { return NodeVector(); }
+ virtual OutputVector decompose_op() const { return OutputVector(); }
/// Returns the NodeTypeInfo for the node's class.
/// During transition to type_info, returns a dummy type_info for Node if the class
/// has not been updated yet.
constructor_validate_and_infer_types();
}
-NodeVector op::v1::BatchToSpace::decompose_op() const
+OutputVector op::v1::BatchToSpace::decompose_op() const
{
auto data = input_value(0);
auto block = input_value(1);
vector<int64_t> end_mask(data_shape.size(), 0);
flat_node = make_shared<op::v1::StridedSlice>(
flat_node, crops_begin_const, upperbounds, begin_mask, end_mask);
- return NodeVector{flat_node};
+ return OutputVector{flat_node};
}
void ngraph::op::v1::BatchToSpace::pre_validate_and_infer_types()
const Output<Node>& crops_begin,
const Output<Node>& crops_end);
- NodeVector decompose_op() const override;
+ OutputVector decompose_op() const override;
void pre_validate_and_infer_types() override;
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
}
-NodeVector op::Clamp::decompose_op() const
+OutputVector op::Clamp::decompose_op() const
{
const auto data = input_value(0);
const auto type = data.get_element_type();
void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
return true;
}
-NodeVector op::DepthToSpace::decompose_op() const
+OutputVector op::DepthToSpace::decompose_op() const
{
auto data = input_value(0);
auto data_shape = data.get_shape();
}
flat_node = builder::opset1::reshape(flat_node, squeezed_shape);
- return NodeVector{flat_node};
+ return OutputVector{flat_node};
}
shared_ptr<Node> op::DepthToSpace::clone_with_new_inputs(const OutputVector& new_args) const
std::size_t get_block_size() const { return m_blocksize; }
DepthToSpaceMode get_mode() const { return m_mode; }
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
return true;
}
-NodeVector op::FakeQuantize::decompose_op() const
+OutputVector op::FakeQuantize::decompose_op() const
{
Output<Node> data{input_value(0)};
Output<Node> input_low{input_value(1)};
AutoBroadcastSpec(AutoBroadcastType::NUMPY));
bool visit_attributes(AttributeVisitor& visitor) override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual void validate_and_infer_types() override;
virtual std::shared_ptr<Node>
}
// f(x) = 0.5 * x * (1.0 + erf( x / sqrt(2.0) ))
-NodeVector op::Gelu::decompose_op() const
+OutputVector op::Gelu::decompose_op() const
{
auto data = input_value(0);
Gelu(const Output<Node>& data);
bool visit_attributes(AttributeVisitor& visitor) override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
void pre_validate_and_infer_types() override;
}
}
-NodeVector op::GRN::decompose_op() const
+OutputVector op::GRN::decompose_op() const
{
Output<Node> data{input_value(0)};
const Shape& input_shape{data.get_shape()};
data = builder::reshape(data, input_shape);
}
- return as_node_vector({data});
+ return OutputVector{data};
}
shared_ptr<Node> op::GRN::clone_with_new_inputs(const OutputVector& new_args) const
bool visit_attributes(AttributeVisitor& visitor) override;
float get_bias() const { return m_bias; }
virtual void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
".");
}
-NodeVector op::v3::GRUCell::decompose_op() const
+OutputVector op::v3::GRUCell::decompose_op() const
{
// ------ VARIABLES' NAMES AND ACRONYM DEFINITIONS ------
// The names used below are analogous to the ones used in the ONNX documentation.
auto Ht_R = make_shared<op::Dot>(H_t, R_transpose);
// split to gates:
- NodeVector Xt_W_zrh = builder::split(Xt_W, 3, 1);
- NodeVector R_zrh = builder::split(R_transpose, 3, 1);
- NodeVector Ht_R_zrh = builder::split(Ht_R, 3, 1);
- NodeVector biases_zrh = m_linear_before_reset ? builder::split(B, 4) : builder::split(B, 3);
+ OutputVector Xt_W_zrh = builder::split(Xt_W, 3, 1);
+ OutputVector R_zrh = builder::split(R_transpose, 3, 1);
+ OutputVector Ht_R_zrh = builder::split(Ht_R, 3, 1);
+ OutputVector biases_zrh = m_linear_before_reset ? builder::split(B, 4) : builder::split(B, 3);
// zt = f(Xt*(Wz^T) + Ht-1*(Rz^T) + Wbz + Rbz)
auto z_t = m_activation_f(clip(add(Xt_W_zrh[0], add(Ht_R_zrh[0], biases_zrh[0]))));
bool visit_attributes(AttributeVisitor& visitor) override;
virtual void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
"The element types of both alpha and beta inputs must match the data input type.");
}
-NodeVector op::HardSigmoid::decompose_op() const
+OutputVector op::HardSigmoid::decompose_op() const
{
const auto data = input_value(0);
bool visit_attributes(AttributeVisitor& visitor) override;
virtual void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
};
".");
}
-NodeVector op::LSTMCell::decompose_op() const
+OutputVector op::LSTMCell::decompose_op() const
{
// ------ VARIABLES' NAMES AND ACRONYM DEFINITIONS ------
// The names used below are analogous to the ones used in the ONNX documentation.
Output<Node> W = input_value(3);
Output<Node> R = input_value(4);
Output<Node> bias = input_value(5);
- NodeVector p_iof = builder::split(input_value(6), s_peepholes_count);
+ OutputVector p_iof = builder::split(input_value(6), s_peepholes_count);
// Converting to IFCO format since it's the DNNL default.
if (m_weights_format != op::LSTMWeightsFormat::IFCO)
// Xt*(W^T) + Ht-1*(R^T) + Wb + Rb -- for [iofc] gates.
auto gates = add(Xt_W, add(Ht_R, bias));
- NodeVector split_gates = builder::split(gates, 4, -1);
+ OutputVector split_gates = builder::split(gates, 4, -1);
auto i_t = split_gates.at(0);
auto f_t = split_gates.at(1);
auto c_t = split_gates.at(2);
if (m_input_forget)
{
// Couple input with forget gate: 1 - i_t
- f_t = sub(op::Constant::create(i_t->get_element_type(),
- i_t->get_shape(),
- vector<float>(shape_size(i_t->get_shape()), 1.f)),
+ f_t = sub(op::Constant::create(i_t.get_element_type(),
+ i_t.get_shape(),
+ vector<float>(shape_size(i_t.get_shape()), 1.f)),
i_t);
}
else
{op::LSTMWeightsFormat::IOFC, {0, 2, 3, 1}},
};
- NodeVector splitted_node = builder::split(node, s_gates_count);
- NodeVector nodes_in_new_format;
+ OutputVector splitted_node = builder::split(node, s_gates_count);
+ OutputVector nodes_in_new_format;
nodes_in_new_format.reserve(s_gates_count);
for (const auto& axis : gate_order_conversion_map.at(m_weights_format))
{
bool visit_attributes(AttributeVisitor& visitor) override;
virtual void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
visitor.on_attribute("weights_format", m_weights_format);
return true;
}
-NodeVector op::v0::LSTMSequence::decompose_op() const
+OutputVector op::v0::LSTMSequence::decompose_op() const
{
- NodeVector results;
+ OutputVector results;
if (m_direction == direction::FORWARD || m_direction == direction::REVERSE)
{
results = lstm_pass(m_direction == direction::REVERSE);
}
if (m_direction == direction::BIDIRECTIONAL)
{
- NodeVector fwd_results{lstm_pass()};
- NodeVector rev_results{lstm_pass(true)};
+ OutputVector fwd_results{lstm_pass()};
+ OutputVector rev_results{lstm_pass(true)};
// Stack together respective outputs from both forward and reverse passes.
shared_ptr<Node> Y{
- make_shared<opset1::Concat>(NodeVector{fwd_results.at(0), rev_results.at(0)}, 1)};
+ make_shared<opset1::Concat>(OutputVector{fwd_results.at(0), rev_results.at(0)}, 1)};
shared_ptr<Node> Y_h{
- make_shared<opset1::Concat>(NodeVector{fwd_results.at(1), rev_results.at(1)}, 1)};
+ make_shared<opset1::Concat>(OutputVector{fwd_results.at(1), rev_results.at(1)}, 1)};
shared_ptr<Node> Y_c{
- make_shared<opset1::Concat>(NodeVector{fwd_results.at(2), rev_results.at(2)}, 1)};
- results = NodeVector{Y, Y_h, Y_c};
+ make_shared<opset1::Concat>(OutputVector{fwd_results.at(2), rev_results.at(2)}, 1)};
+ results = OutputVector{Y, Y_h, Y_c};
}
return results;
}
return make_shared<opset1::Select>(mask_condition, mask_value, data);
}
-NodeVector op::v0::LSTMSequence::lstm_pass(bool is_reverse) const
+OutputVector op::v0::LSTMSequence::lstm_pass(bool is_reverse) const
{
// ------ VARIABLES' NAMES AND ACRONYM DEFINITIONS ------
// The names used below are analogous to the ones used in the ONNX documentation.
X = make_shared<opset1::ReverseSequence>(X, seq_lengths, 0 /*batch_axis*/, 1 /*seq_axis*/);
}
- NodeVector in_seqs = builder::opset1::split(X, X->get_shape().at(1), 1);
+ OutputVector in_seqs = builder::opset1::split(X, X->get_shape().at(1), 1);
for (auto& in_x : in_seqs)
{
size_t num_direction_axis) const
{
// In bidirectional mode inputs are stacked together, so we must split them.
- shared_ptr<Node> tmp = node.get_node_shared_ptr();
+ Output<Node> tmp = node;
if (m_direction == direction::BIDIRECTIONAL)
{
tmp = builder::opset1::split(node, 2, num_direction_axis).at(is_reverse ? 1 : 0);
}
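Since builder::opset1::split now yields an OutputVector, the bidirectional unstacking above can consume its elements directly as Output<Node> values, e.g. (take_direction is a hypothetical helper; split defaults to axis 0):

#include "ngraph/ngraph.hpp"
#include "ngraph/builder/split.hpp"

ngraph::Output<ngraph::Node> take_direction(const ngraph::Output<ngraph::Node>& stacked,
                                            bool is_reverse)
{
    // Two equal parts along axis 0: forward at index 0, reverse at index 1.
    ngraph::OutputVector halves = ngraph::builder::opset1::split(stacked, 2);
    return halves.at(is_reverse ? 1 : 0);
}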
bool visit_attributes(AttributeVisitor& visitor) override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
std::size_t batch_axis = 0,
const Output<Node>& default_value = Output<Node>()) const;
- NodeVector lstm_pass(bool is_reverse = false) const;
+ OutputVector lstm_pass(bool is_reverse = false) const;
// Split (in bidirectional mode) and squeeze input data to remove the 'num_direction' dimension.
std::shared_ptr<Node> prepare_input(Output<Node> node,
}
}
-NodeVector op::MatMul::decompose_op() const
+OutputVector op::MatMul::decompose_op() const
{
auto A = input_value(0);
auto B = input_value(1);
bool visit_attributes(AttributeVisitor& visitor) override;
virtual void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
return true;
}
-NodeVector op::v1::Mod::decompose_op() const
+OutputVector op::v1::Mod::decompose_op() const
{
const auto dividend = make_shared<op::Abs>(input_value(0));
const auto dividend_sign = make_shared<op::Sign>(input_value(0));
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastType::NUMPY);
bool visit_attributes(AttributeVisitor& visitor) override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
}
-NodeVector op::MVN::decompose_op() const
+OutputVector op::MVN::decompose_op() const
{
auto data = input_value(0);
auto data_shape = data.get_shape(); // assume that data has n and c channels.
variance = variance + eps_node;
variance = std::make_shared<op::Broadcast>(variance, data_shape, m_reduction_axes);
- return as_node_vector({mean_normalization / variance});
+ return OutputVector{mean_normalization / variance};
}
}
bool normalize_variance = true,
double eps = 1e-9);
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual void validate_and_infer_types() override;
return axes;
}
-NodeVector op::NormalizeL2::decompose_op() const
+OutputVector op::NormalizeL2::decompose_op() const
{
Output<Node> data{input_value(0)};
const Shape input_shape{data.get_shape()};
data = make_shared<op::Divide>(data, norm, AutoBroadcastSpec(AutoBroadcastType::NUMPY));
- return as_node_vector({data});
+ return OutputVector{data};
}
shared_ptr<Node> op::NormalizeL2::clone_with_new_inputs(const OutputVector& new_args) const
bool visit_attributes(AttributeVisitor& visitor) override;
float get_eps() const { return m_eps; }
EpsMode get_eps_mode() const { return m_eps_mode; }
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
AxisSet get_reduction_axes() const;
set_output_type(0, get_input_element_type(0), get_input_partial_shape(0));
}
-NodeVector op::PRelu::decompose_op() const
+OutputVector op::PRelu::decompose_op() const
{
auto data = input_value(0);
auto data_shape = data.get_shape();
PRelu(const Output<Node>& data, const Output<Node>& slope);
bool visit_attributes(AttributeVisitor& visitor) override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
".");
}
-NodeVector op::RNNCell::decompose_op() const
+OutputVector op::RNNCell::decompose_op() const
{
// ------ VARIABLES' NAMES AND ACRONYM DEFINITIONS ------
// The names used below are analogous to the ones used in the ONNX documentation.
bool visit_attributes(AttributeVisitor& visitor) override;
virtual void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
return true;
}
-NodeVector op::v0::Selu::decompose_op() const
+OutputVector op::v0::Selu::decompose_op() const
{
const auto data = input_value(0);
const auto alpha = input_value(1);
const Output<Node>& lambda);
bool visit_attributes(AttributeVisitor& visitor) override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
}
}
-NodeVector op::ShuffleChannels::decompose_op() const
+OutputVector op::ShuffleChannels::decompose_op() const
{
const auto data = input_value(0);
const auto& data_shape = data.get_shape();
virtual void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
constructor_validate_and_infer_types();
}
-NodeVector op::v1::SpaceToBatch::decompose_op() const
+OutputVector op::v1::SpaceToBatch::decompose_op() const
{
auto data = input_value(0);
auto block = input_value(1);
}
flat_node = builder::opset1::reshape(flat_node, squeezed_shape);
- return NodeVector{flat_node};
+ return OutputVector{flat_node};
}
void ngraph::op::v1::SpaceToBatch::pre_validate_and_infer_types()
const Output<ngraph::Node>& pads_begin,
const Output<ngraph::Node>& pads_end);
- NodeVector decompose_op() const override;
+ OutputVector decompose_op() const override;
void pre_validate_and_infer_types() override;
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
return true;
}
-NodeVector op::SpaceToDepth::decompose_op() const
+OutputVector op::SpaceToDepth::decompose_op() const
{
auto data = input_value(0);
auto data_shape = data.get_shape();
squeezed_shape.insert(squeezed_shape.begin() + 1, c_dim * std::pow(m_blocksize, spatial_dims));
flat_node = builder::opset1::reshape(flat_node, squeezed_shape);
- return NodeVector{flat_node};
+ return OutputVector{flat_node};
}
shared_ptr<Node> op::SpaceToDepth::clone_with_new_inputs(const OutputVector& new_args) const
bool visit_attributes(AttributeVisitor& visitor) override;
std::size_t get_block_size() const { return m_blocksize; }
SpaceToDepthMode get_mode() const { return m_mode; }
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
return true;
}
-NodeVector op::SquaredDifference::decompose_op() const
+OutputVector op::SquaredDifference::decompose_op() const
{
const auto x1 = input_value(0);
const auto x2 = input_value(1);
const AutoBroadcastSpec& auto_broadcast = AutoBroadcastType::NUMPY);
bool visit_attributes(AttributeVisitor& visitor) override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
return true;
}
-NodeVector op::Squeeze::decompose_op() const
+OutputVector op::Squeeze::decompose_op() const
{
NODE_VALIDATION_CHECK(
this,
Squeeze(const Output<Node>& data, const Output<Node>& axes);
bool visit_attributes(AttributeVisitor& visitor) override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
bool evaluate(const HostTensorVector& outputs,
const HostTensorVector& inputs) override;
}
}
-NodeVector op::Stack::decompose_op() const
+OutputVector op::Stack::decompose_op() const
{
auto axis = get_axis();
std::vector<std::shared_ptr<ngraph::Node>> args;
virtual void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
set_output_type(0, get_input_element_type(0), PartialShape{output_shape});
}
-NodeVector op::Unsqueeze::decompose_op() const
+OutputVector op::Unsqueeze::decompose_op() const
{
NODE_VALIDATION_CHECK(
this,
Unsqueeze(const Output<Node>& data, const Output<Node>& axes);
virtual void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
bool visit_attributes(AttributeVisitor& visitor) override;
bool evaluate(const HostTensorVector& outputs,
set_output_type(0, result_et, output_pshape);
}
-NodeVector op::v1::GroupConvolutionBackpropData::decompose_op() const
+OutputVector op::v1::GroupConvolutionBackpropData::decompose_op() const
{
auto data = input_value(0);
auto filters = input_value(1);
auto groups = filters.get_shape()[0];
// slice data
- auto sliced_data = builder::split(data, groups, 1);
+ OutputVector sliced_data = builder::split(data, groups, 1);
// slice filters
- auto sliced_filters = builder::split(filters, groups, 0);
+ OutputVector sliced_filters = builder::split(filters, groups, 0);
// We have to squeeze the first, empty dimension (groups).
- std::transform(std::begin(sliced_filters),
- std::end(sliced_filters),
- std::begin(sliced_filters),
- [](const std::shared_ptr<Node>& n) -> std::shared_ptr<Node> {
- return builder::opset1::squeeze(n);
- });
+ std::transform(
+ std::begin(sliced_filters),
+ std::end(sliced_filters),
+ std::begin(sliced_filters),
+ [](const Output<Node>& n) -> Output<Node> { return builder::opset1::squeeze(n); });
for (auto i = 0; i < groups; ++i)
{
}
}
-NodeVector op::v0::GroupConvolution::decompose_op() const
+OutputVector op::v0::GroupConvolution::decompose_op() const
{
auto data = input_value(0);
auto filters = input_value(1);
// Remove group dimension after slicing
sliced_filter = make_shared<op::Reshape>(
sliced_filters[group],
- get_default_order(sliced_filters[group]->get_shape().size()),
+ get_default_order(sliced_filters[group].get_shape().size()),
Shape(std::next(std::begin(filters_shape), 1), std::end(filters_shape)));
}
convolution_nodes.push_back(
get_groups());
}
-NodeVector op::v0::GroupConvolutionBackpropData::decompose_op() const
+OutputVector op::v0::GroupConvolutionBackpropData::decompose_op() const
{
auto filters = input_value(1);
auto output_delta = input_value(2);
bool visit_attributes(AttributeVisitor& visitor) override;
virtual bool is_dynamic() const override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
virtual std::shared_ptr<Node>
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
virtual void post_validate_and_infer_types() override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual void pre_validate_and_infer_types() override;
set_input_is_relevant_to_shape(0);
}
-NodeVector op::v0::Split::decompose_op() const
+OutputVector op::v0::Split::decompose_op() const
{
return builder::split(input_value(0), m_splits, m_axis);
}
void pre_validate_and_infer_types() override;
- virtual NodeVector decompose_op() const override;
+ virtual OutputVector decompose_op() const override;
virtual std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
return Output<Node>(shared_from_this(), output_index);
}
-NodeVector op::v0::TensorIterator::decompose_op() const
+OutputVector op::v0::TensorIterator::decompose_op() const
{
// Stub
- return NodeVector{};
+ return OutputVector{};
}
void op::v0::TensorIterator::revalidate_and_infer_types_for_body_ops()
std::shared_ptr<Node>
clone_with_new_inputs(const OutputVector& new_args) const override;
- NodeVector decompose_op() const override;
+ OutputVector decompose_op() const override;
/// \return the body of the iteration
std::shared_ptr<BodyLambda> get_body() const { return m_body; }
/// \param body set the body of the iteration
NodeVector nodes;
for (auto& val : input_values())
nodes.emplace_back(val.get_node_shared_ptr());
- auto subgraph = extract_subgraph(subgraph_outputs, nodes);
+ auto subgraph = extract_subgraph(ngraph::as_node_vector(subgraph_outputs), nodes);
validate_nodes_and_infer_types(subgraph);
size_t i = 0;
- for (auto output_node : subgraph_outputs)
+ for (auto output : subgraph_outputs)
{
- for (size_t j = 0; j < output_node->get_output_size(); j++, i++)
+ if (i >= get_output_size())
{
- if (i >= get_output_size())
- {
- set_output_size(i + 1);
- }
- set_output_type(
- i, output_node->get_output_element_type(j), output_node->get_output_shape(j));
+ set_output_size(i + 1);
}
+ set_output_type(i, output.get_element_type(), output.get_shape());
+ i++;
}
post_validate_and_infer_types();
int index = 0;
for (auto& output : split->outputs())
{
- output.replace(slices[index++]->output(0));
+ output.replace(slices[index++]);
}
split->outputs().clear();
- for (auto& slice : slices)
+ for (auto& slice : as_node_vector(slices))
{
auto const_data = std::dynamic_pointer_cast<op::Constant>(
slice->input_value(0).get_node_shared_ptr());
{
for (auto& input : variadic_split->output(i).get_target_inputs())
{
- input.replace_source_output((slices[i]->output(0)));
+ input.replace_source_output(slices[i]);
}
}
variadic_split->outputs().clear();
- for (auto& slice : slices)
+ for (auto& slice : as_node_vector(slices))
{
auto const_data = std::dynamic_pointer_cast<op::Constant>(
slice->input_value(0).get_node_shared_ptr());
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_override_op)
{
onnx_import::register_operator(
- "FalseAdd", 1, "", [](const onnx_import::Node& node) -> NodeVector {
- NodeVector ng_inputs{node.get_ng_inputs()};
+ "FalseAdd", 1, "", [](const onnx_import::Node& node) -> OutputVector {
+ OutputVector ng_inputs{node.get_ng_inputs()};
return {std::make_shared<ngraph::op::Add>(ng_inputs.at(0), ng_inputs.at(1))};
});
onnx_import::register_operator(
- "FalseAdd", 1, "", [](const onnx_import::Node& node) -> NodeVector {
- NodeVector ng_inputs{node.get_ng_inputs()};
+ "FalseAdd", 1, "", [](const onnx_import::Node& node) -> OutputVector {
+ OutputVector ng_inputs{node.get_ng_inputs()};
return {std::make_shared<ngraph::op::Subtract>(ng_inputs.at(0), ng_inputs.at(1))};
});
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_custom_op)
{
onnx_import::register_operator(
- "AddQ", 1, "com.intel.ai", [](const onnx_import::Node& node) -> NodeVector {
- NodeVector ng_inputs{node.get_ng_inputs()};
+ "AddQ", 1, "com.intel.ai", [](const onnx_import::Node& node) -> OutputVector {
+ OutputVector ng_inputs{node.get_ng_inputs()};
return {std::make_shared<ngraph::op::Add>(ng_inputs.at(0), ng_inputs.at(1))};
});
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_custom_op_default_domain)
{
onnx_import::register_operator(
- "AddQ", 1, "com.intel.ai", [](const onnx_import::Node& node) -> NodeVector {
- NodeVector ng_inputs{node.get_ng_inputs()};
+ "AddQ", 1, "com.intel.ai", [](const onnx_import::Node& node) -> OutputVector {
+ OutputVector ng_inputs{node.get_ng_inputs()};
return {std::make_shared<ngraph::op::Add>(ng_inputs.at(0), ng_inputs.at(1))};
});
// Registered custom operator
onnx_import::register_operator(
- "AddQ", 1, "com.intel.ai", [](const onnx_import::Node& node) -> NodeVector {
- NodeVector ng_inputs{node.get_ng_inputs()};
+ "AddQ", 1, "com.intel.ai", [](const onnx_import::Node& node) -> OutputVector {
+ OutputVector ng_inputs{node.get_ng_inputs()};
return {std::make_shared<ngraph::op::Add>(ng_inputs.at(0), ng_inputs.at(1))};
});
EXPECT_TRUE(onnx_import::is_operator_supported("AddQ", 1, "com.intel.ai"));
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_missing_op_domain)
{
onnx_import::register_operator(
- "CustomAdd", 1, "custom.op", [](const onnx_import::Node& node) -> NodeVector {
- NodeVector ng_inputs{node.get_ng_inputs()};
+ "CustomAdd", 1, "custom.op", [](const onnx_import::Node& node) -> OutputVector {
+ OutputVector ng_inputs{node.get_ng_inputs()};
return {std::make_shared<ngraph::op::Add>(ng_inputs.at(0), ng_inputs.at(1))};
});
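The updated registration contract in one place, in the same style as the tests above ("MyRelu" and "custom.domain" are illustrative; the lambda now returns an OutputVector):

onnx_import::register_operator(
    "MyRelu", 1, "custom.domain", [](const onnx_import::Node& node) -> OutputVector {
        OutputVector ng_inputs{node.get_ng_inputs()};
        return {std::make_shared<ngraph::op::Relu>(ng_inputs.at(0))};
    });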
NGRAPH_TEST(${BACKEND_NAME}, onnx_model_missing_input)
{
onnx_import::register_operator(
- "TestMissingInOut", 1, "com.intel.ai", [](const onnx_import::Node& node) -> NodeVector {
- NodeVector ng_inputs{node.get_ng_inputs()};
- std::shared_ptr<ngraph::Node> A = ng_inputs.at(0);
- std::shared_ptr<ngraph::Node> B = ng_inputs.at(1);
- std::shared_ptr<ngraph::Node> C = ng_inputs.at(2);
+ "TestMissingInOut", 1, "com.intel.ai", [](const onnx_import::Node& node) -> OutputVector {
+ OutputVector ng_inputs{node.get_ng_inputs()};
+ Output<ngraph::Node> A = ng_inputs.at(0);
+ Output<ngraph::Node> B = ng_inputs.at(1);
+ Output<ngraph::Node> C = ng_inputs.at(2);
A = A * C;
if (!ngraph::op::is_null(B))
});
onnx_import::register_operator(
- "TestMissingIn", 1, "com.intel.ai", [](const onnx_import::Node& node) -> NodeVector {
- NodeVector ng_inputs{node.get_ng_inputs()};
+ "TestMissingIn", 1, "com.intel.ai", [](const onnx_import::Node& node) -> OutputVector {
+ OutputVector ng_inputs{node.get_ng_inputs()};
std::shared_ptr<ngraph::Node> result = std::make_shared<ngraph::op::Constant>(
element::f32, ngraph::Shape{2, 2}, std::vector<float>{1, 1, 1, 1});
An->add_provenance_tag("in1");
Ax->add_provenance_tag("in2");
ngraph::Shape shape_r{2, 2};
- auto QConcat = ngraph::builder::QuantizedConcatBuilder(
- ngraph::NodeVector{A}, 0, ngraph::NodeVector{An}, ngraph::NodeVector{Ax});
- auto f = make_shared<ngraph::Function>(ngraph::NodeVector{QConcat},
+ auto QConcat = ngraph::builder::QuantizedConcatBuilder({A}, 0, {An}, {Ax});
+ auto f = make_shared<ngraph::Function>(ngraph::OutputVector{QConcat},
ngraph::ParameterVector{A, An, Ax});
QConcat->add_provenance_tag("hello");
auto check_if_result = [](shared_ptr<Node> n) {
// Op supported by backend. Do not decompose
return modified;
}
- auto subgraph_outputs = node->decompose_op();
+
+ OutputVector output_vector = node->decompose_op();
+ NodeVector subgraph_outputs = as_node_vector(output_vector);
if (ngraph::get_provenance_enabled())
{
}
// Run recursively until no more fused ops
- NodeVector nodes;
- for (auto& val : node->input_values())
- nodes.emplace_back(val.get_node_shared_ptr());
- auto subgraph = extract_subgraph(subgraph_outputs, nodes);
+ auto subgraph = extract_subgraph(subgraph_outputs, as_node_vector(node->input_values()));
for (auto subgraph_node : subgraph)
{
run_on_node(subgraph_node);
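A simplified sketch of the replacement step this pass performs once decompose_op has produced the subgraph (assumes the decomposed outputs align one-to-one with the fused op's outputs; the real pass additionally handles provenance and recurses into the produced subgraph, as shown above):

#include "ngraph/ngraph.hpp"

void replace_with_decomposition(const std::shared_ptr<ngraph::Node>& node)
{
    ngraph::OutputVector decomposed = node->decompose_op();
    for (size_t i = 0; i < node->get_output_size(); ++i)
    {
        // Redirect every consumer of output i to the decomposed producer.
        for (auto& input : node->output(i).get_target_inputs())
        {
            input.replace_source_output(decomposed.at(i));
        }
    }
}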