* **3**: 1D tensor of type *T* with 3 or 4 elements: `[image_height, image_width, scale_height_and_width]` or `[image_height, image_width, scale_height, scale_width]`. Required.
-**Outputs**4
+**Outputs**
* **1**: tensor of type *T* and shape `[batch_size * post_nms_topn, 5]`.
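For orientation, a minimal sketch (not part of the change; it mirrors the `type_prop` tests further down, and the includes are an assumption) showing how these input and output shapes fit together for the opset4 Proposal:

```cpp
// Hedged sketch: construct Proposal-4 and inspect the inferred output shapes.
// Attribute values and parameter shapes mirror the type_prop tests below.
#include <ngraph/ngraph.hpp>

using namespace ngraph;

int main()
{
    op::ProposalAttrs attrs;
    attrs.base_size = 1;
    attrs.pre_nms_topn = 20;
    attrs.post_nms_topn = 200;
    const size_t batch_size = 7;

    auto class_probs = std::make_shared<op::Parameter>(element::f32, Shape{batch_size, 12, 34, 62});
    auto bbox_deltas = std::make_shared<op::Parameter>(element::f32, Shape{batch_size, 24, 34, 62});
    auto image_shape = std::make_shared<op::Parameter>(element::f32, Shape{3}); // [H, W, scale_h_and_w]

    auto proposal = std::make_shared<op::v4::Proposal>(class_probs, bbox_deltas, image_shape, attrs);
    // Output 0: ROIs of shape [batch_size * post_nms_topn, 5]
    // Output 1: probabilities of shape [batch_size * post_nms_topn] (new in opset4)
    return proposal->get_output_shape(0) == Shape{batch_size * attrs.post_nms_topn, 5} ? 0 : 1;
}
```

The probabilities output is what Proposal-4 adds over Proposal-1; the rest of this change threads that second output through ProposalIE and the conversion passes.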
dims = output.get_shape();
}
for (const auto& dim : dims) {
- if (!dim) THROW_IE_EXCEPTION << outName << " has zero dimension that is not allowable";
+ if (!dim)
+ THROW_IE_EXCEPTION << outName << " has zero dimension that is not allowable";
}
if (ptr) {
std::make_shared<Builder::NodeConverter<::ngraph::op::PadIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::v1::Power>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::PowerIE>>(),
- std::make_shared<Builder::NodeConverter<::ngraph::op::Proposal>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::ProposalIE>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::Relu>>(),
std::make_shared<Builder::NodeConverter<::ngraph::op::SeluIE>>(),
}
template <>
-CNNLayer::Ptr NodeConverter<ngraph::op::Proposal>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
+CNNLayer::Ptr NodeConverter<ngraph::op::v0::Proposal>::createLayer(const std::shared_ptr<ngraph::Node>& layer) const {
THROW_IE_EXCEPTION << "Proposal operation should be converted to ProposalIE";
}
p_prob_item = outputs[1]->buffer();
auto dims0 = inputs[0]->getTensorDesc().getDims();
- size_t img_info_size = inputs[2]->getTensorDesc().getDims()[1];
+ auto img_info_dims = inputs[2]->getTensorDesc().getDims();
+ if (img_info_dims.size() != 2)
+ THROW_IE_EXCEPTION << "Size of im_info tensor for Proposal is incorrect! The im_info tensor must be 2D. "
+ << "Now im_info rank is " << img_info_dims.size() << ".";
+
+ if (img_info_dims[1] != 3 && img_info_dims[1] != 4)
+ THROW_IE_EXCEPTION << "Shape of im_info tensor for Proposal is incorrect! "
+ << "Shape of im_info must be [1, 3] or [1, 4]! "
+ << "Now shape of im_info is [" << img_info_dims[0] << ", " << img_info_dims[1] << "].";
+
+ size_t img_info_size = img_info_dims[1];
+
// input image height & width
const float img_H = p_img_info_cpu[0];
// scale factor for height & width
const float scale_H = p_img_info_cpu[2];
- const float scale_W = img_info_size > 3 ? p_img_info_cpu[3] : scale_H;
+ const float scale_W = img_info_size == 4 ? p_img_info_cpu[3] : scale_H;
XARCH::proposal_exec(p_bottom_item, p_d_anchor_item, dims0,
{img_H, img_W, scale_H, scale_W}, anchors.data(), roi_indices.data(), p_roi_item, p_prob_item, conf);
std::make_shared<LayerCreator<ngraph::op::Range>>("Range"),
std::make_shared<LayerCreator<ngraph::op::PriorBox>>("PriorBox"),
std::make_shared<LayerCreator<ngraph::op::PriorBoxClustered>>("PriorBoxClustered"),
- std::make_shared<LayerCreator<ngraph::op::Proposal>>("Proposal"),
std::make_shared<LayerCreator<ngraph::op::v1::ReduceMax>>("ReduceMax"),
std::make_shared<LayerCreator<ngraph::op::v1::ReduceMin>>("ReduceMin"),
std::make_shared<LayerCreator<ngraph::op::v1::ReduceMean>>("ReduceMean"),
return std::make_shared<ngraph::op::PriorBoxClustered>(inputs[0], inputs[1], attr);
}
-// Proposal layer
-template <>
-std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::Proposal>::createLayer(
- const ngraph::OutputVector& inputs, const pugi::xml_node& node, std::istream& binStream,
- const GenericLayerParams& layerParsePrms) {
- checkParameters(inputs, layerParsePrms, 3);
- pugi::xml_node dn = node.child("data");
-
- if (dn.empty())
- THROW_IE_EXCEPTION << "Cannot read parameter for " << getType() << " layer with name: " << layerParsePrms.name;
-
- ngraph::op::ProposalAttrs attr;
- attr.base_size = GetUIntAttr(dn, "base_size");
- attr.pre_nms_topn = GetUIntAttr(dn, "pre_nms_topn");
- attr.post_nms_topn = GetUIntAttr(dn, "post_nms_topn");
- attr.nms_thresh = GetFloatAttr(dn, "nms_thresh");
- attr.feat_stride = GetUIntAttr(dn, "feat_stride");
- attr.min_size = GetUIntAttr(dn, "min_size");
- attr.ratio = getParameters<float>(dn, "ratio");
- attr.scale = getParameters<float>(dn, "scale");
- attr.clip_after_nms = (GetIntAttr(dn, "clip_after_nms", 0) != 0);
- attr.clip_before_nms = (GetIntAttr(dn, "clip_before_nms", 1) != 0);
- attr.normalize = (GetIntAttr(dn, "normalize", 0) != 0);
- attr.box_size_scale = GetFloatAttr(dn, "box_size_scale", 1.0f);
- attr.box_coordinate_scale = GetFloatAttr(dn, "box_coordinate_scale", 1.0f);
- attr.framework = GetStrAttr(dn, "framework", "");
-
- return std::make_shared<ngraph::op::Proposal>(inputs[0], inputs[1], inputs[2], attr);
-}
-
// PriorBox layer
template <>
std::shared_ptr<ngraph::Node> V10Parser::LayerCreator<ngraph::op::PriorBox>::createLayer(
// \brief Constructs a Proposal operation
//
// \param class_probs Class probability scores
- // \param class_logits Class prediction logits
+ // \param class_bbox_deltas Prediction of bounding box deltas
// \param image_shape Shape of image
// \param attrs Proposal op attributes
ProposalIE(const Output<Node>& class_probs,
- const Output<Node>& class_logits,
+ const Output<Node>& class_bbox_deltas,
const Output<Node>& image_shape,
const ProposalAttrs& attrs);
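For context, a hedged sketch (illustrative names and values, not part of the change) of constructing ProposalIE directly; unlike the opset op, its third input is the 2D `[1, 3]` or `[1, 4]` im_info tensor, which is why the conversion pass below inserts a Reshape:

```cpp
// Hedged sketch: direct ProposalIE construction with a 2D im_info input.
#include <memory>
#include <ngraph/ngraph.hpp>
#include <ngraph_ops/proposal_ie.hpp>

using namespace ngraph;

std::shared_ptr<op::ProposalIE> make_proposal_ie_example()
{
    auto class_probs = std::make_shared<op::Parameter>(element::f32, Shape{1, 12, 34, 62});
    auto class_bbox_deltas = std::make_shared<op::Parameter>(element::f32, Shape{1, 24, 34, 62});
    auto im_info = std::make_shared<op::Parameter>(element::f32, Shape{1, 3}); // 2D, not 1D

    op::ProposalAttrs attrs;
    attrs.post_nms_topn = 200;
    attrs.infer_probs = true; // request the second (probabilities) output

    return std::make_shared<op::ProposalIE>(class_probs, class_bbox_deltas, im_info, attrs);
}
```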
namespace pass {
class TRANSFORMATIONS_API ConvertProposalToLegacyMatcher;
+class TRANSFORMATIONS_API ConvertProposal4ToLegacyMatcher;
} // namespace pass
} // namespace ngraph
+class ngraph::pass::ConvertProposal4ToLegacyMatcher: public ngraph::pass::MatcherPass {
+public:
+ ConvertProposal4ToLegacyMatcher();
+};
+
class ngraph::pass::ConvertProposalToLegacyMatcher: public ngraph::pass::MatcherPass {
public:
ConvertProposalToLegacyMatcher();
constexpr NodeTypeInfo op::ProposalIE::type_info;
-op::ProposalIE::ProposalIE(const Output<Node>& class_probs, const Output<Node>& class_logits,
+op::ProposalIE::ProposalIE(const Output<Node>& class_probs, const Output<Node>& class_bbox_deltas,
const Output<Node>& image_shape, const ProposalAttrs& attrs)
- : Op({class_probs, class_logits, image_shape}), m_attrs(attrs) {
+ : Op({class_probs, class_bbox_deltas, image_shape}), m_attrs(attrs) {
constructor_validate_and_infer_types();
}
void op::ProposalIE::validate_and_infer_types() {
- set_input_is_relevant_to_shape(2);
-
const auto& class_probs_pshape = get_input_partial_shape(0);
- const auto& class_logits_pshape = get_input_partial_shape(1);
+ const auto& class_bbox_deltas_pshape = get_input_partial_shape(1);
const auto& image_shape_pshape = get_input_partial_shape(2);
- if (class_probs_pshape.is_static() && class_logits_pshape.is_static() && image_shape_pshape.is_static()) {
+
+ if (class_probs_pshape.is_static() && class_bbox_deltas_pshape.is_static() && image_shape_pshape.is_static()) {
const Shape class_probs_shape {class_probs_pshape.to_shape()};
- const Shape class_logits_shape {class_logits_pshape.to_shape()};
+ const Shape class_bbox_deltas_shape {class_bbox_deltas_pshape.to_shape()};
const Shape image_shape_shape {image_shape_pshape.to_shape()};
NODE_VALIDATION_CHECK(
this, class_probs_shape.size() == 4,
"Proposal layer shape class_probs input must have rank 4 (class_probs_shape: ", class_probs_shape, ").");
- NODE_VALIDATION_CHECK(this, class_logits_shape.size() == 4,
- "Proposal layer shape class_logits_shape input must have rank 4 (class_logits_shape: ",
- class_logits_shape, ").");
+ NODE_VALIDATION_CHECK(this, class_bbox_deltas_shape.size() == 4,
+ "Proposal layer shape class_bbox_deltas_shape input must have rank 4 (class_bbox_deltas_shape: ",
+ class_bbox_deltas_shape, ").");
NODE_VALIDATION_CHECK(
this, image_shape_shape.size() == 2,
auto batch_size = class_probs_shape[0];
set_output_type(0, get_input_element_type(0), Shape {batch_size * m_attrs.post_nms_topn, 5});
+ if (m_attrs.infer_probs)
+ set_output_type(1, get_input_element_type(0), Shape {batch_size * m_attrs.post_nms_topn});
} else {
set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
+ if (m_attrs.infer_probs)
+ set_output_type(1, get_input_element_type(0), PartialShape::dynamic());
}
}
anchor->add_matcher<ngraph::pass::ConvertNormalizeL2WithMulToNormalizeIE>();
anchor->add_matcher<ngraph::pass::ConvertHardSigmoidToLegacyMatcher>();
anchor->add_matcher<ngraph::pass::ConvertProposalToLegacyMatcher>();
+ anchor->add_matcher<ngraph::pass::ConvertProposal4ToLegacyMatcher>();
anchor->add_matcher<ngraph::pass::ConvertTileToLegacyMatcher>();
anchor->add_matcher<ngraph::pass::ConvertLRNToLegacyMatcher>();
anchor->add_matcher<ngraph::pass::ConvertPadToLegacyMatcher>();
#include <vector>
#include <ngraph/opsets/opset1.hpp>
-
+#include <ngraph/opsets/opset4.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
#include <ngraph_ops/proposal_ie.hpp>
#include <ngraph/rt_info.hpp>
-ngraph::pass::ConvertProposalToLegacyMatcher::ConvertProposalToLegacyMatcher() {
- auto input_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
- auto input_1 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1, 1, 1});
- auto input_2 = std::make_shared<pattern::op::Label>(element::f32, Shape{3});
+bool convert_to_proposal_ie(std::shared_ptr<ngraph::op::v0::Proposal> proposal, bool infer_probs = false) {
+ ngraph::Output<ngraph::Node> last; // the 2D im_info tensor of shape [1, 3] or [1, 4] will be retrieved from this node
+ ngraph::NodeVector ops_to_replace, new_ops;
+ ops_to_replace.push_back(proposal);
- ngraph::op::ProposalAttrs attr = {};
+ if (auto reshape = std::dynamic_pointer_cast<ngraph::opset1::Reshape>(proposal->input_value(2).get_node_shared_ptr())) {
+ const ngraph::PartialShape& im_info_shape = reshape->get_input_partial_shape(0);
+ if (im_info_shape != ngraph::Shape({1, 3}) && im_info_shape != ngraph::Shape({1, 4})) {
+ return false;
+ }
+ last = reshape->input_value(0);
+ ops_to_replace.push_back(reshape);
+ } else {
+ auto const_shape = ngraph::opset1::Constant::create(ngraph::element::i64, ngraph::Shape{2}, {1, -1});
+ last = std::make_shared<ngraph::opset1::Reshape>(proposal->input_value(2), const_shape, true);
+ new_ops.push_back(last.get_node_shared_ptr());
+ }
+
+ auto ie_attrs = proposal->get_attrs();
+ ie_attrs.infer_probs = infer_probs;
+ auto proposal_ie = std::make_shared<ngraph::op::ProposalIE>(proposal->input_value(0),
+ proposal->input_value(1),
+ last,
+ ie_attrs);
+ new_ops.push_back(proposal_ie);
+
+ proposal_ie->set_friendly_name(proposal->get_friendly_name());
+ ngraph::copy_runtime_info(ops_to_replace, new_ops);
+ ngraph::replace_node(proposal, proposal_ie);
+
+ return true;
+}
- auto proposal = std::make_shared<ngraph::opset1::Proposal>(input_0, input_1, input_2, attr);
+ngraph::pass::ConvertProposalToLegacyMatcher::ConvertProposalToLegacyMatcher() {
+ auto proposal = ngraph::pattern::wrap_type<ngraph::opset1::Proposal>();
- ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) {
- auto proposal = std::dynamic_pointer_cast<ngraph::opset1::Proposal> (m.get_match_root());
+ ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) {
+ auto proposal = std::dynamic_pointer_cast<ngraph::opset1::Proposal>(m.get_match_root());
if (!proposal) {
return false;
}
+ convert_to_proposal_ie(proposal);
+ return true;
+ };
+ auto m = std::make_shared<ngraph::pattern::Matcher>(proposal, "ConvertProposalToProposalIE");
+ this->register_matcher(m, callback);
+}
- Output<Node> last;
-
- ngraph::NodeVector ops_to_replace, new_ops;
- ops_to_replace.push_back(proposal);
+ngraph::pass::ConvertProposal4ToLegacyMatcher::ConvertProposal4ToLegacyMatcher() {
+ auto proposal = ngraph::pattern::wrap_type<ngraph::opset4::Proposal>();
- if (auto reshape = std::dynamic_pointer_cast<opset1::Reshape>(proposal->input_value(2).get_node_shared_ptr())) {
- auto input_shape = reshape->get_input_shape(0);
- if (input_shape.size() == 2) {
- last = reshape->input_value(0);
- ops_to_replace.push_back(reshape);
- }
- }
+ ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) {
+ auto proposal = std::dynamic_pointer_cast<ngraph::opset4::Proposal>(m.get_match_root());
- if (!last.get_node_shared_ptr()) {
- std::vector<int64_t> dims{1, -1};
- auto const_shape = std::make_shared<ngraph::opset1::Constant>(element::i64, Shape{2}, dims);
- last = std::make_shared<ngraph::opset1::Reshape>(proposal->input_value(2), const_shape, true);
- new_ops.push_back(last.get_node_shared_ptr());
+ if (!proposal) {
+ return false;
}
-
- auto proposal_ie = std::make_shared<ngraph::op::ProposalIE> (proposal->input_value(0),
- proposal->input_value(1),
- last,
- proposal->get_attrs());
- new_ops.push_back(proposal_ie);
-
- proposal_ie->set_friendly_name(proposal->get_friendly_name());
- ngraph::copy_runtime_info(ops_to_replace, new_ops);
- ngraph::replace_node(proposal, proposal_ie);
+ convert_to_proposal_ie(proposal, true);
return true;
};
-
- auto m = std::make_shared<ngraph::pattern::Matcher>(proposal, "ConvertProposalToProposalIE");
+ auto m = std::make_shared<ngraph::pattern::Matcher>(proposal, "ConvertProposal4ToProposalIE");
this->register_matcher(m, callback);
-}
\ No newline at end of file
+}
reshape = create_op_node_with_second_input(graph, Reshape, [im_info_shape[1]], {'name': 'im_info/Reshape'})
node.in_port(2).get_connection().set_destination(reshape.in_port(0))
reshape.out_port(0).connect(node.in_port(2))
-
- if node.has_port('out', 1) and not node.out_port(1).disconnected():
- # This is the case when Proposal layer is used from extension, not from opset.
- # Setting version attribute is not recommended, this will be fixed after Proposal will be updated in IE.
- graph.node[node.id]['version'] = 'extension'
'pre_nms_topn': pre_nms_topn,
'post_nms_topn': post_nms_topn,
'nms_thresh': nms_thresh,
- 'for_deformable': 1,
}
ProposalOp.update_node_stat(node, node_attrs)
mandatory_props = {
'type': __class__.op,
'op': __class__.op,
- 'version': 'opset1',
+ 'version': 'opset4',
'post_nms_topn': 300, # default in caffe-shared
'infer': ProposalOp.proposal_infer,
'in_ports_count': 3,
'out_ports_count': 2,
- 'for_deformable': 0,
'normalize': 0,
}
super().__init__(graph, mandatory_props, attrs)
'normalize',
'clip_after_nms',
'clip_before_nms',
- 'for_deformable',
]
@staticmethod
{
AttributeAdapter<AT> adapter(value);
start_structure(name);
- on_adapter(get_name_with_context(), adapter);
+ // todo: this is a workaround; remove after fixing #35906
+ if (get_name_with_context().find("the_proposal") != std::string::npos)
+ on_adapter(name, adapter);
+ else
+ on_adapter(get_name_with_context(), adapter);
finish_structure();
}
/// \returns The nested context of visits
// clip_before_nms Clip before NMS
// clip_after_nms Clip after NMS
// normalize Normalize boxes to [0,1]
- // box_size_scale Scale factor for scaling box size logits
- // box_coordinate_scale Scale factor for scaling box coordiate logits
+ // box_size_scale Scale factor for scaling box size
+ // box_coordinate_scale Scale factor for scaling box coordinate
// framework Calculation framework to use
struct ProposalAttrs
{
size_t min_size = 1;
std::vector<float> ratio;
std::vector<float> scale;
- bool clip_before_nms = false;
+ bool clip_before_nms = true;
bool clip_after_nms = false;
bool normalize = false;
float box_size_scale = 1.0f;
float box_coordinate_scale = 1.0f;
std::string framework;
+ bool infer_probs = false;
};
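A minimal hedged sketch of populating these attributes (values borrowed from the type_prop tests further down); `infer_probs` is the new flag that the Proposal-4 conversion sets so that ProposalIE also produces the probabilities output:

```cpp
// Hedged sketch: filling ProposalAttrs as the type_prop tests below do.
#include <ngraph/op/proposal.hpp>

ngraph::op::ProposalAttrs make_example_attrs()
{
    ngraph::op::ProposalAttrs attrs;
    attrs.base_size = 1;
    attrs.pre_nms_topn = 20;
    attrs.post_nms_topn = 200;
    attrs.ratio = {0.1f, 1.5f, 2.0f, 2.5f};
    attrs.scale = {2.0f, 3.0f, 3.0f, 4.0f};
    attrs.infer_probs = true; // new flag: set by ConvertProposal4ToLegacyMatcher, consumed by ProposalIE
    return attrs;
}
```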
namespace v0
class NGRAPH_API Proposal : public Op
{
public:
- static constexpr NodeTypeInfo type_info{"Proposal", 0};
- const NodeTypeInfo& get_type_info() const override { return type_info; }
+ NGRAPH_RTTI_DECLARATION;
Proposal() = default;
/// \brief Constructs a Proposal operation
///
/// \param class_probs Class probability scores
- /// \param class_logits Class prediction logits
+ /// \param bbox_deltas Prediction of bounding box deltas
/// \param image_shape Shape of image
/// \param attrs Proposal op attributes
Proposal(const Output<Node>& class_probs,
- const Output<Node>& class_logits,
+ const Output<Node>& bbox_deltas,
const Output<Node>& image_shape,
const ProposalAttrs& attrs);
const ProposalAttrs& get_attrs() const { return m_attrs; }
virtual bool visit_attributes(AttributeVisitor& visitor) override;
- private:
+ protected:
ProposalAttrs m_attrs;
};
}
+
+ namespace v4
+ {
+ class NGRAPH_API Proposal : public op::v0::Proposal
+ {
+ public:
+ NGRAPH_RTTI_DECLARATION;
+ Proposal() = default;
+ /// \brief Constructs a Proposal operation
+ ///
+ /// \param class_probs Class probability scores
+ /// \param bbox_deltas Prediction of bounding box deltas
+ /// \param image_shape Shape of image
+ /// \param attrs Proposal op attributes
+ Proposal(const Output<Node>& class_probs,
+ const Output<Node>& bbox_deltas,
+ const Output<Node>& image_shape,
+ const ProposalAttrs& attrs);
+
+ void validate_and_infer_types() override;
+ virtual std::shared_ptr<Node>
+ clone_with_new_inputs(const OutputVector& new_args) const override;
+ const ProposalAttrs& get_attrs() const { return m_attrs; }
+ };
+ }
+
using v0::Proposal;
}
NGRAPH_OP(Power, ngraph::op::v1)
NGRAPH_OP(PriorBox, ngraph::op::v0)
NGRAPH_OP(PriorBoxClustered, ngraph::op::v0)
-NGRAPH_OP(Proposal, ngraph::op::v0)
+NGRAPH_OP(Proposal, ngraph::op::v4)
NGRAPH_OP(Range, ngraph::op::v0)
NGRAPH_OP(Relu, ngraph::op::v0)
NGRAPH_OP(ReduceMax, ngraph::op::v1)
using namespace std;
using namespace ngraph;
-constexpr NodeTypeInfo op::Proposal::type_info;
+NGRAPH_RTTI_DEFINITION(op::v0::Proposal, "Proposal", 0);
-op::Proposal::Proposal(const Output<Node>& class_probs,
- const Output<Node>& class_logits,
- const Output<Node>& image_shape,
- const ProposalAttrs& attrs)
- : Op({class_probs, class_logits, image_shape})
+op::v0::Proposal::Proposal(const Output<Node>& class_probs,
+ const Output<Node>& bbox_deltas,
+ const Output<Node>& image_shape,
+ const ProposalAttrs& attrs)
+ : Op({class_probs, bbox_deltas, image_shape})
, m_attrs(attrs)
{
constructor_validate_and_infer_types();
}
-void op::Proposal::validate_and_infer_types()
+void op::v0::Proposal::validate_and_infer_types()
{
- set_input_is_relevant_to_shape(2);
-
const auto& class_probs_pshape = get_input_partial_shape(0);
- const auto& class_logits_pshape = get_input_partial_shape(1);
+ const auto& class_bbox_deltas_pshape = get_input_partial_shape(1);
const auto& image_shape_pshape = get_input_partial_shape(2);
- if (class_probs_pshape.is_static() && class_logits_pshape.is_static() &&
+ if (class_probs_pshape.is_static() && class_bbox_deltas_pshape.is_static() &&
image_shape_pshape.is_static())
{
const Shape class_probs_shape{class_probs_pshape.to_shape()};
- const Shape class_logits_shape{class_logits_pshape.to_shape()};
+ const Shape class_bbox_deltas_shape{class_bbox_deltas_pshape.to_shape()};
const Shape image_shape_shape{image_shape_pshape.to_shape()};
NODE_VALIDATION_CHECK(
class_probs_shape,
").");
- NODE_VALIDATION_CHECK(
- this,
- class_logits_shape.size() == 4,
- "Proposal layer shape class_logits_shape input must have rank 4 (class_logits_shape: ",
- class_logits_shape,
- ").");
+ NODE_VALIDATION_CHECK(this,
+ class_bbox_deltas_shape.size() == 4,
+ "Proposal layer shape class_bbox_deltas_shape input must have rank 4 "
+ "(class_bbox_deltas_shape: ",
+ class_bbox_deltas_shape,
+ ").");
NODE_VALIDATION_CHECK(
this,
}
}
-shared_ptr<Node> op::Proposal::clone_with_new_inputs(const OutputVector& new_args) const
+shared_ptr<Node> op::v0::Proposal::clone_with_new_inputs(const OutputVector& new_args) const
{
check_new_args_count(this, new_args);
- return make_shared<Proposal>(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs);
+ return make_shared<op::v0::Proposal>(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs);
}
-bool op::Proposal::visit_attributes(AttributeVisitor& visitor)
+bool op::v0::Proposal::visit_attributes(AttributeVisitor& visitor)
{
- visitor.on_attribute("attrs", m_attrs);
+ // temporary workaround, remove after #35906 is fixed
+ // visitor.on_attribute("attrs", m_attrs);
+ visitor.on_attribute("the_proposal", m_attrs);
return true;
}
visitor.on_attribute("framework", m_ref.framework);
return true;
}
+
+NGRAPH_RTTI_DEFINITION(op::v4::Proposal, "Proposal", 4);
+
+op::v4::Proposal::Proposal(const Output<Node>& class_probs,
+ const Output<Node>& class_bbox_deltas,
+ const Output<Node>& image_shape,
+ const op::ProposalAttrs& attrs)
+ : v0::Proposal(class_probs, class_bbox_deltas, image_shape, attrs)
+{
+ constructor_validate_and_infer_types();
+}
+
+void op::v4::Proposal::validate_and_infer_types()
+{
+ v0::Proposal::validate_and_infer_types();
+
+ const auto& class_probs_pshape = get_input_partial_shape(0);
+ const auto& class_bbox_deltas_pshape = get_input_partial_shape(1);
+ const auto& image_shape_pshape = get_input_partial_shape(2);
+ if (class_probs_pshape.is_static() && class_bbox_deltas_pshape.is_static() &&
+ image_shape_pshape.is_static())
+ {
+ const auto batch_size = class_probs_pshape.to_shape()[0];
+ set_output_type(1, get_input_element_type(0), Shape{batch_size * m_attrs.post_nms_topn});
+ }
+ else
+ set_output_type(1, get_input_element_type(0), PartialShape::dynamic());
+}
+
+std::shared_ptr<Node> op::v4::Proposal::clone_with_new_inputs(const OutputVector& new_args) const
+{
+ check_new_args_count(this, new_args);
+ return make_shared<op::v4::Proposal>(new_args.at(0), new_args.at(1), new_args.at(2), m_attrs);
+}
@nameable_op
def proposal(
class_probs: Node,
- box_logits: Node,
+ bbox_deltas: Node,
image_shape: NodeInput,
attrs: dict,
name: Optional[str] = None,
"""Filter bounding boxes and outputs only those with the highest prediction confidence.
:param class_probs: 4D input floating point tensor with class prediction scores.
- :param box_logits: 4D input floating point tensor with box logits.
+ :param bbox_deltas: 4D input floating point tensor with box deltas.
:param image_shape: The 1D input tensor with 3 or 4 elements describing image shape.
:param attrs: The dictionary containing key, value pairs for attributes.
:param name: Optional name for the output node.
:return: Node representing Proposal operation.
"""
requirements = [
- ("attrs.base_size", True, np.unsignedinteger, is_positive_value),
- ("attrs.pre_nms_topn", True, np.unsignedinteger, is_positive_value),
- ("attrs.post_nms_topn", True, np.unsignedinteger, is_positive_value),
- ("attrs.nms_thresh", True, np.floating, is_positive_value),
- ("attrs.feat_stride", True, np.unsignedinteger, is_positive_value),
- ("attrs.min_size", True, np.unsignedinteger, is_positive_value),
- ("attrs.ratio", True, np.floating, None),
- ("attrs.scale", True, np.floating, None),
- ("attrs.clip_before_nms", False, np.bool_, None),
- ("attrs.clip_after_nms", False, np.bool_, None),
- ("attrs.normalize", False, np.bool_, None),
- ("attrs.box_size_scale", False, np.floating, is_positive_value),
- ("attrs.box_coordinate_scale", False, np.floating, is_positive_value),
- ("attrs.framework", False, np.str_, None),
+ ("base_size", True, np.unsignedinteger, is_positive_value),
+ ("pre_nms_topn", True, np.unsignedinteger, is_positive_value),
+ ("post_nms_topn", True, np.unsignedinteger, is_positive_value),
+ ("nms_thresh", True, np.floating, is_positive_value),
+ ("feat_stride", True, np.unsignedinteger, is_positive_value),
+ ("min_size", True, np.unsignedinteger, is_positive_value),
+ ("ratio", True, np.floating, None),
+ ("scale", True, np.floating, None),
+ ("clip_before_nms", False, np.bool_, None),
+ ("clip_after_nms", False, np.bool_, None),
+ ("normalize", False, np.bool_, None),
+ ("box_size_scale", False, np.floating, is_positive_value),
+ ("box_coordinate_scale", False, np.floating, is_positive_value),
+ ("framework", False, np.str_, None),
]
check_valid_attributes("Proposal", attrs, requirements)
return _get_node_factory_opset1().create(
- "Proposal", [class_probs, box_logits, as_node(image_shape)], attrs
+ "Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs
)
from ngraph.opset1.ops import prior_box
from ngraph.opset1.ops import prior_box_clustered
from ngraph.opset1.ops import psroi_pooling
-from ngraph.opset1.ops import proposal
+from ngraph.opset4.ops import proposal
from ngraph.opset1.ops import range
from ngraph.opset3.ops import read_value
from ngraph.opset1.ops import reduce_logical_and
:return: New node with arctanh operation applied on it.
"""
return _get_node_factory_opset4().create("Atanh", [node])
+
+
+@nameable_op
+def proposal(
+ class_probs: Node,
+ bbox_deltas: Node,
+ image_shape: NodeInput,
+ attrs: dict,
+ name: Optional[str] = None,
+) -> Node:
+ """Filter bounding boxes and outputs only those with the highest prediction confidence.
+
+ :param class_probs: 4D input floating point tensor with class prediction scores.
+ :param bbox_deltas: 4D input floating point tensor with corrected predictions of bounding boxes
+ :param image_shape: The 1D input tensor with 3 or 4 elements describing image shape.
+ :param attrs: The dictionary containing key, value pairs for attributes.
+ :param name: Optional name for the output node.
+ * base_size The size of the anchor to which scale and ratio attributes are applied.
+ Range of values: a positive unsigned integer number
+ Default value: None
+ Required: yes
+ * pre_nms_topn The number of bounding boxes before the NMS operation.
+ Range of values: a positive unsigned integer number
+ Default value: None
+ Required: yes
+ * post_nms_topn The number of bounding boxes after the NMS operation.
+ Range of values: a positive unsigned integer number
+ Default value: None
+ Required: yes
+ * nms_thresh The minimum value of the proposal to be taken into consideration.
+ Range of values: a positive floating-point number
+ Default value: None
+ Required: yes
+ * feat_stride The step size to slide over boxes (in pixels).
+ Range of values: a positive unsigned integer
+ Default value: None
+ Required: yes
+ * min_size The minimum size of box to be taken into consideration.
+ Range of values: a positive unsigned integer number
+ Default value: None
+ Required: yes
+ * ratio The ratios for anchor generation.
+ Range of values: a list of floating-point numbers
+ Default value: None
+ Required: yes
+ * scale The scales for anchor generation.
+ Range of values: a list of floating-point numbers
+ Default value: None
+ Required: yes
+ * clip_before_nms The flag that specifies whether to clip bounding boxes before
+ non-maximum suppression.
+ Range of values: True or False
+ Default value: True
+ Required: no
+ * clip_after_nms The flag that specifies whether to clip bounding boxes after
+ non-maximum suppression.
+ Range of values: True or False
+ Default value: False
+ Required: no
+ * normalize The flag that specifies whether to perform normalization of output boxes to
+ [0,1] interval or not.
+ Range of values: True or False
+ Default value: False
+ Required: no
+ * box_size_scale Specifies the scale factor applied to logits of box sizes before decoding.
+ Range of values: a positive floating-point number
+ Default value: 1.0
+ Required: no
+ * box_coordinate_scale Specifies the scale factor applied to logits of box coordinates
+ before decoding.
+ Range of values: a positive floating-point number
+ Default value: 1.0
+ Required: no
+ * framework Specifies how the box coordinates are calculated.
+ Range of values: "" (empty string) - calculate box coordinates like in Caffe*
+ tensorflow - calculate box coordinates like in the TensorFlow*
+ Object Detection API models
+ Default value: "" (empty string)
+ Required: no
+ Example of attribute dictionary:
+ .. code-block:: python
+ # just required ones
+ attrs = {
+ 'base_size': 85,
+ 'pre_nms_topn': 10,
+ 'post_nms_topn': 20,
+ 'nms_thresh': 0.34,
+ 'feat_stride': 16,
+ 'min_size': 32,
+ 'ratio': [0.1, 1.5, 2.0, 2.5],
+ 'scale': [2, 3, 3, 4],
+ }
+ Optional attributes which are absent from dictionary will be set with corresponding default.
+ :return: Node representing Proposal operation.
+ """
+ requirements = [
+ ("base_size", True, np.unsignedinteger, is_positive_value),
+ ("pre_nms_topn", True, np.unsignedinteger, is_positive_value),
+ ("post_nms_topn", True, np.unsignedinteger, is_positive_value),
+ ("nms_thresh", True, np.floating, is_positive_value),
+ ("feat_stride", True, np.unsignedinteger, is_positive_value),
+ ("min_size", True, np.unsignedinteger, is_positive_value),
+ ("ratio", True, np.floating, None),
+ ("scale", True, np.floating, None),
+ ("clip_before_nms", False, np.bool_, None),
+ ("clip_after_nms", False, np.bool_, None),
+ ("normalize", False, np.bool_, None),
+ ("box_size_scale", False, np.floating, is_positive_value),
+ ("box_coordinate_scale", False, np.floating, is_positive_value),
+ ("framework", False, np.str_, None),
+ ]
+
+ check_valid_attributes("Proposal", attrs, requirements)
+
+ return _get_node_factory_opset4().create(
+ "Proposal", [class_probs, bbox_deltas, as_node(image_shape)], attrs
+ )
)
def test_proposal(int_dtype, fp_dtype):
attributes = {
- "attrs.base_size": int_dtype(1),
- "attrs.pre_nms_topn": int_dtype(20),
- "attrs.post_nms_topn": int_dtype(64),
- "attrs.nms_thresh": fp_dtype(0.34),
- "attrs.feat_stride": int_dtype(16),
- "attrs.min_size": int_dtype(32),
- "attrs.ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype),
- "attrs.scale": np.array([2, 3, 3, 4], dtype=fp_dtype),
+ "base_size": int_dtype(1),
+ "pre_nms_topn": int_dtype(20),
+ "post_nms_topn": int_dtype(64),
+ "nms_thresh": fp_dtype(0.34),
+ "feat_stride": int_dtype(16),
+ "min_size": int_dtype(32),
+ "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype),
+ "scale": np.array([2, 3, 3, 4], dtype=fp_dtype),
}
batch_size = 7
class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs")
- class_logits = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "class_logits")
+ bbox_deltas = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas")
image_shape = ng.parameter([3], fp_dtype, "image_shape")
- node = ng.proposal(class_probs, class_logits, image_shape, attributes)
+ node = ng.proposal(class_probs, bbox_deltas, image_shape, attributes)
assert node.get_type_name() == "Proposal"
- assert node.get_output_size() == 1
- assert list(node.get_output_shape(0)) == [batch_size * attributes["attrs.post_nms_topn"], 5]
+ assert node.get_output_size() == 2
+ assert list(node.get_output_shape(0)) == [batch_size * attributes["post_nms_topn"], 5]
def test_tensor_iterator():
@pytest.fixture()
def _proposal_node():
attributes = {
- "attrs.base_size": np.uint16(1),
- "attrs.pre_nms_topn": np.uint16(20),
- "attrs.post_nms_topn": np.uint16(64),
- "attrs.nms_thresh": np.float64(0.34),
- "attrs.feat_stride": np.uint16(16),
- "attrs.min_size": np.uint16(32),
- "attrs.ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float64),
- "attrs.scale": np.array([2, 3, 3, 4], dtype=np.float64),
+ "base_size": np.uint16(1),
+ "pre_nms_topn": np.uint16(20),
+ "post_nms_topn": np.uint16(64),
+ "nms_thresh": np.float64(0.34),
+ "feat_stride": np.uint16(16),
+ "min_size": np.uint16(32),
+ "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float64),
+ "scale": np.array([2, 3, 3, 4], dtype=np.float64),
}
batch_size = 7
class_probs = ng.parameter([batch_size, 12, 34, 62], np.float64, "class_probs")
- class_logits = ng.parameter([batch_size, 24, 34, 62], np.float64, "class_logits")
+ bbox_deltas = ng.parameter([batch_size, 24, 34, 62], np.float64, "bbox_deltas")
image_shape = ng.parameter([3], np.float64, "image_shape")
- return ng.proposal(class_probs, class_logits, image_shape, attributes)
+ return ng.proposal(class_probs, bbox_deltas, image_shape, attributes)
def test_dynamic_attributes_softmax():
)
def test_dynamic_set_attribute_value(int_dtype, fp_dtype):
attributes = {
- "attrs.base_size": int_dtype(1),
- "attrs.pre_nms_topn": int_dtype(20),
- "attrs.post_nms_topn": int_dtype(64),
- "attrs.nms_thresh": fp_dtype(0.34),
- "attrs.feat_stride": int_dtype(16),
- "attrs.min_size": int_dtype(32),
- "attrs.ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype),
- "attrs.scale": np.array([2, 3, 3, 4], dtype=fp_dtype),
+ "base_size": int_dtype(1),
+ "pre_nms_topn": int_dtype(20),
+ "post_nms_topn": int_dtype(64),
+ "nms_thresh": fp_dtype(0.34),
+ "feat_stride": int_dtype(16),
+ "min_size": int_dtype(32),
+ "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype),
+ "scale": np.array([2, 3, 3, 4], dtype=fp_dtype),
}
batch_size = 7
class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, "class_probs")
- class_logits = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "class_logits")
+ bbox_deltas = ng.parameter([batch_size, 24, 34, 62], fp_dtype, "bbox_deltas")
image_shape = ng.parameter([3], fp_dtype, "image_shape")
- node = ng.proposal(class_probs, class_logits, image_shape, attributes)
+ node = ng.proposal(class_probs, bbox_deltas, image_shape, attributes)
node.set_base_size(int_dtype(15))
node.set_pre_nms_topn(int_dtype(7))
--- /dev/null
+# ******************************************************************************
+# Copyright 2017-2020 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ******************************************************************************
+import numpy as np
+import ngraph as ng
+from ngraph.impl import Shape, Type
+
+
+def test_proposal_props():
+ float_dtype = np.float32
+ batch_size = 1
+ post_nms_topn = 20
+ probs = ng.parameter(Shape([batch_size, 8, 255, 255]), dtype=float_dtype, name="probs")
+ deltas = ng.parameter(Shape([batch_size, 16, 255, 255]), dtype=float_dtype, name="bbox_deltas")
+ im_info = ng.parameter(Shape([4]), dtype=float_dtype, name="im_info")
+
+ attrs = {
+ "base_size": np.uint32(85),
+ "pre_nms_topn": np.uint32(10),
+ "post_nms_topn": np.uint32(post_nms_topn),
+ "nms_thresh": np.float32(0.34),
+ "feat_stride": np.uint32(16),
+ "min_size": np.uint32(32),
+ "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float32),
+ "scale": np.array([2, 3, 3, 4], dtype=np.float32),
+ }
+
+ node = ng.proposal(probs, deltas, im_info, attrs)
+
+ assert node.get_type_name() == "Proposal"
+ assert node.get_output_size() == 2
+
+ assert list(node.get_output_shape(0)) == [batch_size * post_nms_topn, 5]
+ assert list(node.get_output_shape(1)) == [batch_size * post_nms_topn]
+ assert node.get_output_element_type(0) == Type.f32
+ assert node.get_output_element_type(1) == Type.f32
constexpr NodeTypeInfo Oracle::type_info;
-TEST(attributes, user_op)
+// todo: temporary disabled until bug with AttributeVisitor is fixed #35906
+TEST(attributes, DISABLED_user_op)
{
FactoryRegistry<Node>::get().register_factory<Oracle>();
auto program = make_shared<op::Parameter>(element::i32, Shape{200});
using namespace std;
using namespace ngraph;
-TEST(type_prop, proposal_invalid_class_probs_rank)
+// ------------------------------ V0 ------------------------------
+
+TEST(type_prop, proposal_v0_invalid_class_probs_rank)
+{
+ op::ProposalAttrs attrs;
+ auto class_probs = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
+ auto class_bbox_deltas = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
+ auto image_shape = make_shared<op::Parameter>(element::f32, Shape{3});
+
+ try
+ {
+ auto proposal =
+ make_shared<op::v0::Proposal>(class_probs, class_bbox_deltas, image_shape, attrs);
+ // Should have thrown, so fail if it didn't
+ FAIL() << "Invalid input tensor rank.";
+ }
+ catch (const NodeValidationFailure& error)
+ {
+ EXPECT_HAS_SUBSTRING(
+ error.what(), std::string("Proposal layer shape class_probs input must have rank 4"));
+ }
+ catch (...)
+ {
+ FAIL() << "Deduced type check failed for unexpected reason";
+ }
+}
+
+TEST(type_prop, proposal_v0_invalid_class_bbox_deltas_rank)
+{
+ op::ProposalAttrs attrs;
+ auto class_probs = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
+ auto class_bbox_deltas = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
+ auto image_shape = make_shared<op::Parameter>(element::f32, Shape{3});
+
+ try
+ {
+ auto proposal =
+ make_shared<op::v0::Proposal>(class_probs, class_bbox_deltas, image_shape, attrs);
+ // Should have thrown, so fail if it didn't
+ FAIL() << "Invalid input tensor rank.";
+ }
+ catch (const NodeValidationFailure& error)
+ {
+ EXPECT_HAS_SUBSTRING(
+ error.what(),
+ std::string("Proposal layer shape class_bbox_deltas_shape input must have rank 4"));
+ }
+ catch (...)
+ {
+ FAIL() << "Deduced type check failed for unexpected reason";
+ }
+}
+
+TEST(type_prop, proposal_v0_invalid_image_shape_rank)
+{
+ op::ProposalAttrs attrs;
+ auto class_probs = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
+ auto class_bbox_deltas = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
+ auto image_shape = make_shared<op::Parameter>(element::f32, Shape{2, 1});
+
+ try
+ {
+ auto proposal =
+ make_shared<op::v0::Proposal>(class_probs, class_bbox_deltas, image_shape, attrs);
+ // Should have thrown, so fail if it didn't
+ FAIL() << "Invalid input tensor rank.";
+ }
+ catch (const NodeValidationFailure& error)
+ {
+ EXPECT_HAS_SUBSTRING(error.what(),
+ std::string("Proposal layer image_shape input must have rank 1"));
+ }
+ catch (...)
+ {
+ FAIL() << "Deduced type check failed for unexpected reason";
+ }
+}
+
+TEST(type_prop, proposal_v0_invalid_image_shape_size)
+{
+ op::ProposalAttrs attrs;
+ auto class_probs = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
+ auto class_bbox_deltas = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
+ auto image_shape = make_shared<op::Parameter>(element::f32, Shape{5});
+
+ try
+ {
+ auto proposal =
+ make_shared<op::v0::Proposal>(class_probs, class_bbox_deltas, image_shape, attrs);
+ // Should have thrown, so fail if it didn't
+ FAIL() << "Invalid input tensor rank.";
+ }
+ catch (const NodeValidationFailure& error)
+ {
+ EXPECT_HAS_SUBSTRING(
+ error.what(),
+ std::string(
+ "Image_shape 1D tensor must have => 3 and <= 4 elements (image_shape_shape[0]"));
+ }
+ catch (...)
+ {
+ FAIL() << "Deduced type check failed for unexpected reason";
+ }
+}
+
+TEST(type_prop, proposal_v0_shape_infer)
+{
+ op::ProposalAttrs attrs;
+ attrs.base_size = 1;
+ attrs.pre_nms_topn = 20;
+ attrs.post_nms_topn = 200;
+ const size_t batch_size = 7;
+
+ auto class_probs = make_shared<op::Parameter>(element::f32, Shape{batch_size, 12, 34, 62});
+ auto class_bbox_deltas =
+ make_shared<op::Parameter>(element::f32, Shape{batch_size, 24, 34, 62});
+ auto image_shape = make_shared<op::Parameter>(element::f32, Shape{3});
+ auto op = make_shared<op::v0::Proposal>(class_probs, class_bbox_deltas, image_shape, attrs);
+ ASSERT_EQ(op->get_output_shape(0), (Shape{batch_size * attrs.post_nms_topn, 5}));
+}
+
+// ------------------------------ V4 ------------------------------
+
+TEST(type_prop, proposal_v4_invalid_class_probs_rank)
{
op::ProposalAttrs attrs;
auto class_probs = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
- auto class_logits = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
+ auto class_bbox_deltas = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
auto image_shape = make_shared<op::Parameter>(element::f32, Shape{3});
try
{
- auto proposal = make_shared<op::Proposal>(class_probs, class_logits, image_shape, attrs);
+ auto proposal =
+ make_shared<op::v4::Proposal>(class_probs, class_bbox_deltas, image_shape, attrs);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input tensor rank.";
}
}
}
-TEST(type_prop, proposal_invalid_class_logits_rank)
+TEST(type_prop, proposal_v4_invalid_class_bbox_deltas_rank)
{
op::ProposalAttrs attrs;
auto class_probs = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
- auto class_logits = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
+ auto class_bbox_deltas = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3});
auto image_shape = make_shared<op::Parameter>(element::f32, Shape{3});
try
{
- auto proposal = make_shared<op::Proposal>(class_probs, class_logits, image_shape, attrs);
+ auto proposal =
+ make_shared<op::v4::Proposal>(class_probs, class_bbox_deltas, image_shape, attrs);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input tensor rank.";
}
{
EXPECT_HAS_SUBSTRING(
error.what(),
- std::string("Proposal layer shape class_logits_shape input must have rank 4"));
+ std::string("Proposal layer shape class_bbox_deltas_shape input must have rank 4"));
}
catch (...)
{
}
}
-TEST(type_prop, proposal_invalid_image_shape_rank)
+TEST(type_prop, proposal_v4_invalid_image_shape_rank)
{
op::ProposalAttrs attrs;
auto class_probs = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
- auto class_logits = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
+ auto class_bbox_deltas = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
auto image_shape = make_shared<op::Parameter>(element::f32, Shape{2, 1});
try
{
- auto proposal = make_shared<op::Proposal>(class_probs, class_logits, image_shape, attrs);
+ auto proposal =
+ make_shared<op::v4::Proposal>(class_probs, class_bbox_deltas, image_shape, attrs);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input tensor rank.";
}
}
}
-TEST(type_prop, proposal_invalid_image_shape_size)
+TEST(type_prop, proposal_v4_invalid_image_shape_size)
{
op::ProposalAttrs attrs;
auto class_probs = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
- auto class_logits = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
+ auto class_bbox_deltas = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
auto image_shape = make_shared<op::Parameter>(element::f32, Shape{5});
try
{
- auto proposal = make_shared<op::Proposal>(class_probs, class_logits, image_shape, attrs);
+ auto proposal =
+ make_shared<op::v4::Proposal>(class_probs, class_bbox_deltas, image_shape, attrs);
// Should have thrown, so fail if it didn't
FAIL() << "Invalid input tensor rank.";
}
FAIL() << "Deduced type check failed for unexpected reason";
}
}
+
+TEST(type_prop, proposal_v4_shape_infer)
+{
+ op::ProposalAttrs attrs;
+ attrs.base_size = 1;
+ attrs.pre_nms_topn = 20;
+ attrs.post_nms_topn = 200;
+ const size_t batch_size = 7;
+
+ auto class_probs = make_shared<op::Parameter>(element::f32, Shape{batch_size, 12, 34, 62});
+ auto class_bbox_deltas =
+ make_shared<op::Parameter>(element::f32, Shape{batch_size, 24, 34, 62});
+ auto image_shape = make_shared<op::Parameter>(element::f32, Shape{3});
+ auto op = make_shared<op::v4::Proposal>(class_probs, class_bbox_deltas, image_shape, attrs);
+ ASSERT_EQ(op->get_output_shape(0), (Shape{batch_size * attrs.post_nms_topn, 5}));
+ ASSERT_EQ(op->get_output_shape(1), (Shape{batch_size * attrs.post_nms_topn}));
+}
#include "ngraph/op/interpolate.hpp"
#include "ngraph/op/prior_box.hpp"
#include "ngraph/op/prior_box_clustered.hpp"
-#include "ngraph/op/proposal.hpp"
#include "ngraph/op/psroi_pooling.hpp"
#include "ngraph/op/region_yolo.hpp"
#include "ngraph/op/reorg_yolo.hpp"
#include "ngraph/op/roi_pooling.hpp"
-#include "util/type_prop.hpp"
#include <memory>
using namespace std;
ASSERT_EQ(pbc->get_shape(), (Shape{2, 4332}));
}
-TEST(type_prop_layers, proposal)
-{
- op::ProposalAttrs attrs;
- attrs.base_size = 1;
- attrs.pre_nms_topn = 20;
- attrs.post_nms_topn = 200;
- const size_t batch_size = 7;
-
- auto class_probs = make_shared<op::Parameter>(element::f32, Shape{batch_size, 12, 34, 62});
- auto class_logits = make_shared<op::Parameter>(element::f32, Shape{batch_size, 24, 34, 62});
- auto image_shape = make_shared<op::Parameter>(element::f32, Shape{3});
- auto op = make_shared<op::Proposal>(class_probs, class_logits, image_shape, attrs);
- ASSERT_EQ(op->get_shape(), (Shape{batch_size * attrs.post_nms_topn, 5}));
-}
-
TEST(type_prop_layers, region_yolo1)
{
auto inputs = make_shared<op::Parameter>(element::f32, Shape{1, 125, 13, 13});