From 61a7cdde2355094575e5e12058b6c1f6542738fe Mon Sep 17 00:00:00 2001 From: Ivan Tikhonov Date: Fri, 18 Sep 2020 21:26:21 +0300 Subject: [PATCH] Introduce opset5, include GRU/RNN/LSTM Sequences to opset5 (#2265) * introduce new opset5, include GRU/RNN/LSTM Sequences to opset5 * resolve review remarks --- docs/doxygen/ie_docs.xml | 3 + docs/ops/opset.md | 1 + docs/ops/opset5.md | 152 ++++++++++++++ docs/ops/sequence/GRUSequence_5.md | 136 +++++++++++++ docs/ops/sequence/RNNSequence_5.md | 128 ++++++++++++ .../src/readers/ir_reader/ie_ir_parser.cpp | 2 +- .../bidirectional_sequences_decomposition.cpp | 33 +-- .../convert_sequences_to_sequences_ie.cpp | 72 +++---- .../convert_ti_to_sequences.cpp | 208 +++++++++---------- .../convert_sequences_to_sequences_ie_test.cpp | 150 +++++++------- .../convert_ti_to_sequences_test.cpp | 222 ++++++++++----------- .../include/ngraph_functions/builders.hpp | 1 + .../tests/ngraph_functions/src/gru_cell.cpp | 2 +- .../tests/ngraph_functions/src/lstm_cell.cpp | 2 +- .../tests/ngraph_functions/src/rnn_cell.cpp | 2 +- ngraph/core/include/ngraph/opsets/opset.hpp | 1 + ngraph/core/include/ngraph/opsets/opset5.hpp | 29 +++ ngraph/core/include/ngraph/opsets/opset5_tbl.hpp | 169 ++++++++++++++++ ngraph/core/src/opsets/opset.cpp | 19 ++ ngraph/test/attributes.cpp | 7 +- ngraph/test/type_prop/gru_sequence.cpp | 14 +- ngraph/test/type_prop/lstm_sequence.cpp | 62 +++--- ngraph/test/type_prop/rnn_sequence.cpp | 14 +- 23 files changed, 1035 insertions(+), 394 deletions(-) create mode 100644 docs/ops/opset5.md create mode 100644 docs/ops/sequence/GRUSequence_5.md create mode 100644 docs/ops/sequence/RNNSequence_5.md create mode 100644 ngraph/core/include/ngraph/opsets/opset5.hpp create mode 100644 ngraph/core/include/ngraph/opsets/opset5_tbl.hpp diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml index cb6afcf..c7f796f 100644 --- a/docs/doxygen/ie_docs.xml +++ b/docs/doxygen/ie_docs.xml @@ -94,6 +94,7 @@ + @@ -147,6 +148,7 @@ + @@ 
-213,6 +215,7 @@ + diff --git a/docs/ops/opset.md b/docs/ops/opset.md index 958e7bd..365e6bd 100644 --- a/docs/ops/opset.md +++ b/docs/ops/opset.md @@ -6,6 +6,7 @@ This topic provides a complete list of available sets of operations supported in | OpenVINO™ Version | Actual Operations Set | | :---------------- | :------------------------------- | +| 2021.2 | [opset5](opset5.md) | | 2021.1 | [opset4](opset4.md) | | 2020.4 | [opset3](opset3.md) | | 2020.3 | [opset2](opset2.md) | diff --git a/docs/ops/opset5.md b/docs/ops/opset5.md new file mode 100644 index 0000000..c2083aa --- /dev/null +++ b/docs/ops/opset5.md @@ -0,0 +1,152 @@ +# Operation Set `opset5` Specification {#openvino_docs_ops_opset5} + +This specification document describes `opset5` operation set supported in OpenVINO. +Support for each particular operation from the list below depends on the capabilities available in an inference plugin +and may vary among different hardware platforms and devices. Examples of operation instances are expressed as IR V10 xml +snippets. Such IR is generated by the Model Optimizer. The semantics match corresponding nGraph operation classes +declared in `namespace opset5`. 
+ + +## Table of Contents + +* [Abs](arithmetic/Abs_1.md) +* [Acos](arithmetic/Acos_1.md) +* [Acosh](arithmetic/Acosh_3.md) +* [Add](arithmetic/Add_1.md) +* [Asin](arithmetic/Asin_1.md) +* [Asinh](arithmetic/Asinh_3.md) +* [Assign](infrastructure/Assign_3.md) +* [Atan](arithmetic/Atan_1.md) +* [Atanh](arithmetic/Atanh_3.md) +* [AvgPool](pooling/AvgPool_1.md) +* [BatchNormInference](normalization/BatchNormInference_1.md) +* [BatchToSpace](movement/BatchToSpace_2.md) +* [BinaryConvolution](convolution/BinaryConvolution_1.md) +* [Broadcast](movement/Broadcast_3.md) +* [Bucketize](condition/Bucketize_3.md) +* [CTCGreedyDecoder](sequence/CTCGreedyDecoder_1.md) +* [CTCLoss](sequence/CTCLoss_4.md) +* [Ceiling](arithmetic/Ceiling_1.md) +* [Clamp](activation/Clamp_1.md) +* [Concat](movement/Concat_1.md) +* [Constant](infrastructure/Constant_1.md) +* [Convert](type/Convert_1.md) +* [ConvertLike](type/ConvertLike_1.md) +* [Convolution](convolution/Convolution_1.md) +* [ConvolutionBackpropData](convolution/ConvolutionBackpropData_1.md) +* [Cos](arithmetic/Cos_1.md) +* [Cosh](arithmetic/Cosh_1.md) +* [CumSum](arithmetic/CumSum_3.md) +* [DeformableConvolution](convolution/DeformableConvolution_1.md) +* [DeformablePSROIPooling](detection/DeformablePSROIPooling_1.md) +* [DepthToSpace](movement/DepthToSpace_1.md) +* [DetectionOutput](detection/DetectionOutput_1.md) +* [Divide](arithmetic/Divide_1.md) +* [Elu](activation/Elu_1.md) +* [EmbeddingBagOffsetsSum](sparse/EmbeddingBagOffsetsSum_3.md) +* [EmbeddingBagPackedSum](sparse/EmbeddingBagPackedSum_3.md) +* [EmbeddingSegmentsSum](sparse/EmbeddingSegmentsSum_3.md) +* [Equal](comparison/Equal_1.md) +* [Erf](arithmetic/Erf_1.md) +* [Exp](activation/Exp_1.md) +* [ExtractImagePatches](movement/ExtractImagePatches_3.md) +* [FakeQuantize](quantization/FakeQuantize_1.md) +* [Floor](arithmetic/Floor_1.md) +* [FloorMod](arithmetic/FloorMod_1.md) +* [Gather](movement/Gather_1.md) +* [GatherTree](movement/GatherTree_1.md) +* 
[Gelu](activation/GELU_2.md) +* [Greater](comparison/Greater_1.md) +* [GreaterEqual](comparison/GreaterEqual_1.md) +* [GRN](normalization/GRN_1.md) +* [GroupConvolution](convolution/GroupConvolution_1.md) +* [GroupConvolutionBackpropData](convolution/GroupConvolutionBackpropData_1.md) +* [GRUCell](sequence/GRUCell_3.md) +* [GRUSequence](sequence/GRUSequence_5.md) +* [HardSigmoid](activation/HardSigmoid_1.md) +* [HSwish](activation/HSwish_4.md) +* [Interpolate](image/Interpolate_4.md) +* [Less](comparison/Less_1.md) +* [LessEqual](comparison/LessEqual_1.md) +* [Log](arithmetic/Log_1.md) +* [LogicalAnd](logical/LogicalAnd_1.md) +* [LogicalNot](logical/LogicalNot_1.md) +* [LogicalOr](logical/LogicalOr_1.md) +* [LogicalXor](logical/LogicalXor_1.md) +* [LRN](normalization/LRN_1.md) +* [LSTMCell](sequence/LSTMCell_1.md) +* [LSTMSequence](sequence/LSTMSequence_1.md) +* [MatMul](matrix/MatMul_1.md) +* [MaxPool](pooling/MaxPool_1.md) +* [Maximum](arithmetic/Maximum_1.md) +* [Minimum](arithmetic/Minimum_1.md) +* [Mish](activation/Mish_4.md) +* [Mod](arithmetic/Mod_1.md) +* [MVN](normalization/MVN_1.md) +* [Multiply](arithmetic/Multiply_1.md) +* [Negative](arithmetic/Negative_1.md) +* [NonMaxSuppression](sort/NonMaxSuppression_4.md) +* [NonZero](condition/NonZero_3.md) +* [NormalizeL2](normalization/NormalizeL2_1.md) +* [NotEqual](comparison/NotEqual_1.md) +* [OneHot](sequence/OneHot_1.md) +* [Pad](movement/Pad_1.md) +* [Parameter](infrastructure/Parameter_1.md) +* [Power](arithmetic/Power_1.md) +* [PReLU](activation/PReLU_1.md) +* [PriorBoxClustered](detection/PriorBoxClustered_1.md) +* [PriorBox](detection/PriorBox_1.md) +* [Proposal](detection/Proposal_4.md) +* [PSROIPooling](detection/PSROIPooling_1.md) +* [Range](generation/Range_4.md) +* [ReLU](activation/ReLU_1.md) +* [ReadValue](infrastructure/ReadValue_3.md) +* [ReduceL1](reduction/ReduceL1_4.md) +* [ReduceL2](reduction/ReduceL2_4.md) +* [ReduceLogicalAnd](reduction/ReduceLogicalAnd_1.md) +* 
[ReduceLogicalOr](reduction/ReduceLogicalOr_1.md) +* [ReduceMax](reduction/ReduceMax_1.md) +* [ReduceMean](reduction/ReduceMean_1.md) +* [ReduceMin](reduction/ReduceMin_1.md) +* [ReduceProd](reduction/ReduceProd_1.md) +* [ReduceSum](reduction/ReduceSum_1.md) +* [RegionYolo](detection/RegionYolo_1.md) +* [ReorgYolo](detection/ReorgYolo_1.md) +* [Reshape](shape/Reshape_1.md) +* [Result](infrastructure/Result_1.md) +* [Reverse](movement/Reverse_1.md) +* [ReverseSequence](movement/ReverseSequence_1.md) +* [RNNCell](sequence/RNNCell_3.md) +* [RNNSequence](sequence/RNNSequence_5.md) +* [ROIAlign](detection/ROIAlign_3.md) +* [ROIPooling](detection/ROIPooling_1.md) +* [ScatterElementsUpdate](movement/ScatterElementsUpdate_3.md) +* [ScatterNDUpdate](movement/ScatterNDUpdate_3.md) +* [ScatterUpdate](movement/ScatterUpdate_3.md) +* [Select](condition/Select_1.md) +* [Selu](arithmetic/Selu_1.md) +* [ShapeOf](shape/ShapeOf_3.md) +* [ShuffleChannels](movement/ShuffleChannels_1.md) +* [Sigmoid](activation/Sigmoid_1.md) +* [Sign](arithmetic/Sign_1.md) +* [Sin](arithmetic/Sin_1.md) +* [Sinh](arithmetic/Sinh_1.md) +* [SoftMax](activation/SoftMax_1.md) +* [SoftPlus](activation/SoftPlus_4.md) +* [SpaceToBatch](movement/SpaceToBatch_2.md) +* [SpaceToDepth](movement/SpaceToDepth_1.md) +* [Split](movement/Split_1.md) +* [Sqrt](arithmetic/Sqrt_1.md) +* [SquaredDifference](arithmetic/SquaredDifference_1.md) +* [Squeeze](shape/Squeeze_1.md) +* [StridedSlice](movement/StridedSlice_1.md) +* [Subtract](arithmetic/Subtract_1.md) +* [Swish](activation/Swish_4.md) +* [Tan](arithmetic/Tan_1.md) +* [Tanh](arithmetic/Tanh_1.md) +* [TensorIterator](infrastructure/TensorIterator_1.md) +* [Tile](movement/Tile_1.md) +* [TopK](sort/TopK_3.md) +* [Transpose](movement/Transpose_1.md) +* [Unsqueeze](shape/Unsqueeze_1.md) +* [VariadicSplit](movement/VariadicSplit_1.md) diff --git a/docs/ops/sequence/GRUSequence_5.md b/docs/ops/sequence/GRUSequence_5.md new file mode 100644 index 0000000..67ee697 --- 
/dev/null +++ b/docs/ops/sequence/GRUSequence_5.md @@ -0,0 +1,136 @@ +## GRUSequence {#openvino_docs_ops_sequence_GRUSequence_5} + +**Versioned name**: *GRUSequence-5* + +**Category**: *Sequence processing* + +**Short description**: *GRUSequence* operation represents a series of GRU cells. Each cell is implemented as GRUCell operation. + +**Detailed description** + +A single cell in the sequence is implemented in the same way as in GRUCell operation. *GRUSequence* represents a sequence of GRU cells. The sequence can be connected differently depending on `direction` attribute that specifies the direction of traversing of input data along sequence dimension or specifies whether it should be a bidirectional sequence. Most of the attributes are in sync with the specification of the ONNX GRU operator defined in GRUCell. + + +**Attributes** + +* *hidden_size* + + * **Description**: *hidden_size* specifies hidden state size. + * **Range of values**: a positive integer + * **Type**: `int` + * **Default value**: None + * **Required**: *yes* + +* *activations* + + * **Description**: *activations* specifies activation functions for gates, there are two gates, so two activation functions should be specified as a value for this attribute + * **Range of values**: any combination of *relu*, *sigmoid*, *tanh* + * **Type**: a list of strings + * **Default value**: *sigmoid,tanh* + * **Required**: *no* + +* *activations_alpha, activations_beta* + + * **Description**: *activations_alpha, activations_beta* attributes of functions; applicability and meaning of these attributes depends on chosen activation functions + * **Range of values**: a list of floating-point numbers + * **Type**: `float[]` + * **Default value**: None + * **Required**: *no* + +* *clip* + + * **Description**: *clip* specifies bound values *[-C, C]* for tensor clipping. Clipping is performed before activations. 
+ * **Range of values**: a positive floating-point number + * **Type**: `float` + * **Default value**: *infinity* that means that the clipping is not applied + * **Required**: *no* + +* *direction* + + * **Description**: Specify if the RNN is forward, reverse, or bidirectional. If it is one of *forward* or *reverse* then `num_directions = 1`, if it is *bidirectional*, then `num_directions = 2`. This `num_directions` value specifies input/output shape requirements. + * **Range of values**: *forward*, *reverse*, *bidirectional* + * **Type**: `string` + * **Default value**: None + * **Required**: *Yes* + +* *linear_before_reset* + + * **Description**: *linear_before_reset* flag denotes if the layer behaves according to the modification of *GRUCell* described in the formula in the [ONNX documentation](https://github.com/onnx/onnx/blob/master/docs/Operators.md#GRU). + * **Range of values**: True or False + * **Type**: `boolean` + * **Default value**: False + * **Required**: *no* + +**Inputs** + +* **1**: `X` - 3D tensor of type *T1* `[batch_size, seq_length, input_size]`, input data. It differs from GRUCell 1st input only by additional axis with size `seq_length`. **Required.** + +* **2**: `initial_hidden_state` - 3D tensor of type *T1* `[batch_size, num_directions, hidden_size]`, input hidden state data. **Required.** + +* **3**: `sequence_lengths` - 1D tensor of type *T2* `[batch_size]`, specifies real sequence lengths for each batch element. **Required.** + +* **4**: `W` - 3D tensor of type *T1* `[num_directions, 3 * hidden_size, input_size]`, the weights for matrix multiplication, gate order: zrh. **Required.** + +* **5**: `R` - 3D tensor of type *T1* `[num_directions, 3 * hidden_size, hidden_size]`, the recurrence weights for matrix multiplication, gate order: zrh. **Required.** + +* **6**: `B` - 2D tensor of type *T1*. 
If *linear_before_reset* is set to 1, then the shape is `[num_directions, 4 * hidden_size]` - the sum of biases for z and r gates (weights and recurrence weights), the biases for h gate are placed separately. Otherwise the shape is `[num_directions, 3 * hidden_size]`, the sum of biases (weights and recurrence weights). **Required.** + +**Outputs** + +* **1**: `Y` – 3D tensor of type *T1* `[batch_size, num_directions, seq_len, hidden_size]`, concatenation of all the intermediate output values of the hidden. + +* **2**: `Ho` - 3D tensor of type *T1* `[batch_size, num_directions, hidden_size]`, the last output value of hidden state. + +**Types** + +* *T1*: any supported floating point type. +* *T2*: any supported integer type. + +**Example** +```xml + + + + + 1 + 4 + 16 + + + 1 + 1 + 128 + + + 1 + + + 1 + 384 + 16 + + + 1 + 384 + 128 + + + 1 + 384 + + + + + 1 + 1 + 4 + 128 + + + 1 + 1 + 128 + + + +``` \ No newline at end of file diff --git a/docs/ops/sequence/RNNSequence_5.md b/docs/ops/sequence/RNNSequence_5.md new file mode 100644 index 0000000..fb64c3c --- /dev/null +++ b/docs/ops/sequence/RNNSequence_5.md @@ -0,0 +1,128 @@ +## RNNSequence {#openvino_docs_ops_sequence_RNNSequence_5} + +**Versioned name**: *RNNSequence-5* + +**Category**: *Sequence processing* + +**Short description**: *RNNSequence* operation represents a series of RNN cells. Each cell is implemented as RNNCell operation. + +**Detailed description** + +A single cell in the sequence is implemented in the same way as in RNNCell operation. *RNNSequence* represents a sequence of RNN cells. The sequence can be connected differently depending on `direction` attribute that specifies the direction of traversing of input data along sequence dimension or specifies whether it should be a bidirectional sequence. The most of the attributes are in sync with the specification of ONNX RNN operator defined RNNCell. 
+ + +**Attributes** + +* *hidden_size* + + * **Description**: *hidden_size* specifies hidden state size. + * **Range of values**: a positive integer + * **Type**: `int` + * **Default value**: None + * **Required**: *yes* + +* *activations* + + * **Description**: activation functions for gates + * **Range of values**: any combination of *relu*, *sigmoid*, *tanh* + * **Type**: a list of strings + * **Default value**: *tanh* + * **Required**: *no* + +* *activations_alpha, activations_beta* + + * **Description**: *activations_alpha, activations_beta* attributes of functions; applicability and meaning of these attributes depends on chosen activation functions + * **Range of values**: a list of floating-point numbers + * **Type**: `float[]` + * **Default value**: None + * **Required**: *no* + +* *clip* + + * **Description**: *clip* specifies bound values *[-C, C]* for tensor clipping. Clipping is performed before activations. + * **Range of values**: a positive floating-point number + * **Type**: `float` + * **Default value**: *infinity* that means that the clipping is not applied + * **Required**: *no* + +* *direction* + + * **Description**: Specify if the RNN is forward, reverse, or bidirectional. If it is one of *forward* or *reverse* then `num_directions = 1`, if it is *bidirectional*, then `num_directions = 2`. This `num_directions` value specifies input/output shape requirements. + * **Range of values**: *forward*, *reverse*, *bidirectional* + * **Type**: `string` + * **Default value**: None + * **Required**: *Yes* + +**Inputs** + +* **1**: `X` - 3D tensor of type *T1* `[batch_size, seq_length, input_size]`, input data. It differs from RNNCell 1st input only by additional axis with size `seq_length`. **Required.** + +* **2**: `initial_hidden_state` - 3D tensor of type *T1* `[batch_size, num_directions, hidden_size]`, input hidden state data. 
**Required.** + +* **3**: `sequence_lengths` - 1D tensor of type *T2* `[batch_size]`, specifies real sequence lengths for each batch element. **Required.** + +* **4**: `W` - 3D tensor of type *T1* `[num_directions, hidden_size, input_size]`, the weights for matrix multiplication. **Required.** + +* **5**: `R` - 3D tensor of type *T1* `[num_directions, hidden_size, hidden_size]`, the recurrence weights for matrix multiplication. **Required.** + +* **6**: `B` - 2D tensor of type *T1* `[num_directions, hidden_size]`, the sum of biases (weights and recurrence weights). **Required.** + +**Outputs** + +* **1**: `Y` – 3D tensor of type *T1* `[batch_size, num_directions, seq_len, hidden_size]`, concatenation of all the intermediate output values of the hidden. + +* **2**: `Ho` - 3D tensor of type *T1* `[batch_size, num_directions, hidden_size]`, the last output value of hidden state. + +**Types** + +* *T1*: any supported floating point type. +* *T2*: any supported integer type. + +**Example** +```xml + + + + + 1 + 4 + 16 + + + 1 + 1 + 128 + + + 1 + + + 1 + 128 + 16 + + + 1 + 128 + 128 + + + 1 + 128 + + + + + 1 + 1 + 4 + 128 + + + 1 + 1 + 128 + + + +``` \ No newline at end of file diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp index 6ae0c1d..811b1d2 100644 --- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp +++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp @@ -486,7 +486,7 @@ std::shared_ptr V10Parser::createNode(const std::vector bool { - for (size_t i = 1; i <= 4; i++) { + for (size_t i = 1; i <= 5; i++) { std::string opset_name = "opset" + std::to_string(i); if (version == opset_name) return true; diff --git a/inference-engine/src/transformations/src/transformations/bidirectional_sequences_decomposition.cpp b/inference-engine/src/transformations/src/transformations/bidirectional_sequences_decomposition.cpp index 1df3c61..ac5021d 100644 --- 
a/inference-engine/src/transformations/src/transformations/bidirectional_sequences_decomposition.cpp +++ b/inference-engine/src/transformations/src/transformations/bidirectional_sequences_decomposition.cpp @@ -7,14 +7,15 @@ #include #include +#include #include #include ngraph::pass::BidirectionalLSTMSequenceDecomposition::BidirectionalLSTMSequenceDecomposition() { - auto lstm_sequence_ngraph = ngraph::pattern::wrap_type(); + auto lstm_sequence_ngraph = ngraph::pattern::wrap_type(); ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) { - auto lstm_sequence = std::dynamic_pointer_cast(m.get_match_root()); + auto lstm_sequence = std::dynamic_pointer_cast(m.get_match_root()); if (!lstm_sequence) { return false; } @@ -44,7 +45,7 @@ ngraph::pass::BidirectionalLSTMSequenceDecomposition::BidirectionalLSTMSequenceD lstm_sequence->get_activations(), lstm_sequence->get_clip()); - auto lstm_sequence_reverse = std::make_shared( + auto lstm_sequence_reverse = std::make_shared( lstm_sequence->input_value(0), H->output(1), C->output(1), @@ -59,11 +60,11 @@ ngraph::pass::BidirectionalLSTMSequenceDecomposition::BidirectionalLSTMSequenceD lstm_sequence->get_activations(), lstm_sequence->get_clip()); - auto concat_0 = std::make_shared(OutputVector{lstm_sequence_forward->output(0), + auto concat_0 = std::make_shared(OutputVector{lstm_sequence_forward->output(0), lstm_sequence_reverse->output(0)}, 1); - auto concat_1 = std::make_shared(OutputVector{lstm_sequence_forward->output(1), + auto concat_1 = std::make_shared(OutputVector{lstm_sequence_forward->output(1), lstm_sequence_reverse->output(1)}, 1); - auto concat_2 = std::make_shared(OutputVector{lstm_sequence_forward->output(2), + auto concat_2 = std::make_shared(OutputVector{lstm_sequence_forward->output(2), lstm_sequence_reverse->output(2)}, 1); ngraph::copy_runtime_info(lstm_sequence, {H, C, W, R, B, lstm_sequence_forward, lstm_sequence_reverse, concat_0, concat_1, concat_2}); @@ -79,10 +80,10 @@ 
ngraph::pass::BidirectionalLSTMSequenceDecomposition::BidirectionalLSTMSequenceD } ngraph::pass::BidirectionalGRUSequenceDecomposition::BidirectionalGRUSequenceDecomposition() { - auto gru_sequence_ngraph = ngraph::pattern::wrap_type(); + auto gru_sequence_ngraph = ngraph::pattern::wrap_type(); ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) { - auto gru_sequence = std::dynamic_pointer_cast(m.get_match_root()); + auto gru_sequence = std::dynamic_pointer_cast(m.get_match_root()); if (!gru_sequence) { return false; } @@ -111,7 +112,7 @@ ngraph::pass::BidirectionalGRUSequenceDecomposition::BidirectionalGRUSequenceDec gru_sequence->get_clip(), gru_sequence->get_linear_before_reset()); - auto gru_sequence_reverse = std::make_shared( + auto gru_sequence_reverse = std::make_shared( gru_sequence->input_value(0), H->output(1), gru_sequence->input_value(2), @@ -126,9 +127,9 @@ ngraph::pass::BidirectionalGRUSequenceDecomposition::BidirectionalGRUSequenceDec gru_sequence->get_clip(), gru_sequence->get_linear_before_reset()); - auto concat_0 = std::make_shared(OutputVector{gru_sequence_forward->output(0), + auto concat_0 = std::make_shared(OutputVector{gru_sequence_forward->output(0), gru_sequence_reverse->output(0)}, 1); - auto concat_1 = std::make_shared(OutputVector{gru_sequence_forward->output(1), + auto concat_1 = std::make_shared(OutputVector{gru_sequence_forward->output(1), gru_sequence_reverse->output(1)}, 1); ngraph::copy_runtime_info(gru_sequence, {H, W, R, B, gru_sequence_forward, gru_sequence_reverse, concat_0, concat_1}); @@ -143,10 +144,10 @@ ngraph::pass::BidirectionalGRUSequenceDecomposition::BidirectionalGRUSequenceDec } ngraph::pass::BidirectionalRNNSequenceDecomposition::BidirectionalRNNSequenceDecomposition() { - auto rnn_sequence_ngraph = ngraph::pattern::wrap_type(); + auto rnn_sequence_ngraph = ngraph::pattern::wrap_type(); ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) { - auto rnn_sequence = 
std::dynamic_pointer_cast(m.get_match_root()); + auto rnn_sequence = std::dynamic_pointer_cast(m.get_match_root()); if (!rnn_sequence) { return false; } @@ -174,7 +175,7 @@ ngraph::pass::BidirectionalRNNSequenceDecomposition::BidirectionalRNNSequenceDec rnn_sequence->get_activations_beta(), rnn_sequence->get_clip()); - auto rnn_sequence_reverse = std::make_shared( + auto rnn_sequence_reverse = std::make_shared( rnn_sequence->input_value(0), H->output(1), rnn_sequence->input_value(2), @@ -188,9 +189,9 @@ ngraph::pass::BidirectionalRNNSequenceDecomposition::BidirectionalRNNSequenceDec rnn_sequence->get_activations_beta(), rnn_sequence->get_clip()); - auto concat_0 = std::make_shared(OutputVector{rnn_sequence_forward->output(0), + auto concat_0 = std::make_shared(OutputVector{rnn_sequence_forward->output(0), rnn_sequence_reverse->output(0)}, 1); - auto concat_1 = std::make_shared(OutputVector{rnn_sequence_forward->output(1), + auto concat_1 = std::make_shared(OutputVector{rnn_sequence_forward->output(1), rnn_sequence_reverse->output(1)}, 1); ngraph::copy_runtime_info(rnn_sequence, {H, W, R, B, rnn_sequence_forward, rnn_sequence_reverse, concat_0, concat_1}); diff --git a/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_sequences_to_sequences_ie.cpp b/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_sequences_to_sequences_ie.cpp index 3f80180..36bec24 100644 --- a/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_sequences_to_sequences_ie.cpp +++ b/inference-engine/src/transformations/src/transformations/convert_opset1_to_legacy/convert_sequences_to_sequences_ie.cpp @@ -6,7 +6,7 @@ #include -#include +#include #include #include @@ -15,10 +15,10 @@ #include ngraph::pass::ConvertLSTMSequenceMatcher::ConvertLSTMSequenceMatcher() { - auto lstm_sequence_ngraph = ngraph::pattern::wrap_type(); + auto lstm_sequence_ngraph = ngraph::pattern::wrap_type(); 
ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) { - auto lstm_sequence = std::dynamic_pointer_cast(m.get_match_root()); + auto lstm_sequence = std::dynamic_pointer_cast(m.get_match_root()); if (!lstm_sequence) { return false; } @@ -31,13 +31,13 @@ ngraph::pass::ConvertLSTMSequenceMatcher::ConvertLSTMSequenceMatcher() { return false; // for forward/reverse cases we can squeeze num_direction dimension - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(lstm_sequence->input_value(1), axis_1); - auto in_2 = std::make_shared(lstm_sequence->input_value(2), axis_1); - auto concat = std::make_shared(ngraph::OutputVector{W, R}, 2); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_3 = std::make_shared(concat->output(0), axis_2); - auto in_4 = std::make_shared(lstm_sequence->input_value(6), axis_2); + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(lstm_sequence->input_value(1), axis_1); + auto in_2 = std::make_shared(lstm_sequence->input_value(2), axis_1); + auto concat = std::make_shared(ngraph::OutputVector{W, R}, 2); + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_3 = std::make_shared(concat->output(0), axis_2); + auto in_4 = std::make_shared(lstm_sequence->input_value(6), axis_2); auto lstm_sequence_ie = std::make_shared( lstm_sequence->input(0).get_source_output(), // X in_1, // initial_hidden_state @@ -52,10 +52,10 @@ ngraph::pass::ConvertLSTMSequenceMatcher::ConvertLSTMSequenceMatcher() { lstm_sequence->get_activations_beta(), lstm_sequence->get_clip()); - auto unsqueeze_axis = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto unsqueeze_1 = std::make_shared(lstm_sequence_ie->output(0), unsqueeze_axis); - auto unsqueeze_2 = 
std::make_shared(lstm_sequence_ie->output(1), unsqueeze_axis); - auto unsqueeze_3 = std::make_shared(lstm_sequence_ie->output(2), unsqueeze_axis); + auto unsqueeze_axis = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto unsqueeze_1 = std::make_shared(lstm_sequence_ie->output(0), unsqueeze_axis); + auto unsqueeze_2 = std::make_shared(lstm_sequence_ie->output(1), unsqueeze_axis); + auto unsqueeze_3 = std::make_shared(lstm_sequence_ie->output(2), unsqueeze_axis); ngraph::copy_runtime_info(lstm_sequence, {concat, lstm_sequence_ie, in_1, in_2, in_3, in_4, unsqueeze_1, unsqueeze_2, unsqueeze_3}); @@ -71,10 +71,10 @@ ngraph::pass::ConvertLSTMSequenceMatcher::ConvertLSTMSequenceMatcher() { } ngraph::pass::ConvertGRUSequenceMatcher::ConvertGRUSequenceMatcher() { - auto gru_sequence_ngraph = ngraph::pattern::wrap_type(); + auto gru_sequence_ngraph = ngraph::pattern::wrap_type(); ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) { - auto gru_sequence = std::dynamic_pointer_cast(m.get_match_root()); + auto gru_sequence = std::dynamic_pointer_cast(m.get_match_root()); if (!gru_sequence) { return false; } @@ -87,12 +87,12 @@ ngraph::pass::ConvertGRUSequenceMatcher::ConvertGRUSequenceMatcher() { return false; // for forward/reverse cases we can squeeze num_direction dimension - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(gru_sequence->input_value(1), axis_1); - auto concat = std::make_shared(ngraph::OutputVector{W, R}, 2); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_3 = std::make_shared(concat->output(0), axis_2); - auto in_4 = std::make_shared(gru_sequence->input_value(5), axis_2); + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(gru_sequence->input_value(1), axis_1); + auto concat = 
std::make_shared(ngraph::OutputVector{W, R}, 2); + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_3 = std::make_shared(concat->output(0), axis_2); + auto in_4 = std::make_shared(gru_sequence->input_value(5), axis_2); auto gru_sequence_ie = std::make_shared( gru_sequence->input_value(0), // X @@ -108,9 +108,9 @@ ngraph::pass::ConvertGRUSequenceMatcher::ConvertGRUSequenceMatcher() { gru_sequence->get_clip(), gru_sequence->get_linear_before_reset()); - auto unsqueeze_axis = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto unsqueeze_1 = std::make_shared(gru_sequence_ie->output(0), unsqueeze_axis); - auto unsqueeze_2 = std::make_shared(gru_sequence_ie->output(1), unsqueeze_axis); + auto unsqueeze_axis = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto unsqueeze_1 = std::make_shared(gru_sequence_ie->output(0), unsqueeze_axis); + auto unsqueeze_2 = std::make_shared(gru_sequence_ie->output(1), unsqueeze_axis); ngraph::copy_runtime_info(gru_sequence, {concat, gru_sequence_ie, unsqueeze_1, unsqueeze_2, in_1, in_3, in_4}); unsqueeze_1->set_friendly_name(gru_sequence->get_friendly_name()+".0"); @@ -124,10 +124,10 @@ ngraph::pass::ConvertGRUSequenceMatcher::ConvertGRUSequenceMatcher() { } ngraph::pass::ConvertRNNSequenceMatcher::ConvertRNNSequenceMatcher() { - auto rnn_sequence_ngraph = ngraph::pattern::wrap_type(); + auto rnn_sequence_ngraph = ngraph::pattern::wrap_type(); ngraph::matcher_pass_callback callback = [](pattern::Matcher &m) { - auto rnn_sequence = std::dynamic_pointer_cast(m.get_match_root()); + auto rnn_sequence = std::dynamic_pointer_cast(m.get_match_root()); if (!rnn_sequence) { return false; } @@ -140,12 +140,12 @@ ngraph::pass::ConvertRNNSequenceMatcher::ConvertRNNSequenceMatcher() { auto R = rnn_sequence->input_value(4); // for forward/reverse cases we can squeeze num_direction dimension - auto axis_1 = 
ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(rnn_sequence->input_value(1), axis_1); - auto concat = std::make_shared(ngraph::OutputVector{W, R}, 2); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_3 = std::make_shared(concat->output(0), axis_2); - auto in_4 = std::make_shared(rnn_sequence->input_value(5), axis_2); + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(rnn_sequence->input_value(1), axis_1); + auto concat = std::make_shared(ngraph::OutputVector{W, R}, 2); + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_3 = std::make_shared(concat->output(0), axis_2); + auto in_4 = std::make_shared(rnn_sequence->input_value(5), axis_2); auto rnn_sequence_ie = std::make_shared( rnn_sequence->input_value(0), // X in_1, // initial_hidden_state @@ -159,9 +159,9 @@ ngraph::pass::ConvertRNNSequenceMatcher::ConvertRNNSequenceMatcher() { rnn_sequence->get_activations_beta(), rnn_sequence->get_clip()); - auto unsqueeze_axis = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto unsqueeze_1 = std::make_shared(rnn_sequence_ie->output(0), unsqueeze_axis); - auto unsqueeze_2 = std::make_shared(rnn_sequence_ie->output(1), unsqueeze_axis); + auto unsqueeze_axis = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto unsqueeze_1 = std::make_shared(rnn_sequence_ie->output(0), unsqueeze_axis); + auto unsqueeze_2 = std::make_shared(rnn_sequence_ie->output(1), unsqueeze_axis); ngraph::copy_runtime_info(rnn_sequence, {concat, rnn_sequence_ie, in_1, in_3, in_4, unsqueeze_1, unsqueeze_2}); diff --git a/inference-engine/src/transformations/src/transformations/tensor_iterator_transformations/convert_ti_to_sequences.cpp 
b/inference-engine/src/transformations/src/transformations/tensor_iterator_transformations/convert_ti_to_sequences.cpp index b7eac62..9908ed0 100644 --- a/inference-engine/src/transformations/src/transformations/tensor_iterator_transformations/convert_ti_to_sequences.cpp +++ b/inference-engine/src/transformations/src/transformations/tensor_iterator_transformations/convert_ti_to_sequences.cpp @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include #include @@ -18,28 +18,28 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSequence() { auto tensor_iterator = std::make_shared(ngraph::element::f32, - ngraph::Shape{}, ngraph::pattern::has_class()); + ngraph::Shape{}, ngraph::pattern::has_class()); ngraph::matcher_pass_callback callback = [this](pattern::Matcher &m) { - auto ti = std::dynamic_pointer_cast(m.get_match_root()); + auto ti = std::dynamic_pointer_cast(m.get_match_root()); if (!ti || !m_transformation_callback(ti)) return false; // create pattern - auto data = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1}); - auto axis_squeeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 1); + auto data = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1}); + auto axis_squeeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 1); - auto input_data = std::make_shared(data, axis_squeeze); - auto input_H_state = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); - auto input_C_state = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); - auto input_W = std::make_shared(ngraph::element::f32, ngraph::Shape{4, 1}); - auto input_R = std::make_shared(ngraph::element::f32, ngraph::Shape{4, 1}); - auto input_B = std::make_shared(ngraph::element::f32, ngraph::Shape{4}); + auto input_data = std::make_shared(data, axis_squeeze); + auto input_H_state = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); + auto input_C_state = std::make_shared(ngraph::element::f32, 
ngraph::Shape{1, 1}); + auto input_W = std::make_shared(ngraph::element::f32, ngraph::Shape{4, 1}); + auto input_R = std::make_shared(ngraph::element::f32, ngraph::Shape{4, 1}); + auto input_B = std::make_shared(ngraph::element::f32, ngraph::Shape{4}); - auto cell = std::make_shared(input_data, input_H_state, input_C_state, + auto cell = std::make_shared(input_data, input_H_state, input_C_state, input_W, input_R, input_B, 1); - auto axis_unsqueeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 1); - auto unsqueeze = std::make_shared(cell, axis_unsqueeze); + auto axis_unsqueeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 1); + auto unsqueeze = std::make_shared(cell, axis_unsqueeze); ngraph::pattern::Matcher matcher(unsqueeze); bool match = false; @@ -57,7 +57,7 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSe auto pattern_map = matcher.get_pattern_map(); auto params = func->get_parameters(); - std::vector> ordered_in_descs(3); + std::vector> ordered_in_descs(3); int64_t stride = 0, slice_axis = 0; size_t batch_size = 0; for (const auto& input_desc : ti->get_input_descriptions()) { @@ -68,7 +68,7 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSe return false; } auto slice_input - = std::dynamic_pointer_cast(input_desc); + = std::dynamic_pointer_cast(input_desc); if (!slice_input) return false; @@ -90,12 +90,12 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSe } auto results = func->get_results(); - std::vector> ordered_out_descs(3); + std::vector> ordered_out_descs(3); for (const auto& output_desc : ti->get_output_descriptions()) { - std::shared_ptr res = results[output_desc->m_body_value_index]; + std::shared_ptr res = results[output_desc->m_body_value_index]; if (res->get_input_source_output(0) == pattern_map[unsqueeze]) { auto concat_output - = std::dynamic_pointer_cast(output_desc); + = std::dynamic_pointer_cast(output_desc); if 
(!concat_output) return false; @@ -110,22 +110,22 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSe } } - auto seq_lengths = ngraph::opset4::Constant::create(element::i32, Shape{batch_size}, {ti->get_num_iterations()}); - const auto& lstm_cell = std::dynamic_pointer_cast(pattern_map[cell]); + auto seq_lengths = ngraph::opset5::Constant::create(element::i32, Shape{batch_size}, {ti->get_num_iterations()}); + const auto& lstm_cell = std::dynamic_pointer_cast(pattern_map[cell]); auto in_0 = ti->input_values()[ordered_in_descs[0]->m_input_index]; if (slice_axis == 0) { - auto order = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); - in_0 = std::make_shared(ti->input_values()[ordered_in_descs[0]->m_input_index], order); + auto order = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); + in_0 = std::make_shared(ti->input_values()[ordered_in_descs[0]->m_input_index], order); } - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(ti->input_values()[ordered_in_descs[1]->m_input_index], axis_1); - auto in_2 = std::make_shared(ti->input_values()[ordered_in_descs[2]->m_input_index], axis_1); - - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_4 = std::make_shared(pattern_map[input_W]->output(0).get_node_shared_ptr(), axis_2); - auto in_5 = std::make_shared(pattern_map[input_R]->output(0).get_node_shared_ptr(), axis_2); - auto in_6 = std::make_shared(pattern_map[input_B]->output(0).get_node_shared_ptr(), axis_2); - auto sequence = std::make_shared( + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(ti->input_values()[ordered_in_descs[1]->m_input_index], axis_1); + auto in_2 = std::make_shared(ti->input_values()[ordered_in_descs[2]->m_input_index], axis_1); + + auto axis_2 = 
ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_4 = std::make_shared(pattern_map[input_W]->output(0).get_node_shared_ptr(), axis_2); + auto in_5 = std::make_shared(pattern_map[input_R]->output(0).get_node_shared_ptr(), axis_2); + auto in_6 = std::make_shared(pattern_map[input_B]->output(0).get_node_shared_ptr(), axis_2); + auto sequence = std::make_shared( in_0, in_1, in_2, @@ -140,15 +140,15 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSe lstm_cell->get_activations(), lstm_cell->get_clip()); - auto axis_out = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto out_0 = std::make_shared(sequence->output(0), axis_out); - auto out_1 = std::make_shared(sequence->output(1), axis_out); - auto out_2 = std::make_shared(sequence->output(2), axis_out); + auto axis_out = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto out_0 = std::make_shared(sequence->output(0), axis_out); + auto out_1 = std::make_shared(sequence->output(1), axis_out); + auto out_2 = std::make_shared(sequence->output(2), axis_out); std::shared_ptr out = out_0; if (slice_axis == 0) { - auto order = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); - out = std::make_shared(out_0, order); + auto order = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); + out = std::make_shared(out_0, order); } ngraph::NodeVector outputs = {out, out_1, out_2}; @@ -176,26 +176,26 @@ ngraph::pass::ConvertTensorIteratorToLSTMSequence::ConvertTensorIteratorToLSTMSe ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequence() { auto tensor_iterator = std::make_shared(ngraph::element::f32, - ngraph::Shape{}, ngraph::pattern::has_class()); + ngraph::Shape{}, ngraph::pattern::has_class()); ngraph::matcher_pass_callback callback = [this](pattern::Matcher &m) { - auto ti = 
std::dynamic_pointer_cast(m.get_match_root()); + auto ti = std::dynamic_pointer_cast(m.get_match_root()); if (!ti || !m_transformation_callback(ti)) return false; // create pattern - auto data = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1}); - auto axis_squeeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0); - auto input_data = std::make_shared(data, axis_squeeze); + auto data = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1}); + auto axis_squeeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0); + auto input_data = std::make_shared(data, axis_squeeze); - auto input_H_state = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); - auto input_W = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); - auto input_R = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); - auto input_B = std::make_shared(ngraph::element::f32, ngraph::Shape{1}); + auto input_H_state = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); + auto input_W = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); + auto input_R = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); + auto input_B = std::make_shared(ngraph::element::f32, ngraph::Shape{1}); - auto cell = std::make_shared(input_data, input_H_state, input_W, input_R, input_B, 1); + auto cell = std::make_shared(input_data, input_H_state, input_W, input_R, input_B, 1); - auto axis_unsqueeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0); - auto unsqueeze = std::make_shared(cell, axis_unsqueeze); + auto axis_unsqueeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0); + auto unsqueeze = std::make_shared(cell, axis_unsqueeze); ngraph::pattern::Matcher matcher(unsqueeze); bool match = false; @@ -213,7 +213,7 @@ ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequ auto pattern_map = matcher.get_pattern_map(); auto params = func->get_parameters(); - std::vector> 
ordered_in_descs(3); + std::vector> ordered_in_descs(3); int64_t stride = 0, slice_axis = 0; size_t batch_size = 0; for (const auto& input_desc : ti->get_input_descriptions()) { @@ -224,7 +224,7 @@ ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequ return false; } auto slice_input - = std::dynamic_pointer_cast(input_desc); + = std::dynamic_pointer_cast(input_desc); if (!slice_input) return false; @@ -242,15 +242,15 @@ ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequ } } - auto seq_lengths = ngraph::opset4::Constant::create(element::i32, Shape{batch_size}, {ti->get_num_iterations()}); + auto seq_lengths = ngraph::opset5::Constant::create(element::i32, Shape{batch_size}, {ti->get_num_iterations()}); auto results = func->get_results(); - std::vector> ordered_out_descs(2); + std::vector> ordered_out_descs(2); for (const auto& output_desc : ti->get_output_descriptions()) { - std::shared_ptr res = results[output_desc->m_body_value_index]; + std::shared_ptr res = results[output_desc->m_body_value_index]; if (res->get_input_source_output(0) == pattern_map[unsqueeze]) { auto concat_output - = std::dynamic_pointer_cast(output_desc); + = std::dynamic_pointer_cast(output_desc); if (!concat_output) return false; @@ -263,22 +263,22 @@ ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequ } } - const auto& rnn_cell = std::dynamic_pointer_cast(pattern_map[cell]); + const auto& rnn_cell = std::dynamic_pointer_cast(pattern_map[cell]); auto in_0 = ti->input_values()[ordered_in_descs[0]->m_input_index]; if (slice_axis == 0) { - auto order = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); - in_0 = std::make_shared(ti->input_values()[ordered_in_descs[0]->m_input_index], order); + auto order = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); + in_0 = std::make_shared(ti->input_values()[ordered_in_descs[0]->m_input_index], 
order); } - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(ti->input_values()[ordered_in_descs[1]->m_input_index], axis_1); + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(ti->input_values()[ordered_in_descs[1]->m_input_index], axis_1); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_3 = std::make_shared(pattern_map[input_W]->output(0).get_node_shared_ptr(), axis_2); - auto in_4 = std::make_shared(pattern_map[input_R]->output(0).get_node_shared_ptr(), axis_2); - auto in_5 = std::make_shared(pattern_map[input_B]->output(0).get_node_shared_ptr(), axis_2); - auto sequence = std::make_shared( + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_3 = std::make_shared(pattern_map[input_W]->output(0).get_node_shared_ptr(), axis_2); + auto in_4 = std::make_shared(pattern_map[input_R]->output(0).get_node_shared_ptr(), axis_2); + auto in_5 = std::make_shared(pattern_map[input_B]->output(0).get_node_shared_ptr(), axis_2); + auto sequence = std::make_shared( in_0, in_1, seq_lengths, @@ -292,14 +292,14 @@ ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequ rnn_cell->get_activations_beta(), rnn_cell->get_clip()); - auto axis_out = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto out_0 = std::make_shared(sequence->output(0), axis_out); - auto out_1 = std::make_shared(sequence->output(1), axis_out); + auto axis_out = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto out_0 = std::make_shared(sequence->output(0), axis_out); + auto out_1 = std::make_shared(sequence->output(1), axis_out); std::shared_ptr out = out_0; if (slice_axis == 0) { - auto order = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{3}, 
{1, 0, 2}); - out = std::make_shared(out_0, order); + auto order = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); + out = std::make_shared(out_0, order); } ngraph::NodeVector outputs = {out, out_1}; @@ -327,26 +327,26 @@ ngraph::pass::ConvertTensorIteratorToRNNSequence::ConvertTensorIteratorToRNNSequ ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequence() { auto tensor_iterator = std::make_shared(ngraph::element::f32, - ngraph::Shape{}, ngraph::pattern::has_class()); + ngraph::Shape{}, ngraph::pattern::has_class()); ngraph::matcher_pass_callback callback = [this](pattern::Matcher &m) { - auto ti = std::dynamic_pointer_cast(m.get_match_root()); + auto ti = std::dynamic_pointer_cast(m.get_match_root()); if (!ti || !m_transformation_callback(ti)) return false; // create pattern - auto data = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1}); - auto axis_squeeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0); - auto input_data = std::make_shared(data, axis_squeeze); + auto data = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1, 1}); + auto axis_squeeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0); + auto input_data = std::make_shared(data, axis_squeeze); - auto input_H_state = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); - auto input_W = std::make_shared(ngraph::element::f32, ngraph::Shape{3, 1}); - auto input_R = std::make_shared(ngraph::element::f32, ngraph::Shape{3, 1}); - auto input_B = std::make_shared(ngraph::element::f32, ngraph::Shape{3}); + auto input_H_state = std::make_shared(ngraph::element::f32, ngraph::Shape{1, 1}); + auto input_W = std::make_shared(ngraph::element::f32, ngraph::Shape{3, 1}); + auto input_R = std::make_shared(ngraph::element::f32, ngraph::Shape{3, 1}); + auto input_B = std::make_shared(ngraph::element::f32, ngraph::Shape{3}); - auto cell = std::make_shared(input_data, input_H_state, input_W, input_R, 
input_B, 1); + auto cell = std::make_shared(input_data, input_H_state, input_W, input_R, input_B, 1); - auto axis_unsqueeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0); - auto unsqueeze = std::make_shared(cell, axis_unsqueeze); + auto axis_unsqueeze = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 0); + auto unsqueeze = std::make_shared(cell, axis_unsqueeze); ngraph::pattern::Matcher matcher(unsqueeze); bool match = false; @@ -364,7 +364,7 @@ ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ auto pattern_map = matcher.get_pattern_map(); auto params = func->get_parameters(); - std::vector> ordered_in_descs(3); + std::vector> ordered_in_descs(3); int64_t stride = 0, slice_axis = 0; size_t batch_size = 0; for (const auto& input_desc : ti->get_input_descriptions()) { @@ -375,7 +375,7 @@ ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ return false; } auto slice_input - = std::dynamic_pointer_cast(input_desc); + = std::dynamic_pointer_cast(input_desc); if (!slice_input) return false; @@ -393,15 +393,15 @@ ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ } } - auto seq_lengths = ngraph::opset4::Constant::create(element::i32, Shape{batch_size}, {ti->get_num_iterations()}); + auto seq_lengths = ngraph::opset5::Constant::create(element::i32, Shape{batch_size}, {ti->get_num_iterations()}); auto results = func->get_results(); - std::vector> ordered_out_descs(2); + std::vector> ordered_out_descs(2); for (const auto& output_desc : ti->get_output_descriptions()) { - std::shared_ptr res = results[output_desc->m_body_value_index]; + std::shared_ptr res = results[output_desc->m_body_value_index]; if (res->get_input_source_output(0) == pattern_map[unsqueeze]) { auto concat_output - = std::dynamic_pointer_cast(output_desc); + = std::dynamic_pointer_cast(output_desc); if (!concat_output) return false; @@ -414,22 +414,22 @@ 
ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ } } - const auto& rnn_cell = std::dynamic_pointer_cast(pattern_map[cell]); + const auto& rnn_cell = std::dynamic_pointer_cast(pattern_map[cell]); auto in_0 = ti->input_values()[ordered_in_descs[0]->m_input_index]; if (slice_axis == 0) { - auto order = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); - in_0 = std::make_shared(ti->input_values()[ordered_in_descs[0]->m_input_index], order); + auto order = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); + in_0 = std::make_shared(ti->input_values()[ordered_in_descs[0]->m_input_index], order); } - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(ti->input_values()[ordered_in_descs[1]->m_input_index], axis_1); + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(ti->input_values()[ordered_in_descs[1]->m_input_index], axis_1); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_3 = std::make_shared(pattern_map[input_W]->output(0).get_node_shared_ptr(), axis_2); - auto in_4 = std::make_shared(pattern_map[input_R]->output(0).get_node_shared_ptr(), axis_2); - auto in_5 = std::make_shared(pattern_map[input_B]->output(0).get_node_shared_ptr(), axis_2); - auto sequence = std::make_shared( + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_3 = std::make_shared(pattern_map[input_W]->output(0).get_node_shared_ptr(), axis_2); + auto in_4 = std::make_shared(pattern_map[input_R]->output(0).get_node_shared_ptr(), axis_2); + auto in_5 = std::make_shared(pattern_map[input_B]->output(0).get_node_shared_ptr(), axis_2); + auto sequence = std::make_shared( in_0, in_1, seq_lengths, @@ -444,14 +444,14 @@ 
ngraph::pass::ConvertTensorIteratorToGRUSequence::ConvertTensorIteratorToGRUSequ rnn_cell->get_clip(), rnn_cell->get_linear_before_reset()); - auto axis_out = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto out_0 = std::make_shared(sequence->output(0), axis_out); - auto out_1 = std::make_shared(sequence->output(1), axis_out); + auto axis_out = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto out_0 = std::make_shared(sequence->output(0), axis_out); + auto out_1 = std::make_shared(sequence->output(1), axis_out); std::shared_ptr out = out_0; if (slice_axis == 0) { - auto order = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); - out = std::make_shared(out_0, order); + auto order = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {1, 0, 2}); + out = std::make_shared(out_0, order); } ngraph::NodeVector outputs = {out, out_1}; diff --git a/inference-engine/tests/functional/inference_engine/transformations/convert_sequences_to_sequences_ie_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/convert_sequences_to_sequences_ie_test.cpp index a8555d5..71d5ea5 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/convert_sequences_to_sequences_ie_test.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/convert_sequences_to_sequences_ie_test.cpp @@ -11,7 +11,7 @@ #include #include -#include +#include #include #include #include @@ -25,7 +25,7 @@ using namespace testing; TEST(TransformationTests, GRUSequenceConversionTest) { std::shared_ptr f(nullptr), f_ref(nullptr); - std::shared_ptr sequence; + std::shared_ptr sequence; const size_t batch_size = 2; const size_t input_size = 3; @@ -33,21 +33,21 @@ TEST(TransformationTests, GRUSequenceConversionTest) { const size_t gates_count = 3; const size_t num_directions = 1; { - const auto X = std::make_shared(ngraph::element::f32, 
+ const auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, 1, input_size}); const auto W = - std::make_shared(ngraph::element::f32, + std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size, input_size}); const auto R = - std::make_shared(ngraph::element::f32, + std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size, hidden_size}); - const auto H_t = std::make_shared(ngraph::element::f32, + const auto H_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, hidden_size}); - const auto B = std::make_shared(ngraph::element::f32, + const auto B = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size}); - const auto seq_len = std::make_shared(ngraph::element::i32, ngraph::Shape{batch_size}); - sequence = std::make_shared(X, H_t, seq_len, W, R, B, hidden_size, + const auto seq_len = std::make_shared(ngraph::element::i32, ngraph::Shape{batch_size}); + sequence = std::make_shared(X, H_t, seq_len, W, R, B, hidden_size, ngraph::op::RecurrentSequenceDirection::FORWARD); sequence->set_friendly_name("test_sequence"); @@ -60,26 +60,26 @@ TEST(TransformationTests, GRUSequenceConversionTest) { } { - const auto X = std::make_shared(ngraph::element::f32, + const auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, 1, input_size}); const auto W = - std::make_shared(ngraph::element::f32, + std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size, input_size}); const auto R = - std::make_shared(ngraph::element::f32, + std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size, hidden_size}); - const auto H_t = std::make_shared(ngraph::element::f32, + const auto H_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, hidden_size}); - const auto B = std::make_shared(ngraph::element::f32, + 
const auto B = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size}); - const auto seq_len = std::make_shared(ngraph::element::i32, ngraph::Shape{batch_size}, 1); - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(H_t, axis_1); - auto concat = std::make_shared(ngraph::NodeVector({W, R}), 2); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_3 = std::make_shared(concat->output(0), axis_2); - auto in_4 = std::make_shared(B, axis_2); + const auto seq_len = std::make_shared(ngraph::element::i32, ngraph::Shape{batch_size}, 1); + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(H_t, axis_1); + auto concat = std::make_shared(ngraph::NodeVector({W, R}), 2); + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_3 = std::make_shared(concat->output(0), axis_2); + auto in_4 = std::make_shared(B, axis_2); auto sequence_ie = std::make_shared(X, in_1, seq_len, // this input is not supported @@ -94,9 +94,9 @@ TEST(TransformationTests, GRUSequenceConversionTest) { sequence->get_linear_before_reset()); sequence_ie->set_friendly_name("test_sequence"); - auto unsqueeze_axis = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto unsqueeze_1 = std::make_shared(sequence_ie->output(0), unsqueeze_axis); - auto unsqueeze_2 = std::make_shared(sequence_ie->output(1), unsqueeze_axis); + auto unsqueeze_axis = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto unsqueeze_1 = std::make_shared(sequence_ie->output(0), unsqueeze_axis); + auto unsqueeze_2 = std::make_shared(sequence_ie->output(1), unsqueeze_axis); f_ref = std::make_shared(ngraph::NodeVector{unsqueeze_1}, ngraph::ParameterVector{X, H_t}); } auto res = compare_functions(f, 
f_ref); @@ -112,16 +112,16 @@ TEST(TransformationTests, RNNSequenceConversionTest) { const size_t num_directions = 1; const size_t batch_size = 2; std::shared_ptr f(nullptr), f_ref(nullptr); - std::shared_ptr sequence; + std::shared_ptr sequence; { - auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, 1, 3}); - auto H = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, 3}); - auto W = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3, 3}); - auto R = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3, 3}); - auto B = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3}); - auto seq_len = std::make_shared(ngraph::element::f32, ngraph::Shape{2}); - sequence = std::make_shared(X, H, seq_len, W, R, B, hidden_size, + auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, 1, 3}); + auto H = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, 3}); + auto W = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3, 3}); + auto R = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3, 3}); + auto B = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3}); + auto seq_len = std::make_shared(ngraph::element::f32, ngraph::Shape{2}); + sequence = std::make_shared(X, H, seq_len, W, R, B, hidden_size, ngraph::op::RecurrentSequenceDirection::FORWARD); sequence->set_friendly_name("test_sequence"); @@ -134,18 +134,18 @@ TEST(TransformationTests, RNNSequenceConversionTest) { } { - auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, 1, 3}); - auto H = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, 3}); - auto W = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3, 3}); - auto R = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3, 3}); - auto B = 
std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3}); - auto seq_len = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size}, 1); - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(H, axis_1); - auto concat = std::make_shared(ngraph::NodeVector({W, R}), 2); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_3 = std::make_shared(concat->output(0), axis_2); - auto in_4 = std::make_shared(B, axis_2); + auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, 1, 3}); + auto H = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, 3}); + auto W = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3, 3}); + auto R = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3, 3}); + auto B = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, 3}); + auto seq_len = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size}, 1); + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(H, axis_1); + auto concat = std::make_shared(ngraph::NodeVector({W, R}), 2); + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_3 = std::make_shared(concat->output(0), axis_2); + auto in_4 = std::make_shared(B, axis_2); auto sequence_ie = std::make_shared(X, in_1, seq_len, @@ -158,9 +158,9 @@ TEST(TransformationTests, RNNSequenceConversionTest) { sequence->get_activations_beta(), sequence->get_clip()); - auto unsqueeze_axis = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto unsqueeze_1 = std::make_shared(sequence_ie->output(0), unsqueeze_axis); - auto unsqueeze_2 = std::make_shared(sequence_ie->output(1), unsqueeze_axis); + auto unsqueeze_axis = 
ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto unsqueeze_1 = std::make_shared(sequence_ie->output(0), unsqueeze_axis); + auto unsqueeze_2 = std::make_shared(sequence_ie->output(1), unsqueeze_axis); sequence_ie->set_friendly_name("test_sequence"); f_ref = std::make_shared(ngraph::NodeVector{unsqueeze_1}, ngraph::ParameterVector{X, H}); } @@ -180,28 +180,28 @@ TEST(TransformationTests, LSTMSequenceConversionTest) { const size_t gates_count = 4; const size_t num_directions = 1; std::shared_ptr f(nullptr), f_ref(nullptr); - std::shared_ptr sequence; + std::shared_ptr sequence; { - const auto X = std::make_shared(ngraph::element::f32, + const auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, 10, input_size}); const auto W = - std::make_shared(ngraph::element::f32, + std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size, input_size}); const auto R = - std::make_shared(ngraph::element::f32, + std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size, hidden_size}); - const auto H_t = std::make_shared(ngraph::element::f32, + const auto H_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, hidden_size}); - const auto C_t = std::make_shared(ngraph::element::f32, + const auto C_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, hidden_size}); - const auto B = std::make_shared(ngraph::element::f32, + const auto B = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size}); - const auto seq_len = std::make_shared(ngraph::element::i32, ngraph::Shape{batch_size}); - sequence = std::make_shared(X, H_t, C_t, seq_len, W, R, B, hidden_size, + const auto seq_len = std::make_shared(ngraph::element::i32, ngraph::Shape{batch_size}); + sequence = std::make_shared(X, H_t, C_t, seq_len, W, R, B, hidden_size, 
ngraph::op::RecurrentSequenceDirection::FORWARD); sequence->set_friendly_name("test_sequence"); @@ -214,33 +214,33 @@ TEST(TransformationTests, LSTMSequenceConversionTest) { } { - const auto X = std::make_shared(ngraph::element::f32, + const auto X = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, 10, input_size}); const auto W = - std::make_shared(ngraph::element::f32, + std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size, input_size}); const auto R = - std::make_shared(ngraph::element::f32, + std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size, hidden_size}); - const auto H_t = std::make_shared(ngraph::element::f32, + const auto H_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, hidden_size}); - const auto C_t = std::make_shared(ngraph::element::f32, + const auto C_t = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size, num_directions, hidden_size}); - const auto seq_lenghts = std::make_shared(ngraph::element::f32, + const auto seq_lenghts = std::make_shared(ngraph::element::f32, ngraph::Shape{batch_size}); - const auto B = std::make_shared(ngraph::element::f32, + const auto B = std::make_shared(ngraph::element::f32, ngraph::Shape{num_directions, gates_count * hidden_size}); - // const auto seq_len = std::make_shared(ngraph::element::i32, ngraph::Shape{1}, 1); - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(H_t, axis_1); - auto in_2 = std::make_shared(C_t, axis_1); - auto concat = std::make_shared(ngraph::NodeVector({W, R}), 2); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_3 = std::make_shared(concat->output(0), axis_2); - auto in_4 = std::make_shared(B, axis_2); + // const auto seq_len = std::make_shared(ngraph::element::i32, ngraph::Shape{1}, 1); + auto axis_1 = 
ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(H_t, axis_1); + auto in_2 = std::make_shared(C_t, axis_1); + auto concat = std::make_shared(ngraph::NodeVector({W, R}), 2); + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_3 = std::make_shared(concat->output(0), axis_2); + auto in_4 = std::make_shared(B, axis_2); auto sequence_ie = std::make_shared(X, in_1, in_2, @@ -254,10 +254,10 @@ TEST(TransformationTests, LSTMSequenceConversionTest) { sequence->get_activations_beta(), sequence->get_clip()); sequence_ie->set_friendly_name("test_sequence"); - auto unsqueeze_axis = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto unsqueeze_1 = std::make_shared(sequence_ie->output(0), unsqueeze_axis); - auto unsqueeze_2 = std::make_shared(sequence_ie->output(1), unsqueeze_axis); - auto unsqueeze_3 = std::make_shared(sequence_ie->output(2), unsqueeze_axis); + auto unsqueeze_axis = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto unsqueeze_1 = std::make_shared(sequence_ie->output(0), unsqueeze_axis); + auto unsqueeze_2 = std::make_shared(sequence_ie->output(1), unsqueeze_axis); + auto unsqueeze_3 = std::make_shared(sequence_ie->output(2), unsqueeze_axis); f_ref = std::make_shared(ngraph::NodeVector{unsqueeze_1}, ngraph::ParameterVector{X, H_t, C_t}); } diff --git a/inference-engine/tests/functional/inference_engine/transformations/convert_ti_to_sequences_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/convert_ti_to_sequences_test.cpp index f21776f..65e24b3 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/convert_ti_to_sequences_test.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/convert_ti_to_sequences_test.cpp @@ -11,7 +11,7 @@ #include #include -#include +#include #include #include #include @@ -25,34 
+25,34 @@ using namespace ngraph; TEST(TransformationTests, ConvertTensorIteratorToLSTMSequence) { std::shared_ptr f(nullptr), f_ref(nullptr); { - auto X = std::make_shared(element::f32, Shape{1, 2, 16}); - auto Y = std::make_shared(element::f32, Shape{1, 128}); - auto Z = std::make_shared(element::f32, Shape{1, 128}); + auto X = std::make_shared(element::f32, Shape{1, 2, 16}); + auto Y = std::make_shared(element::f32, Shape{1, 128}); + auto Z = std::make_shared(element::f32, Shape{1, 128}); - auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); - auto Yi = std::make_shared(element::f32, Shape{1, 128}); - auto Zi = std::make_shared(element::f32, Shape{1, 128}); + auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); + auto Yi = std::make_shared(element::f32, Shape{1, 128}); + auto Zi = std::make_shared(element::f32, Shape{1, 128}); // Body - auto axis = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto squeeze = std::make_shared(Xi, axis); + auto axis = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto squeeze = std::make_shared(Xi, axis); auto w_val = std::vector(512 * 16, 0); auto r_val = std::vector(512 * 128, 0); auto b_val = std::vector(512, 0); - auto W = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{512, 16}, w_val); - auto R = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{512, 128}, r_val); - auto B = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{512}, b_val); - - auto lstm_cell = std::make_shared(squeeze, Yi, Zi, W, R, B, 128); - auto res_1 = std::make_shared(lstm_cell); - auto axis_unsqueeze = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto unsqueeze = std::make_shared(lstm_cell, axis_unsqueeze); - auto res_2 = std::make_shared(unsqueeze); + auto W = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{512, 16}, w_val); + auto R = 
ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{512, 128}, r_val); + auto B = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{512}, b_val); + + auto lstm_cell = std::make_shared(squeeze, Yi, Zi, W, R, B, 128); + auto res_1 = std::make_shared(lstm_cell); + auto axis_unsqueeze = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto unsqueeze = std::make_shared(lstm_cell, axis_unsqueeze); + auto res_2 = std::make_shared(unsqueeze); auto body = std::make_shared(OutputVector{res_1, res_2}, ParameterVector{Xi, Yi, Zi}); - auto tensor_iterator = std::make_shared(); + auto tensor_iterator = std::make_shared(); tensor_iterator->set_body(body); tensor_iterator->set_invariant_input(Zi, Z); @@ -62,8 +62,8 @@ TEST(TransformationTests, ConvertTensorIteratorToLSTMSequence) { auto out0 = tensor_iterator->get_iter_value(res_1, -1); auto out1 = tensor_iterator->get_concatenated_slices(res_2, 0, 1, 1, -1, 1); - auto res_ti_1 = std::make_shared(tensor_iterator->output(1)); - //auto res_ti_2 = std::make_shared(tensor_iterator->output(0)); + auto res_ti_1 = std::make_shared(tensor_iterator->output(1)); + //auto res_ti_2 = std::make_shared(tensor_iterator->output(0)); f = std::make_shared(ngraph::NodeVector{res_ti_1}, ngraph::ParameterVector{X, Y, Z}); @@ -76,34 +76,34 @@ TEST(TransformationTests, ConvertTensorIteratorToLSTMSequence) { } { - auto X = std::make_shared(element::f32, Shape{1, 2, 16}); - auto Y = std::make_shared(element::f32, Shape{1, 128}); - auto Z = std::make_shared(element::f32, Shape{1, 128}); + auto X = std::make_shared(element::f32, Shape{1, 2, 16}); + auto Y = std::make_shared(element::f32, Shape{1, 128}); + auto Z = std::make_shared(element::f32, Shape{1, 128}); auto w_val = std::vector(512 * 16, 0); auto r_val = std::vector(512 * 128, 0); auto b_val = std::vector(512, 0); - auto W = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{512, 16}, w_val); - auto R = 
ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{512, 128}, r_val); - auto B = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{512}, b_val); + auto W = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{512, 16}, w_val); + auto R = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{512, 128}, r_val); + auto B = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{512}, b_val); - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(Y, axis_1); - auto in_2 = std::make_shared(Z, axis_1); + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(Y, axis_1); + auto in_2 = std::make_shared(Z, axis_1); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_4 = std::make_shared(W, axis_2); - auto in_5 = std::make_shared(R, axis_2); - auto in_6 = std::make_shared(B, axis_2); + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_4 = std::make_shared(W, axis_2); + auto in_5 = std::make_shared(R, axis_2); + auto in_6 = std::make_shared(B, axis_2); - auto seq_lengths = ngraph::opset4::Constant::create(element::i32, Shape{1}, {2}); - auto lstm_seq = std::make_shared(X, in_1, in_2, seq_lengths, in_4, in_5, in_6, + auto seq_lengths = ngraph::opset5::Constant::create(element::i32, Shape{1}, {2}); + auto lstm_seq = std::make_shared(X, in_1, in_2, seq_lengths, in_4, in_5, in_6, 128, ngraph::op::RecurrentSequenceDirection::FORWARD); - auto axis_out = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto out_0 = std::make_shared(lstm_seq->output(0), axis_out); - auto out_1 = std::make_shared(lstm_seq->output(1), axis_out); - auto out_2 = std::make_shared(lstm_seq->output(1), axis_out); - auto res_ti_1 = std::make_shared(out_0); 
+ auto axis_out = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto out_0 = std::make_shared(lstm_seq->output(0), axis_out); + auto out_1 = std::make_shared(lstm_seq->output(1), axis_out); + auto out_2 = std::make_shared(lstm_seq->output(1), axis_out); + auto res_ti_1 = std::make_shared(out_0); f_ref = std::make_shared(ngraph::NodeVector{res_ti_1}, ngraph::ParameterVector{X, Y, Z}); } @@ -114,32 +114,32 @@ TEST(TransformationTests, ConvertTensorIteratorToLSTMSequence) { TEST(TransformationTests, ConvertTensorIteratorToRNNSequence) { std::shared_ptr f(nullptr), f_ref(nullptr); { - auto X = std::make_shared(element::f32, Shape{1, 2, 16}); - auto Y = std::make_shared(element::f32, Shape{1, 128}); + auto X = std::make_shared(element::f32, Shape{1, 2, 16}); + auto Y = std::make_shared(element::f32, Shape{1, 128}); - auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); - auto Yi = std::make_shared(element::f32, Shape{1, 128}); + auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); + auto Yi = std::make_shared(element::f32, Shape{1, 128}); // Body - auto axis = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto squeeze = std::make_shared(Xi, axis); + auto axis = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto squeeze = std::make_shared(Xi, axis); auto w_val = std::vector(128 * 16, 0); auto r_val = std::vector(128 * 128, 0); auto b_val = std::vector(128, 0); - auto W = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{128, 16}, w_val); - auto R = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{128, 128}, r_val); - auto B = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{128}, b_val); - - auto rnn_cell = std::make_shared(squeeze, Yi, W, R, B, 128); - auto res_1 = std::make_shared(rnn_cell); - auto axis_unsqueeze = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - 
auto unsqueeze = std::make_shared(rnn_cell, axis_unsqueeze); - auto res_2 = std::make_shared(unsqueeze); + auto W = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{128, 16}, w_val); + auto R = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{128, 128}, r_val); + auto B = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{128}, b_val); + + auto rnn_cell = std::make_shared(squeeze, Yi, W, R, B, 128); + auto res_1 = std::make_shared(rnn_cell); + auto axis_unsqueeze = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto unsqueeze = std::make_shared(rnn_cell, axis_unsqueeze); + auto res_2 = std::make_shared(unsqueeze); auto body = std::make_shared(OutputVector{res_1, res_2}, ParameterVector{Xi, Yi}); - auto tensor_iterator = std::make_shared(); + auto tensor_iterator = std::make_shared(); tensor_iterator->set_body(body); tensor_iterator->set_sliced_input(Xi, X, 0, 1, 1, -1, 1); @@ -148,8 +148,8 @@ TEST(TransformationTests, ConvertTensorIteratorToRNNSequence) { auto out0 = tensor_iterator->get_iter_value(res_1, -1); auto out1 = tensor_iterator->get_concatenated_slices(res_2, 0, 1, 1, -1, 1); - auto res_ti_1 = std::make_shared(tensor_iterator->output(1)); - //auto res_ti_2 = std::make_shared(tensor_iterator->output(0)); + auto res_ti_1 = std::make_shared(tensor_iterator->output(1)); + //auto res_ti_2 = std::make_shared(tensor_iterator->output(0)); f = std::make_shared(ngraph::NodeVector{res_ti_1}, ngraph::ParameterVector{X, Y}); @@ -162,31 +162,31 @@ TEST(TransformationTests, ConvertTensorIteratorToRNNSequence) { } { - auto X = std::make_shared(element::f32, Shape{1, 2, 16}); - auto Y = std::make_shared(element::f32, Shape{1, 128}); + auto X = std::make_shared(element::f32, Shape{1, 2, 16}); + auto Y = std::make_shared(element::f32, Shape{1, 128}); auto w_val = std::vector(128 * 16, 0); auto r_val = std::vector(128 * 128, 0); auto b_val = std::vector(128, 0); - auto W = 
ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{128, 16}, w_val); - auto R = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{128, 128}, r_val); - auto B = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{128}, b_val); + auto W = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{128, 16}, w_val); + auto R = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{128, 128}, r_val); + auto B = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{128}, b_val); - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(Y, axis_1); + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(Y, axis_1); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_3 = std::make_shared(W, axis_2); - auto in_4 = std::make_shared(R, axis_2); - auto in_5 = std::make_shared(B, axis_2); + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_3 = std::make_shared(W, axis_2); + auto in_4 = std::make_shared(R, axis_2); + auto in_5 = std::make_shared(B, axis_2); - auto seq_lengths = ngraph::opset4::Constant::create(element::i32, Shape{1}, {2}); - auto rnn_sequence = std::make_shared(X, in_1, seq_lengths, in_3, in_4, in_5, + auto seq_lengths = ngraph::opset5::Constant::create(element::i32, Shape{1}, {2}); + auto rnn_sequence = std::make_shared(X, in_1, seq_lengths, in_3, in_4, in_5, 128, ngraph::op::RecurrentSequenceDirection::FORWARD); - auto axis_out = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto out_0 = std::make_shared(rnn_sequence->output(0), axis_out); - auto out_1 = std::make_shared(rnn_sequence->output(1), axis_out); - auto res_ti_1 = std::make_shared(out_0); + auto axis_out = 
ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto out_0 = std::make_shared(rnn_sequence->output(0), axis_out); + auto out_1 = std::make_shared(rnn_sequence->output(1), axis_out); + auto res_ti_1 = std::make_shared(out_0); f_ref = std::make_shared(ngraph::NodeVector{res_ti_1}, ngraph::ParameterVector{X, Y}); } @@ -197,32 +197,32 @@ TEST(TransformationTests, ConvertTensorIteratorToRNNSequence) { TEST(TransformationTests, ConvertTensorIteratorToGRUSequence) { std::shared_ptr f(nullptr), f_ref(nullptr); { - auto X = std::make_shared(element::f32, Shape{1, 2, 16}); - auto Y = std::make_shared(element::f32, Shape{1, 128}); + auto X = std::make_shared(element::f32, Shape{1, 2, 16}); + auto Y = std::make_shared(element::f32, Shape{1, 128}); - auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); - auto Yi = std::make_shared(element::f32, Shape{1, 128}); + auto Xi = std::make_shared(element::f32, Shape{1, 1, 16}); + auto Yi = std::make_shared(element::f32, Shape{1, 128}); // Body - auto axis = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto squeeze = std::make_shared(Xi, axis); + auto axis = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto squeeze = std::make_shared(Xi, axis); auto w_val = std::vector(384 * 16, 0); auto r_val = std::vector(384 * 128, 0); auto b_val = std::vector(384, 0); - auto W = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{384, 16}, w_val); - auto R = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{384, 128}, r_val); - auto B = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{384}, b_val); - - auto gru_cell = std::make_shared(squeeze, Yi, W, R, B, 128); - auto res_1 = std::make_shared(gru_cell); - auto axis_unsqueeze = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto unsqueeze = std::make_shared(gru_cell, axis_unsqueeze); - auto res_2 = 
std::make_shared(unsqueeze); + auto W = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{384, 16}, w_val); + auto R = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{384, 128}, r_val); + auto B = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{384}, b_val); + + auto gru_cell = std::make_shared(squeeze, Yi, W, R, B, 128); + auto res_1 = std::make_shared(gru_cell); + auto axis_unsqueeze = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto unsqueeze = std::make_shared(gru_cell, axis_unsqueeze); + auto res_2 = std::make_shared(unsqueeze); auto body = std::make_shared(OutputVector{res_1, res_2}, ParameterVector{Xi, Yi}); - auto tensor_iterator = std::make_shared(); + auto tensor_iterator = std::make_shared(); tensor_iterator->set_body(body); tensor_iterator->set_sliced_input(Xi, X, 0, 1, 1, -1, 1); @@ -231,8 +231,8 @@ TEST(TransformationTests, ConvertTensorIteratorToGRUSequence) { auto out0 = tensor_iterator->get_iter_value(res_1, -1); auto out1 = tensor_iterator->get_concatenated_slices(res_2, 0, 1, 1, -1, 1); - auto res_ti_1 = std::make_shared(tensor_iterator->output(1)); - //auto res_tRNNCelli_2 = std::make_shared(tensor_iterator->output(0)); + auto res_ti_1 = std::make_shared(tensor_iterator->output(1)); + //auto res_tRNNCelli_2 = std::make_shared(tensor_iterator->output(0)); f = std::make_shared(ngraph::NodeVector{res_ti_1}, ngraph::ParameterVector{X, Y}); @@ -245,31 +245,31 @@ TEST(TransformationTests, ConvertTensorIteratorToGRUSequence) { } { - auto X = std::make_shared(element::f32, Shape{1, 2, 16}); - auto Y = std::make_shared(element::f32, Shape{1, 128}); + auto X = std::make_shared(element::f32, Shape{1, 2, 16}); + auto Y = std::make_shared(element::f32, Shape{1, 128}); auto w_val = std::vector(384 * 16, 0); auto r_val = std::vector(384 * 128, 0); auto b_val = std::vector(384, 0); - auto W = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{384, 
16}, w_val); - auto R = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{384, 128}, r_val); - auto B = ngraph::opset4::Constant::create(ngraph::element::f32, ngraph::Shape{384}, b_val); + auto W = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{384, 16}, w_val); + auto R = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{384, 128}, r_val); + auto B = ngraph::opset5::Constant::create(ngraph::element::f32, ngraph::Shape{384}, b_val); - auto axis_1 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto in_1 = std::make_shared(Y, axis_1); + auto axis_1 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto in_1 = std::make_shared(Y, axis_1); - auto axis_2 = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); - auto in_3 = std::make_shared(W, axis_2); - auto in_4 = std::make_shared(R, axis_2); - auto in_5 = std::make_shared(B, axis_2); + auto axis_2 = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {0}); + auto in_3 = std::make_shared(W, axis_2); + auto in_4 = std::make_shared(R, axis_2); + auto in_5 = std::make_shared(B, axis_2); - auto seq_lengths = ngraph::opset4::Constant::create(element::i32, Shape{1}, {2}); - auto gru_sequence = std::make_shared(X, in_1, seq_lengths, in_3, in_4, in_5, + auto seq_lengths = ngraph::opset5::Constant::create(element::i32, Shape{1}, {2}); + auto gru_sequence = std::make_shared(X, in_1, seq_lengths, in_3, in_4, in_5, 128, ngraph::op::RecurrentSequenceDirection::FORWARD); - auto axis_out = ngraph::opset4::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); - auto out_0 = std::make_shared(gru_sequence->output(0), axis_out); - auto out_1 = std::make_shared(gru_sequence->output(1), axis_out); - auto res_ti_1 = std::make_shared(out_0); + auto axis_out = ngraph::opset5::Constant::create(ngraph::element::i64, ngraph::Shape{1}, {1}); + auto out_0 = 
std::make_shared(gru_sequence->output(0), axis_out); + auto out_1 = std::make_shared(gru_sequence->output(1), axis_out); + auto res_ti_1 = std::make_shared(out_0); f_ref = std::make_shared(ngraph::NodeVector{res_ti_1}, ngraph::ParameterVector{X, Y}); } diff --git a/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp b/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp index 22cf1da..302f3c1 100644 --- a/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp +++ b/inference-engine/tests/ngraph_functions/include/ngraph_functions/builders.hpp @@ -11,6 +11,7 @@ #include #include #include +#include #include "ngraph_functions/utils/data_utils.hpp" diff --git a/inference-engine/tests/ngraph_functions/src/gru_cell.cpp b/inference-engine/tests/ngraph_functions/src/gru_cell.cpp index 784daf2..364f0cd 100644 --- a/inference-engine/tests/ngraph_functions/src/gru_cell.cpp +++ b/inference-engine/tests/ngraph_functions/src/gru_cell.cpp @@ -31,7 +31,7 @@ std::shared_ptr makeGRU(const OutputVector& in, } else { std::vector lenghts(in[0].get_shape()[0], in[0].get_shape()[1]); auto seq_lenghts = ngraph::builder::makeConstant(in[0].get_element_type(), constants[3], lenghts, false); - return std::make_shared(in[0], in[1], seq_lenghts, W, R, B, hidden_size, direction, + return std::make_shared(in[0], in[1], seq_lenghts, W, R, B, hidden_size, direction, activations, activations_alpha, activations_beta, clip, linear_before_reset); } } diff --git a/inference-engine/tests/ngraph_functions/src/lstm_cell.cpp b/inference-engine/tests/ngraph_functions/src/lstm_cell.cpp index 944c8b6..8ff4c5a 100644 --- a/inference-engine/tests/ngraph_functions/src/lstm_cell.cpp +++ b/inference-engine/tests/ngraph_functions/src/lstm_cell.cpp @@ -29,7 +29,7 @@ std::shared_ptr makeLSTM(const std::vector>& } else { std::vector lenghts(in[0].get_shape()[0], in[0].get_shape()[1]); auto seq_lenghts = 
ngraph::builder::makeConstant(in[0].get_element_type(), constants[3], lenghts, false); - return std::make_shared(in[0], in[1], in[2], seq_lenghts, W, R, B, hidden_size, direction, + return std::make_shared(in[0], in[1], in[2], seq_lenghts, W, R, B, hidden_size, direction, activations_alpha, activations_beta, activations, clip); } } diff --git a/inference-engine/tests/ngraph_functions/src/rnn_cell.cpp b/inference-engine/tests/ngraph_functions/src/rnn_cell.cpp index 0d87ec1..5234ef4 100644 --- a/inference-engine/tests/ngraph_functions/src/rnn_cell.cpp +++ b/inference-engine/tests/ngraph_functions/src/rnn_cell.cpp @@ -29,7 +29,7 @@ std::shared_ptr makeRNN(const OutputVector& in, } else { std::vector lenghts(in[0].get_shape()[0], in[0].get_shape()[1]); auto seq_lenghts = ngraph::builder::makeConstant(in[0].get_element_type(), constants[3], lenghts, false); - return std::make_shared(in[0], in[1], seq_lenghts, W, R, B, hidden_size, direction, + return std::make_shared(in[0], in[1], seq_lenghts, W, R, B, hidden_size, direction, activations, activations_alpha, activations_beta, clip); } } diff --git a/ngraph/core/include/ngraph/opsets/opset.hpp b/ngraph/core/include/ngraph/opsets/opset.hpp index decb769..0b3b585 100644 --- a/ngraph/core/include/ngraph/opsets/opset.hpp +++ b/ngraph/core/include/ngraph/opsets/opset.hpp @@ -132,4 +132,5 @@ namespace ngraph const NGRAPH_API OpSet& get_opset2(); const NGRAPH_API OpSet& get_opset3(); const NGRAPH_API OpSet& get_opset4(); + const NGRAPH_API OpSet& get_opset5(); } diff --git a/ngraph/core/include/ngraph/opsets/opset5.hpp b/ngraph/core/include/ngraph/opsets/opset5.hpp new file mode 100644 index 0000000..73d6394 --- /dev/null +++ b/ngraph/core/include/ngraph/opsets/opset5.hpp @@ -0,0 +1,29 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//***************************************************************************** + +#pragma once + +#include "ngraph/ops.hpp" + +namespace ngraph +{ + namespace opset5 + { +#define NGRAPH_OP(a, b) using b::a; +#include "ngraph/opsets/opset5_tbl.hpp" +#undef NGRAPH_OP + } +} diff --git a/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp b/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp new file mode 100644 index 0000000..145cb86 --- /dev/null +++ b/ngraph/core/include/ngraph/opsets/opset5_tbl.hpp @@ -0,0 +1,169 @@ +//***************************************************************************** +// Copyright 2017-2020 Intel Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+//***************************************************************************** + +#ifndef NGRAPH_OP +#warning "NGRAPH_OP not defined" +#define NGRAPH_OP(x, y) +#endif + +NGRAPH_OP(Abs, ngraph::op::v0) +NGRAPH_OP(Acos, ngraph::op::v0) +NGRAPH_OP(Add, ngraph::op::v1) +NGRAPH_OP(Asin, ngraph::op::v0) +NGRAPH_OP(Atan, ngraph::op::v0) +NGRAPH_OP(AvgPool, ngraph::op::v1) +NGRAPH_OP(BatchNormInference, ngraph::op::v0) +NGRAPH_OP(BinaryConvolution, ngraph::op::v1) +NGRAPH_OP(Broadcast, ngraph::op::v3) +NGRAPH_OP(Bucketize, ngraph::op::v3) +NGRAPH_OP(CTCGreedyDecoder, ngraph::op::v0) +NGRAPH_OP(Ceiling, ngraph::op::v0) +NGRAPH_OP(Clamp, ngraph::op::v0) +NGRAPH_OP(Concat, ngraph::op::v0) +NGRAPH_OP(Constant, ngraph::op) +NGRAPH_OP(Convert, ngraph::op::v0) +NGRAPH_OP(ConvertLike, ngraph::op::v1) +NGRAPH_OP(Convolution, ngraph::op::v1) +NGRAPH_OP(ConvolutionBackpropData, ngraph::op::v1) +NGRAPH_OP(Cos, ngraph::op::v0) +NGRAPH_OP(Cosh, ngraph::op::v0) +NGRAPH_OP(CumSum, ngraph::op::v0) +NGRAPH_OP(DeformableConvolution, ngraph::op::v1) +NGRAPH_OP(DeformablePSROIPooling, ngraph::op::v1) +NGRAPH_OP(DepthToSpace, ngraph::op::v0) +NGRAPH_OP(DetectionOutput, ngraph::op::v0) +NGRAPH_OP(Divide, ngraph::op::v1) +NGRAPH_OP(Elu, ngraph::op::v0) +NGRAPH_OP(Erf, ngraph::op::v0) +NGRAPH_OP(Equal, ngraph::op::v1) +NGRAPH_OP(Exp, ngraph::op::v0) +NGRAPH_OP(ExtractImagePatches, ngraph::op::v3) +NGRAPH_OP(FakeQuantize, ngraph::op::v0) +NGRAPH_OP(Floor, ngraph::op::v0) +NGRAPH_OP(FloorMod, ngraph::op::v1) +NGRAPH_OP(Gather, ngraph::op::v1) +NGRAPH_OP(GatherTree, ngraph::op::v1) +NGRAPH_OP(Greater, ngraph::op::v1) +NGRAPH_OP(GreaterEqual, ngraph::op::v1) +NGRAPH_OP(GroupConvolution, ngraph::op::v1) +NGRAPH_OP(GroupConvolutionBackpropData, ngraph::op::v1) +NGRAPH_OP(GRN, ngraph::op::v0) +NGRAPH_OP(HardSigmoid, ngraph::op::v0) +NGRAPH_OP(Less, ngraph::op::v1) +NGRAPH_OP(LessEqual, ngraph::op::v1) +NGRAPH_OP(Log, ngraph::op::v0) +NGRAPH_OP(LogicalAnd, ngraph::op::v1) +NGRAPH_OP(LogicalNot, 
ngraph::op::v1) +NGRAPH_OP(LogicalOr, ngraph::op::v1) +NGRAPH_OP(LogicalXor, ngraph::op::v1) +NGRAPH_OP(LRN, ngraph::op::v0) +NGRAPH_OP(LSTMCell, ngraph::op::v4) +NGRAPH_OP(MatMul, ngraph::op::v0) +NGRAPH_OP(MaxPool, ngraph::op::v1) +NGRAPH_OP(Maximum, ngraph::op::v1) +NGRAPH_OP(Minimum, ngraph::op::v1) +NGRAPH_OP(Mod, ngraph::op::v1) +NGRAPH_OP(Multiply, ngraph::op::v1) +NGRAPH_OP(MVN, ngraph::op::v0) +NGRAPH_OP(Negative, ngraph::op::v0) +NGRAPH_OP(NormalizeL2, ngraph::op::v0) +NGRAPH_OP(NotEqual, ngraph::op::v1) +NGRAPH_OP(OneHot, ngraph::op::v1) +NGRAPH_OP(PRelu, ngraph::op::v0) +NGRAPH_OP(PSROIPooling, ngraph::op::v0) +NGRAPH_OP(Pad, ngraph::op::v1) +NGRAPH_OP(Parameter, ngraph::op::v0) +NGRAPH_OP(Power, ngraph::op::v1) +NGRAPH_OP(PriorBox, ngraph::op::v0) +NGRAPH_OP(PriorBoxClustered, ngraph::op::v0) +NGRAPH_OP(Proposal, ngraph::op::v4) +NGRAPH_OP(Range, ngraph::op::v4) +NGRAPH_OP(Relu, ngraph::op::v0) +NGRAPH_OP(ReduceMax, ngraph::op::v1) +NGRAPH_OP(ReduceLogicalAnd, ngraph::op::v1) +NGRAPH_OP(ReduceLogicalOr, ngraph::op::v1) +NGRAPH_OP(ReduceMean, ngraph::op::v1) +NGRAPH_OP(ReduceMin, ngraph::op::v1) +NGRAPH_OP(ReduceProd, ngraph::op::v1) +NGRAPH_OP(ReduceSum, ngraph::op::v1) +NGRAPH_OP(RegionYolo, ngraph::op::v0) +NGRAPH_OP(ReorgYolo, ngraph::op::v0) +NGRAPH_OP(Reshape, ngraph::op::v1) +NGRAPH_OP(Result, ngraph::op::v0) +NGRAPH_OP(ReverseSequence, ngraph::op::v0) +NGRAPH_OP(ROIPooling, ngraph::op::v0) +NGRAPH_OP(ScatterNDUpdate, ngraph::op::v3) +NGRAPH_OP(Select, ngraph::op::v1) +NGRAPH_OP(Selu, ngraph::op::v0) +NGRAPH_OP(Sign, ngraph::op::v0) +NGRAPH_OP(Sigmoid, ngraph::op::v0) +NGRAPH_OP(Sin, ngraph::op::v0) +NGRAPH_OP(Sinh, ngraph::op::v0) +NGRAPH_OP(Softmax, ngraph::op::v1) +NGRAPH_OP(Sqrt, ngraph::op::v0) +NGRAPH_OP(SpaceToDepth, ngraph::op::v0) +NGRAPH_OP(Split, ngraph::op::v1) +NGRAPH_OP(SquaredDifference, ngraph::op::v0) +NGRAPH_OP(Squeeze, ngraph::op::v0) +NGRAPH_OP(StridedSlice, ngraph::op::v1) +NGRAPH_OP(Subtract, ngraph::op::v1) +NGRAPH_OP(Tan, 
ngraph::op::v0) +NGRAPH_OP(Tanh, ngraph::op::v0) +NGRAPH_OP(TensorIterator, ngraph::op::v0) +NGRAPH_OP(Tile, ngraph::op::v0) +NGRAPH_OP(Transpose, ngraph::op::v1) +NGRAPH_OP(Unsqueeze, ngraph::op::v0) +NGRAPH_OP(VariadicSplit, ngraph::op::v1) + +// New operations added in opset2 +NGRAPH_OP(Gelu, ngraph::op::v0) +NGRAPH_OP(BatchToSpace, ngraph::op::v1) +NGRAPH_OP(SpaceToBatch, ngraph::op::v1) + +// New operations added in opset3 +NGRAPH_OP(EmbeddingBagPackedSum, ngraph::op::v3) +NGRAPH_OP(EmbeddingSegmentsSum, ngraph::op::v3) +NGRAPH_OP(EmbeddingBagOffsetsSum, ngraph::op::v3) +NGRAPH_OP(GRUCell, ngraph::op::v3) +NGRAPH_OP(NonZero, ngraph::op::v3) +NGRAPH_OP(RNNCell, ngraph::op::v0) +NGRAPH_OP(ROIAlign, ngraph::op::v3) +NGRAPH_OP(ScatterElementsUpdate, ngraph::op::v3) +NGRAPH_OP(ScatterUpdate, ngraph::op::v3) +NGRAPH_OP(ShuffleChannels, ngraph::op::v0) +NGRAPH_OP(ShapeOf, ngraph::op::v3) +NGRAPH_OP(Assign, ngraph::op::v3) +NGRAPH_OP(ReadValue, ngraph::op::v3) +NGRAPH_OP(TopK, ngraph::op::v3) + +// New operations added in opset4 +NGRAPH_OP(Acosh, ngraph::op::v3) +NGRAPH_OP(Asinh, ngraph::op::v3) +NGRAPH_OP(Atanh, ngraph::op::v3) +NGRAPH_OP(CTCLoss, ngraph::op::v4) +NGRAPH_OP(HSwish, ngraph::op::v4) +NGRAPH_OP(Interpolate, ngraph::op::v4) +NGRAPH_OP(Mish, ngraph::op::v4) +NGRAPH_OP(NonMaxSuppression, ngraph::op::v4) +NGRAPH_OP(ReduceL1, ngraph::op::v4) +NGRAPH_OP(ReduceL2, ngraph::op::v4) +NGRAPH_OP(SoftPlus, ngraph::op::v4) +NGRAPH_OP(Swish, ngraph::op::v4) + +// New operations added in opset5 +NGRAPH_OP(LSTMSequence, ngraph::op::v5) +NGRAPH_OP(GRUSequence, ngraph::op::v5) +NGRAPH_OP(RNNSequence, ngraph::op::v5) \ No newline at end of file diff --git a/ngraph/core/src/opsets/opset.cpp b/ngraph/core/src/opsets/opset.cpp index 3986d46..56fdf7d 100644 --- a/ngraph/core/src/opsets/opset.cpp +++ b/ngraph/core/src/opsets/opset.cpp @@ -119,3 +119,22 @@ const ngraph::OpSet& ngraph::get_opset4() } return opset; } + +const ngraph::OpSet& ngraph::get_opset5() +{ + static 
std::mutex init_mutex; + static bool opset_is_initialized = false; + static OpSet opset; + if (!opset_is_initialized) + { + std::lock_guard guard(init_mutex); + if (!opset_is_initialized) + { +#define NGRAPH_OP(NAME, NAMESPACE) opset.insert(); +#include "ngraph/opsets/opset5_tbl.hpp" +#undef NGRAPH_OP + opset_is_initialized = true; + } + } + return opset; +} \ No newline at end of file diff --git a/ngraph/test/attributes.cpp b/ngraph/test/attributes.cpp index f7f600d..a962466 100644 --- a/ngraph/test/attributes.cpp +++ b/ngraph/test/attributes.cpp @@ -21,6 +21,7 @@ #include "ngraph/opsets/opset1.hpp" #include "ngraph/opsets/opset3.hpp" #include "ngraph/opsets/opset4.hpp" +#include "ngraph/opsets/opset5.hpp" #include "util/visitor.hpp" @@ -1099,7 +1100,7 @@ TEST(attributes, lstm_cell_op) TEST(attributes, lstm_sequence_op) { - FactoryRegistry::get().register_factory(); + FactoryRegistry::get().register_factory(); const size_t batch_size = 4; const size_t num_directions = 2; @@ -1126,7 +1127,7 @@ TEST(attributes, lstm_sequence_op) const std::vector activations = {"tanh", "sigmoid", "tanh"}; const float clip_threshold = 0.5f; - const auto lstm_sequence = make_shared(X, + const auto lstm_sequence = make_shared(X, initial_hidden_state, initial_cell_state, sequence_lengths, @@ -1140,7 +1141,7 @@ TEST(attributes, lstm_sequence_op) activations, clip_threshold); NodeBuilder builder(lstm_sequence); - auto g_lstm_sequence = as_type_ptr(builder.create()); + auto g_lstm_sequence = as_type_ptr(builder.create()); EXPECT_EQ(g_lstm_sequence->get_hidden_size(), lstm_sequence->get_hidden_size()); EXPECT_EQ(g_lstm_sequence->get_activations(), lstm_sequence->get_activations()); diff --git a/ngraph/test/type_prop/gru_sequence.cpp b/ngraph/test/type_prop/gru_sequence.cpp index 105d8e3..47cc47f 100644 --- a/ngraph/test/type_prop/gru_sequence.cpp +++ b/ngraph/test/type_prop/gru_sequence.cpp @@ -16,7 +16,7 @@ #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" -#include 
"ngraph/opsets/opset4.hpp" +#include "ngraph/opsets/opset5.hpp" #include "util/type_prop.hpp" using namespace std; @@ -31,20 +31,20 @@ TEST(type_prop, gru_sequence_forward) const size_t hidden_size = 128; const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); - const auto initial_hidden_state = make_shared( + make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto initial_hidden_state = make_shared( element::f32, Shape{batch_size, num_directions, hidden_size}); const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); - const auto W = make_shared( + const auto W = make_shared( element::f32, Shape{num_directions, 3 * hidden_size, input_size}); - const auto R = make_shared( + const auto R = make_shared( element::f32, Shape{num_directions, 3 * hidden_size, hidden_size}); const auto B = - make_shared(element::f32, Shape{num_directions, 3 * hidden_size}); + make_shared(element::f32, Shape{num_directions, 3 * hidden_size}); const auto direction = op::RecurrentSequenceDirection::FORWARD; - const auto sequence = make_shared( + const auto sequence = make_shared( X, initial_hidden_state, sequence_lengths, W, R, B, hidden_size, direction); EXPECT_EQ(sequence->get_hidden_size(), hidden_size); diff --git a/ngraph/test/type_prop/lstm_sequence.cpp b/ngraph/test/type_prop/lstm_sequence.cpp index 16d628c..756e7d9 100644 --- a/ngraph/test/type_prop/lstm_sequence.cpp +++ b/ngraph/test/type_prop/lstm_sequence.cpp @@ -16,7 +16,7 @@ #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset4.hpp" +#include "ngraph/opsets/opset5.hpp" #include "util/type_prop.hpp" // suppress FusedOp deprecation warnings @@ -41,7 +41,7 @@ struct recurrent_sequence_parameters // // Create and initialize default input test tensors. 
// -shared_ptr +shared_ptr lstm_seq_tensor_initialization(const recurrent_sequence_parameters& param) { auto batch_size = param.batch_size; @@ -52,20 +52,20 @@ shared_ptr auto et = param.et; const auto X = - make_shared(et, PartialShape{batch_size, seq_length, input_size}); + make_shared(et, PartialShape{batch_size, seq_length, input_size}); const auto initial_hidden_state = - make_shared(et, PartialShape{batch_size, num_directions, hidden_size}); + make_shared(et, PartialShape{batch_size, num_directions, hidden_size}); const auto initial_cell_state = - make_shared(et, PartialShape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(et, PartialShape{batch_size}); - const auto W = make_shared( + make_shared(et, PartialShape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = make_shared(et, PartialShape{batch_size}); + const auto W = make_shared( et, PartialShape{num_directions, hidden_size * 4, input_size}); - const auto R = make_shared( + const auto R = make_shared( et, PartialShape{num_directions, hidden_size * 4, hidden_size}); const auto B = - make_shared(et, PartialShape{num_directions, hidden_size * 4}); + make_shared(et, PartialShape{num_directions, hidden_size * 4}); - const auto lstm_sequence = make_shared(); + const auto lstm_sequence = make_shared(); lstm_sequence->set_argument(0, X); lstm_sequence->set_argument(1, initial_hidden_state); @@ -87,22 +87,22 @@ TEST(type_prop, lstm_sequence_forward) const size_t hidden_size = 128; const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); - const auto initial_hidden_state = make_shared( + make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto initial_hidden_state = make_shared( element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto initial_cell_state = make_shared( + const auto initial_cell_state = make_shared( element::f32, Shape{batch_size, num_directions, hidden_size}); - 
const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); - const auto W = make_shared( + const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + const auto W = make_shared( element::f32, Shape{num_directions, 4 * hidden_size, input_size}); - const auto R = make_shared( + const auto R = make_shared( element::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); const auto B = - make_shared(element::f32, Shape{num_directions, 4 * hidden_size}); + make_shared(element::f32, Shape{num_directions, 4 * hidden_size}); const auto lstm_direction = op::RecurrentSequenceDirection::FORWARD; - const auto lstm_sequence = make_shared(X, + const auto lstm_sequence = make_shared(X, initial_hidden_state, initial_cell_state, sequence_lengths, @@ -139,25 +139,25 @@ TEST(type_prop, lstm_sequence_bidirectional) const size_t hidden_size = 256; const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); - const auto initial_hidden_state = make_shared( + make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto initial_hidden_state = make_shared( element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto initial_cell_state = make_shared( + const auto initial_cell_state = make_shared( element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); - const auto W = make_shared( + const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + const auto W = make_shared( element::f32, Shape{num_directions, 4 * hidden_size, input_size}); - const auto R = make_shared( + const auto R = make_shared( element::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); const auto B = - make_shared(element::f32, Shape{num_directions, 4 * hidden_size}); + make_shared(element::f32, Shape{num_directions, 4 * hidden_size}); - const auto lstm_direction = op::v5::LSTMSequence::direction::BIDIRECTIONAL; + 
const auto lstm_direction = opset5::LSTMSequence::direction::BIDIRECTIONAL; const std::vector activations_alpha = {2.7, 7.0, 32.367}; const std::vector activations_beta = {0.0, 5.49, 6.0}; const std::vector activations = {"tanh", "sigmoid", "sigmoid"}; - const auto lstm_sequence = make_shared(X, + const auto lstm_sequence = make_shared(X, initial_hidden_state, initial_cell_state, sequence_lengths, @@ -170,7 +170,7 @@ TEST(type_prop, lstm_sequence_bidirectional) activations_beta, activations); EXPECT_EQ(lstm_sequence->get_hidden_size(), hidden_size); - EXPECT_EQ(lstm_sequence->get_direction(), op::v5::LSTMSequence::direction::BIDIRECTIONAL); + EXPECT_EQ(lstm_sequence->get_direction(), opset5::LSTMSequence::direction::BIDIRECTIONAL); EXPECT_EQ(lstm_sequence->get_activations_alpha(), activations_alpha); EXPECT_EQ(lstm_sequence->get_activations_beta(), activations_beta); EXPECT_EQ(lstm_sequence->get_activations()[0], "tanh"); @@ -328,7 +328,7 @@ TEST(type_prop, lstm_sequence_invalid_input_dimension) param.et = element::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); - auto invalid_rank0_tensor = make_shared(param.et, PartialShape{}); + auto invalid_rank0_tensor = make_shared(param.et, PartialShape{}); // Validate invalid rank0 tensor for all inputs: X, initial_hidden_state, initial_cell_state W, // R, B @@ -352,7 +352,7 @@ TEST(type_prop, lstm_sequence_invalid_input_dynamic_rank) param.hidden_size = 256; param.et = element::f32; - auto check_dynamic_lstm = [](const shared_ptr& lstm) -> bool { + auto check_dynamic_lstm = [](const shared_ptr& lstm) -> bool { return lstm->output(0).get_partial_shape() == PartialShape::dynamic() && lstm->output(1).get_partial_shape() == PartialShape::dynamic() && lstm->output(2).get_partial_shape() == PartialShape::dynamic() && @@ -361,7 +361,7 @@ TEST(type_prop, lstm_sequence_invalid_input_dynamic_rank) auto lstm_sequence = lstm_seq_tensor_initialization(param); auto invalid_dynamic_tensor = - make_shared(param.et, 
PartialShape::dynamic(Rank::dynamic())); + make_shared(param.et, PartialShape::dynamic(Rank::dynamic())); // Validate invalid dynamic tensor for all inputs: X, initial_hidden_state, initial_cell_state // W, R, B diff --git a/ngraph/test/type_prop/rnn_sequence.cpp b/ngraph/test/type_prop/rnn_sequence.cpp index a3dfb6c..94b500d 100644 --- a/ngraph/test/type_prop/rnn_sequence.cpp +++ b/ngraph/test/type_prop/rnn_sequence.cpp @@ -16,7 +16,7 @@ #include "gtest/gtest.h" #include "ngraph/ngraph.hpp" -#include "ngraph/opsets/opset4.hpp" +#include "ngraph/opsets/opset5.hpp" #include "util/type_prop.hpp" using namespace std; @@ -31,20 +31,20 @@ TEST(type_prop, rnn_sequence_forward) const size_t hidden_size = 128; const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); - const auto initial_hidden_state = make_shared( + make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto initial_hidden_state = make_shared( element::f32, Shape{batch_size, num_directions, hidden_size}); const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); - const auto W = make_shared(element::f32, + const auto W = make_shared(element::f32, Shape{num_directions, hidden_size, input_size}); - const auto R = make_shared(element::f32, + const auto R = make_shared(element::f32, Shape{num_directions, hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{num_directions, hidden_size}); + const auto B = make_shared(element::f32, Shape{num_directions, hidden_size}); const auto direction = op::RecurrentSequenceDirection::FORWARD; - const auto sequence = make_shared( + const auto sequence = make_shared( X, initial_hidden_state, sequence_lengths, W, R, B, hidden_size, direction); EXPECT_EQ(sequence->get_hidden_size(), hidden_size); -- 2.7.4