Added unit tests and readme for model optimizer (#79)
author Alexey Suhov <asuhov@users.noreply.github.com>
Wed, 23 Jan 2019 17:23:27 +0000 (20:23 +0300)
committer openvino-pushbot <44090433+openvino-pushbot@users.noreply.github.com>
Wed, 23 Jan 2019 17:23:27 +0000 (20:23 +0300)
* added unit tests
* added readme for model optimizer
* added a list of supported IE plugins

232 files changed:
inference-engine/README.md
model-optimizer/README.md [new file with mode: 0644]
model-optimizer/extensions/back/PermuteForReshape_test.py [new file with mode: 0644]
model-optimizer/extensions/back/ShufflenetReLUReorder_test.py [new file with mode: 0644]
model-optimizer/extensions/back/TileReshaper_test.py [new file with mode: 0644]
model-optimizer/extensions/back/insert_compatibility_l2normalization_test.py [new file with mode: 0644]
model-optimizer/extensions/back/kaldi_remove_memory_output_test.py [new file with mode: 0644]
model-optimizer/extensions/back/remove_last_softmax_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/accum_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/argmax_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/axpy_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/bn_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/conv_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/correlation_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/ctcgreedydecoder_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/data_augmentation_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/grn_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/interp_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/normalize_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/pooling_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/power_file_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/prelu_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/priorbox_clustered_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/priorbox_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/proposal_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/proposal_python_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/psroipooling_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/regionyolo_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/reorgyolo_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/resample_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/simplernms_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/caffe/spatial_transformer_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/eltwise_n_test.py [new file with mode: 0644]
model-optimizer/extensions/front/freeze_placeholder_value_test.py [new file with mode: 0644]
model-optimizer/extensions/front/image_scaler_test.py [new file with mode: 0644]
model-optimizer/extensions/front/instance_normalization_test.py [new file with mode: 0644]
model-optimizer/extensions/front/kaldi/replace_splice_node_pattern_test.py [new file with mode: 0644]
model-optimizer/extensions/front/mxnet/check_softmax_node_inputs_test.py [new file with mode: 0644]
model-optimizer/extensions/front/mxnet/conv_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/mxnet/custom_test.py [new file with mode: 0644]
model-optimizer/extensions/front/mxnet/pooling_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/mxnet/slice_channel_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/mxnet/ssd_pattern_flatten_softmax_activation_test.py [new file with mode: 0644]
model-optimizer/extensions/front/mxnet/ssd_pattern_remove_flatten_test.py [new file with mode: 0644]
model-optimizer/extensions/front/mxnet/ssd_pattern_remove_reshape_test.py [new file with mode: 0644]
model-optimizer/extensions/front/mxnet/ssd_pattern_remove_transpose_test.py [new file with mode: 0644]
model-optimizer/extensions/front/mxnet/ssd_reorder_detection_out_inputs_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/affine_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/conv_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/crop_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/elu_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/flatten_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/gather_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/image_scaler_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/instance_normalization_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/pad_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/sigmoid_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/slice_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/squeeze_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/tanh_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/transpose_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/unsqueeze_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/onnx/upsample_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/reciprocal_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/ObjectDetectionAPI_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/concat_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/concat_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/conv_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/deconv_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/fifo_replacer_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/mvn_unrolled_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/next_iteration_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/pad_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/pooling_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/front/tf/stop_gradient_ext_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/AddIsCyclicAttribute_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/AddReshapeAfterStridedSlice_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/ConvertGroupedStridedSlice_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/EltwiseInputNormalization_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/EltwiseInputReshape_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/FusePermutesSequence_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/MinumumMiddleReplacer_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/NormalizeFullyConnected_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/PixelLinkReshape_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/Reduce_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/ShuffleChannel_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/ShufflenetReshape_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/SliceConvert_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/TensorIteratorBackEdge_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/TensorIteratorCondition_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/TensorIteratorInput_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/TensorIteratorOutput_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/UselessSridedSlice_test.py [new file with mode: 0644]
model-optimizer/extensions/middle/lstm_sequence_normalize_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/accum_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/argmax_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/assert_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/correlation_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/ctc_greedy_decoder_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/data_augmentation_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/depth_to_space_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/gather_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/grn_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/instance_normalization_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/interp_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/merge_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/normalize_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/priorbox_clustered_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/priorbox_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/proposal_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/psroipooling_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/regionyolo_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/reorgyolo_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/resample_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/select_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/simplernms_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/spatial_transformer_test.py [new file with mode: 0644]
model-optimizer/extensions/ops/switch_test.py [new file with mode: 0644]
model-optimizer/mo/back/ie_ir_ver_2/emitter_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/custom_layers_mapping_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractor_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/batchnorm_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/concat_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/crop_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/eltwise_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/elu_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/inner_product_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/input_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/lrn_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/permute_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/power_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/relu_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/reshape_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/scale_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/slice_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/extractors/utils_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/loader_test.py [new file with mode: 0644]
model-optimizer/mo/front/caffe/python_layer_extractor_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/layout_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/caffe_fallback_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/concat_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/crop_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/elemental_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/eltwise_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/expand_dims_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/inner_product_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/multi_box_detection_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/multi_box_prior_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/range_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/roipooling_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/slice_test.py [new file with mode: 0644]
model-optimizer/mo/front/common/partial_infer/split_test.py [new file with mode: 0644]
model-optimizer/mo/front/extractor_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/add_shift_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/affine_component_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/affine_transform_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/common_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/concat_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/convolutional_component_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/fixed_affine_component_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/max_pooling_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/rescale_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/sigmoid_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/slice_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/extractors/tanh_ext_test.py [new file with mode: 0644]
model-optimizer/mo/front/kaldi/loader/utils_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/extractors/activation_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/extractors/crop_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/extractors/eltwise_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/extractors/leaky_relu_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/extractors/multibox_detection_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/extractors/multibox_prior_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/extractors/relu_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/extractors/sigmoid_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/extractors/slice_axis_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/extractors/utils_test.py [new file with mode: 0644]
model-optimizer/mo/front/mxnet/loader_test.py [new file with mode: 0644]
model-optimizer/mo/front/onnx/extractors/constant_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/concat_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/const_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/eltwise_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/expand_dims_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/identity_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/lrn_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/matmul_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/mean_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/prod_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/squeeze_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/extractors/utils_test.py [new file with mode: 0644]
model-optimizer/mo/front/tf/loader_test.py [new file with mode: 0644]
model-optimizer/mo/graph/graph_test.py [new file with mode: 0644]
model-optimizer/mo/main_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/conv_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/eliminate_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/fusing/decomposition_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/fusing/fuse_linear_ops_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/fusing/fuse_linear_seq_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/fusing/helpers_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/fusing/mark_unfused_nodes_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/fusing/resnet_optimization_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/infer_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/mean_scale_values_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/pool_test.py [new file with mode: 0644]
model-optimizer/mo/middle/passes/shared_weights_duplication_test.py [new file with mode: 0644]
model-optimizer/mo/ops/activation_test.py [new file with mode: 0644]
model-optimizer/mo/ops/clamp_test.py [new file with mode: 0644]
model-optimizer/mo/ops/concat_test.py [new file with mode: 0644]
model-optimizer/mo/ops/convolution_test.py [new file with mode: 0644]
model-optimizer/mo/ops/crop_test.py [new file with mode: 0644]
model-optimizer/mo/ops/flatten_onnx_test.py [new file with mode: 0644]
model-optimizer/mo/ops/flatten_test.py [new file with mode: 0644]
model-optimizer/mo/ops/inner_product_test.py [new file with mode: 0644]
model-optimizer/mo/ops/pad_test.py [new file with mode: 0644]
model-optimizer/mo/ops/permute_test.py [new file with mode: 0644]
model-optimizer/mo/ops/pooling_test.py [new file with mode: 0644]
model-optimizer/mo/ops/power_test.py [new file with mode: 0644]
model-optimizer/mo/ops/slice_test.py [new file with mode: 0644]
model-optimizer/mo/ops/tile_test.py [new file with mode: 0644]
model-optimizer/mo/ops/unsqueeze_test.py [new file with mode: 0644]
model-optimizer/mo/pipeline/common_test.py [new file with mode: 0644]
model-optimizer/mo/pipeline/kaldi_test.py [new file with mode: 0644]
model-optimizer/mo/pipeline/mx_test.py [new file with mode: 0644]
model-optimizer/mo/utils/cli_parser_test.py [new file with mode: 0644]
model-optimizer/mo/utils/convert.py [new file with mode: 0644]
model-optimizer/mo/utils/graph_test.py [new file with mode: 0644]
model-optimizer/mo/utils/pipeline_config_test.py [new file with mode: 0644]
model-optimizer/mo/utils/simple_proto_parser_test.py [new file with mode: 0644]
model-optimizer/mo/utils/summarize_graph_test.py [new file with mode: 0644]
model-optimizer/mo/utils/unittest/extractors.py [new file with mode: 0644]
model-optimizer/mo/utils/unittest/graph.py [new file with mode: 0644]
model-optimizer/mo/utils/utils_test.py [new file with mode: 0644]
model-optimizer/mo/utils/version_test.py [new file with mode: 0644]

index c122e00..f0d6641 100644 (file)
@@ -1,3 +1,18 @@
+## Repository components
+
+The Inference Engine can infer models in different formats and supports various input and output configurations.
+
+The open source version of Inference Engine includes the following plugins:
+
+| PLUGIN               | DEVICE TYPES |
+| ---------------------| -------------|
+| CPU plugin           | Intel® Xeon® with Intel® AVX2 and AVX512, Intel® Core™ Processors with Intel® AVX2, Intel® Atom® Processors with Intel® SSE |
+| GPU plugin           | Intel® Processor Graphics, including Intel® HD Graphics and Intel® Iris® Graphics |
+| GNA plugin           | Intel® Speech Enabling Developer Kit, Amazon Alexa* Premium Far-Field Developer Kit, Intel® Pentium® Silver processor J5005, Intel® Celeron® processor J4005, Intel® Core™ i3-8121U processor |
+| Heterogeneous plugin | Enables inference of a single network to be distributed across several Intel® devices. |
+
+Inference Engine plugins for Intel® FPGA and Intel® Movidius™ Neural Compute Stick are distributed only in a binary form as a part of [Intel® Distribution of OpenVINO™](https://software.intel.com/en-us/openvino-toolkit).
+
 ## Build on Linux\* Systems
 
 The software was validated on:
diff --git a/model-optimizer/README.md b/model-optimizer/README.md
new file mode 100644 (file)
index 0000000..8bfe217
--- /dev/null
@@ -0,0 +1,145 @@
+## Project structure
+
+The project has the following structure:
+<pre>
+    |-- root
+        |-- extensions
+            |-- front/ - graph transformations during front phase
+            |-- middle/ - graph transformations during middle phase (after partial inference)
+            |-- back/ - graph transformations during back phase (before IR generation)
+            |-- ops/ - Model Optimizer operation classes
+        |-- mo
+            |-- back - Back-End logic: contains IR emitting logic
+            |-- front - Front-End logic: contains the matching between framework-specific layers and IR-specific
+                        operations, and the calculation of output shapes for each registered layer
+            |-- graph - Graph utilities to work with internal IR representation
+            |-- middle - Graph transformations - optimizations of the model
+            |-- ops - Model Optimizer operation classes
+            |-- pipeline - Sequence of steps required to create IR for each framework
+            |-- utils - Utility functions
+        |-- tf_call_ie_layer - Sources for TensorFlow fallback in Inference Engine during model inference
+        |-- mo.py - Centralized entry point that can be used for any supported framework
+        |-- mo_caffe.py - Entry point particularly for Caffe
+        |-- mo_mxnet.py - Entry point particularly for MXNet
+        |-- mo_tf.py - Entry point particularly for TensorFlow
+
+</pre>
+
+## Prerequisites
+
+Model Optimizer requires:
+
+1. Python 3.4 or newer
+
+## Installation instructions
+
+1. Go to the Model Optimizer folder
+
+2. Create a virtual environment and activate it. This option is strongly recommended: it creates a Python sandbox, so
+   the Model Optimizer dependencies do not affect the global Python configuration or installed libraries. At the same
+   time, the <code>--system-site-packages</code> flag ensures that system-wide Python libraries are also available in
+   this sandbox. Skip this step only if you really want to install all Model Optimizer dependencies globally:
+
+    * Create environment:
+          <pre>virtualenv -p /usr/bin/python3.6 .env3 --system-site-packages</pre>
+    * Activate it:
+      <pre>. .env3/bin/activate</pre>
+3. Install dependencies. If you only want to convert models from a particular framework, use one of the
+   available <code>requirements_*.txt</code> files corresponding to the framework of choice. For example, for Caffe use
+   <code>requirements_caffe.txt</code>, and so on. If you later decide to switch to other frameworks, install the
+   dependencies for them using the same mechanism:
+   <pre>
+    pip3 install -r requirements.txt
+    </pre>
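+
+   For example, to install only the dependencies needed for converting Caffe models, use the framework-specific
+   file mentioned above:
+   <pre>
+    pip3 install -r requirements_caffe.txt
+    </pre>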
+
+## Command-Line Interface (CLI)
+
+The following short examples are framework-dependent. For details across all frameworks, read the complete help
+with the <code>--help</code> option:
+<pre>
+    python3 mo.py --help
+</pre>
+
+There are several scripts that convert a model:
+
+1. <code>mo.py</code> -- universal entry point that can convert a model from any supported framework
+
+2. <code>mo_caffe.py</code> -- dedicated script for converting Caffe models
+
+3. <code>mo_mxnet.py</code> -- dedicated script for converting MXNet models
+
+4. <code>mo_tf.py</code> -- dedicated script for converting TensorFlow models
+
+5. <code>mo_onnx.py</code> -- dedicated script for converting ONNX models
+
+6. <code>mo_kaldi.py</code> -- dedicated script for converting Kaldi models
+
+<code>mo.py</code> can deduce the original framework in which the input model was trained from the extension of
+the model file. Alternatively, the <code>--framework</code> option can be used for this purpose if the model files
+don't have standard extensions (<code>.pb</code> for TensorFlow models, <code>.params</code> for MXNet models,
+<code>.caffemodel</code> for Caffe models). So, the following commands are equivalent:
+
+<pre>
+    python3 mo.py --input_model /user/models/model.pb
+    python3 mo.py --framework tf --input_model /user/models/model.pb
+</pre>
+The following examples illustrate the shortest command lines to convert a model for each
+framework.
+
+### Convert TensorFlow model
+
+To convert a frozen TensorFlow model contained in the binary file <code>model-file.pb</code>, run the
+dedicated entry point <code>mo_tf.py</code>:
+<pre>
+    python3 mo_tf.py --input_model model-file.pb
+</pre>
+
+### Convert Caffe model
+
+To convert a Caffe model contained in <code>model-file.prototxt</code> and <code>model-file.caffemodel</code>, run the
+dedicated entry point <code>mo_caffe.py</code>:
+<pre>
+    python3 mo_caffe.py --input_model model-file.caffemodel
+</pre>
+
+
+### Convert MXNet model
+
+To convert an MXNet model contained in <code>model-file-symbol.json</code> and <code>model-file-0000.params</code>, run the
+dedicated entry point <code>mo_mxnet.py</code>:
+<pre>
+    python3 mo_mxnet.py --input_model model-file
+</pre>
+
+> **NOTE**: For TensorFlow*, all Placeholder ops are represented as Input layers in the final IR.
+
+### Convert ONNX* model
+
+The Model Optimizer assumes that you have an ONNX model that was directly downloaded from a public repository or converted from any framework that supports exporting to the ONNX format.
+
+Use the <code>mo_onnx.py</code> script and pass the path to the input <code>.onnx</code> model file:
+
+<pre>
+    python3 mo_onnx.py --input_model model-file.onnx
+</pre>
+
+Input channel reordering, scaling, subtraction of mean values, and other preprocessing features
+are not applied by default. To pass the necessary values to Model Optimizer, run <code>mo.py</code>
+(or <code>mo_tf.py</code>, <code>mo_caffe.py</code>, <code>mo_mxnet.py</code>) with <code>--help</code> and
+examine all available options.
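+
+As an illustration only (the exact option names and value formats are listed in the <code>--help</code> output and
+may differ between Model Optimizer versions), a conversion that fixes the input shape, subtracts mean values,
+applies a scale factor, and reverses the input channels might look like:
+<pre>
+    python3 mo_tf.py --input_model model-file.pb --input_shape [1,224,224,3] --mean_values [127.5,127.5,127.5] --scale 127.5 --reverse_input_channels
+</pre>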
+
+## Working with Inference Engine
+
+At the moment, Inference Engine is the only consumer of the IR models that Model Optimizer produces.
+The whole workflow and more documentation on the structure of the IR are provided in the Developer Guide
+of Inference Engine. Note that sections about running Model Optimizer refer to the old version
+of the tool and cannot be applied to the current version of Model Optimizer.
+
+### How to run unit tests
+
+1. Run the tests with:
+<pre>
+    python -m unittest discover -p "*_test.py" [-s PATH_TO_DIR]
+</pre>
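+
+For example, to run only the tests for the back-phase transformations from the Model Optimizer folder
+(the directory is given for illustration; any test directory from the project structure above can be used):
+<pre>
+    python -m unittest discover -p "*_test.py" -s extensions/back
+</pre>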
+
+---
+\* Other names and brands may be claimed as the property of others.
diff --git a/model-optimizer/extensions/back/PermuteForReshape_test.py b/model-optimizer/extensions/back/PermuteForReshape_test.py
new file mode 100644 (file)
index 0000000..6efc482
--- /dev/null
@@ -0,0 +1,133 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+import numpy as np
+
+from extensions.back.PermuteForReshape import PermuteForReshape
+from mo.graph.graph import Node
+from mo.ops.op import PermuteAttrs
+from mo.utils.unittest.graph import build_graph_with_attrs, compare_graphs
+
+
+class ReshapeToPermuteTest(unittest.TestCase):
+    nodes = [
+        ('input_data', {'kind': 'data', 'shape': None}),
+        ('reshape', {'kind': 'op', 'op': 'Squeeze', 'type': 'Reshape', 'dim': None}),
+        ('reshape_data', {'kind': 'data'}),
+    ]
+    edges = [
+        ('input_data', 'reshape'),
+        ('reshape', 'reshape_data'),
+    ]
+
+    permute_nodes = [
+        ('permute', {'kind': 'op', 'op': 'Permute'}),
+        ('permute_data', {'kind': 'data', 'shape': None})
+    ]
+    permute_edges = [
+        ('input_data', 'permute'),
+        ('permute', 'permute_data'),
+        ('permute_data', 'reshape'),
+    ]
+
+    def test_from3D_to3D(self):
+        input_shape = np.array([2, 3, 4])
+        new_shape = np.array([2, 3, 4])
+        graph = build_graph_with_attrs(
+            nodes_with_attrs=self.nodes,
+            edges_with_attrs=self.edges,
+            update_nodes_attributes=[('input_data', {'shape': input_shape}),
+                                     ('reshape', {'dim': new_shape}),
+                                     ('reshape_data', {'shape': new_shape})]
+        )
+        graph.graph['layout'] = 'NHWC'
+        # add permute attrs to reshape
+        reshape = Node(graph, 'reshape')
+        PermuteAttrs.create_permute_attrs(reshape, attrs=[('dim', 'output:0')])
+
+        tested_pattern = PermuteForReshape()
+        tested_pattern.find_and_replace_pattern(graph)
+        (flag, resp) = compare_graphs(graph, graph, last_node='reshape_data')
+        self.assertTrue(flag, resp)
+
+    def test_from4D_to3D(self):
+        input_shape = np.array([1, 2, 3, 4])
+        new_shape = np.array([3, 4, 2])
+        nhwc_shape = np.array([1, 3, 4, 2])
+        graph = build_graph_with_attrs(
+            nodes_with_attrs=self.nodes,
+            edges_with_attrs=self.edges,
+            update_nodes_attributes=[('input_data', {'shape': input_shape}),
+                                     ('reshape', {'dim': new_shape}),
+                                     ('reshape_data', {'shape': new_shape})]
+        )
+        graph.graph['layout'] = 'NHWC'
+        # add permute attrs to reshape
+        reshape = Node(graph, 'reshape')
+        PermuteAttrs.create_permute_attrs(reshape, attrs=[('dim', 'output:0')])
+
+        tested_pattern = PermuteForReshape()
+        tested_pattern.find_and_replace_pattern(graph)
+        graph_ref = build_graph_with_attrs(
+            nodes_with_attrs=self.nodes + self.permute_nodes,
+            edges_with_attrs=self.edges[1:] + self.permute_edges,
+            update_nodes_attributes=[('input_data', {'shape': input_shape}),
+                                     ('reshape', {'dim': new_shape}),
+                                     ('reshape_data', {'shape': new_shape}),
+                                     ('permute_data', {'shape': nhwc_shape})]
+        )
+        # check graphs equality
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='reshape_data')
+        self.assertTrue(flag, resp)
+
+        # check right order in the new permutation node
+        permute_order = graph.node['reshape/Permute_']['order']
+        self.assertTrue(np.all(permute_order == np.array([0, 2, 3, 1]))) # from NCHW to NHWC
+
+    def test_from_5D_to_3D(self):
+        input_shape = np.array([1, 2, 1, 3, 4])  # NCDHW; equals 1 1 3 4 2 in NDHWC layout
+        new_shape = np.array([3, 4, 2])
+        nhwc_shape = np.array([1, 1, 3, 4, 2])
+        graph = build_graph_with_attrs(
+            nodes_with_attrs=self.nodes,
+            edges_with_attrs=self.edges,
+            update_nodes_attributes=[('input_data', {'shape': input_shape}),
+                                     ('reshape', {'dim': new_shape}),
+                                     ('reshape_data', {'shape': new_shape})]
+        )
+        graph.graph['layout'] = 'NHWC'
+        # add permute attrs to reshape
+        reshape = Node(graph, 'reshape')
+        PermuteAttrs.create_permute_attrs(reshape, attrs=[('dim', 'output:0')])
+
+        tested_pattern = PermuteForReshape()
+        tested_pattern.find_and_replace_pattern(graph)
+        graph_ref = build_graph_with_attrs(
+            nodes_with_attrs=self.nodes + self.permute_nodes,
+            edges_with_attrs=self.edges[1:] + self.permute_edges,
+            update_nodes_attributes=[('input_data', {'shape': input_shape}),
+                                     ('reshape', {'dim': new_shape}),
+                                     ('reshape_data', {'shape': new_shape}),
+                                     ('permute_data', {'shape': nhwc_shape})]
+        )
+        # check graphs equality
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='reshape_data')
+        self.assertTrue(flag, resp)
+
+        # check right order in the new permutation node
+        permute_order = graph.node['reshape/Permute_']['order']
+        self.assertTrue(np.all(permute_order == np.array([0, 2, 3, 4, 1])))  # from NCDHW to NDHWC
diff --git a/model-optimizer/extensions/back/ShufflenetReLUReorder_test.py b/model-optimizer/extensions/back/ShufflenetReLUReorder_test.py
new file mode 100644 (file)
index 0000000..27c0f34
--- /dev/null
@@ -0,0 +1,148 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.back.ShufflenetReLUReorder import ShufflenetReLUReorder
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+# The dictionary with node attributes used to build various graphs. A key is the name of the node and the value is the
+# dictionary with node attributes.
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # ReLU
+    'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'ReLU'},
+    'relu_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Reshape layers
+    'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'reshape_3': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_3_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Transpose layer
+    'transpose_1': {'type': 'Permute', 'kind': 'op', 'op': 'Transpose'},
+    'transpose_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Conv layer
+    'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2d'},
+    'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+class ShufflenetReLUReorderTests(unittest.TestCase):
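+    # The pass is expected to move the ReLU from before the Reshape -> Permute -> Reshape (channel shuffle)
+    # chain to right before the convolution; test_2_neg checks that a graph without the leading ReLU is left
+    # unchanged.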
+    def test_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'relu_1'),
+                             ('relu_1', 'relu_1_data'),
+                             ('relu_1_data', 'reshape_1'),
+                             ('reshape_1', 'reshape_1_data'),
+                             ('reshape_1_data', 'transpose_1'),
+                             ('transpose_1', 'transpose_1_data'),
+                             ('transpose_1_data', 'reshape_2'),
+                             ('reshape_2', 'reshape_2_data'),
+                             ('reshape_2_data', 'conv_1'),
+                             ('conv_1', 'conv_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])},
+                             'relu_1_data': {'shape': np.array([1, 227, 227, 112])},
+                             'reshape_1_data': {'shape': np.array([227, 227, 4, 28])},
+                             'transpose_1': {'order': np.array([0, 1, 3, 2])},
+                             'transpose_1_data': {'shape': np.array([227, 227, 28, 4])},
+                             'reshape_2_data': {'shape': np.array([1, 227, 227, 112])},
+                             'conv_1_data': {'shape': np.array([1, 227, 227, 112])},
+                             'conv_1': {'pad': np.array([1, 1])}
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'transpose_1'),
+                                 ('transpose_1', 'transpose_1_data'),
+                                 ('transpose_1_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data'),
+                                 ('reshape_2_data', 'relu_1'),
+                                 ('relu_1', 'relu_1_data'),
+                                 ('relu_1_data', 'conv_1'),
+                                 ('conv_1', 'conv_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])},
+                                 'relu_1_data': {'shape': np.array([1, 227, 227, 112])},
+                                 'reshape_1_data': {'shape': np.array([227, 227, 4, 28])},
+                                 'transpose_1': {'order': np.array([0, 1, 3, 2])},
+                                 'transpose_1_data': {'shape': np.array([227, 227, 28, 4])},
+                                 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])},
+                                 'conv_1_data': {'shape': np.array([1, 227, 227, 112])},
+                                 })
+
+        pattern = ShufflenetReLUReorder()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_2_neg(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'reshape_1'),
+                             ('reshape_1', 'reshape_1_data'),
+                             ('reshape_1_data', 'transpose_1'),
+                             ('transpose_1', 'transpose_1_data'),
+                             ('transpose_1_data', 'reshape_2'),
+                             ('reshape_2', 'reshape_2_data'),
+                             ('reshape_2_data', 'conv_1'),
+                             ('conv_1', 'conv_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])},
+                             'relu_1_data': {'shape': np.array([1, 227, 227, 112])},
+                             'reshape_1_data': {'shape': np.array([227, 227, 4, 28])},
+                             'transpose_1': {'order': np.array([0, 1, 3, 2])},
+                             'transpose_1_data': {'shape': np.array([227, 227, 28, 4])},
+                             'reshape_2_data': {'shape': np.array([1, 227, 227, 112])},
+                             'conv_1_data': {'shape': np.array([1, 227, 227, 112])},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'transpose_1'),
+                                 ('transpose_1', 'transpose_1_data'),
+                                 ('transpose_1_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data'),
+                                 ('reshape_2_data', 'conv_1'),
+                                 ('conv_1', 'conv_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])},
+                                 'relu_1_data': {'shape': np.array([1, 227, 227, 112])},
+                                 'reshape_1_data': {'shape': np.array([227, 227, 4, 28])},
+                                 'transpose_1': {'order': np.array([0, 1, 3, 2])},
+                                 'transpose_1_data': {'shape': np.array([227, 227, 28, 4])},
+                                 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])},
+                                 'conv_1_data': {'shape': np.array([1, 227, 227, 112])},
+                                 })
+
+        pattern = ShufflenetReLUReorder()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/back/TileReshaper_test.py b/model-optimizer/extensions/back/TileReshaper_test.py
new file mode 100644 (file)
index 0000000..5c43219
--- /dev/null
@@ -0,0 +1,70 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.back.TileReshaper import TileReshaper
+from mo.ops.tile import Tile
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+# The dictionary with node attributes used to build various graphs. A key is the name of the node and the value is the
+# dictionary with node attributes.
+nodes_attributes = {
+    'previous_data': {'shape': np.array([1, 1, 101]), 'kind': 'data'},
+    'tile': {'type': 'Tile', 'kind': 'op', 'axis': 1, 'tiles': 16, 'infer': Tile.infer},
+    'tile_data': {'shape': np.array([1, 16, 101]), 'kind': 'data'},
+    'next_op': {'kind': 'op', 'op': 'SomeOp'},
+}
+edge_attributes = [
+    ('previous_data', 'tile'),
+    ('tile', 'tile_data'),
+    ('tile_data', 'next_op'),
+]
+
+nodes_attributes_ref = {
+    'previous_data': {'kind': 'data', 'shape': np.array([1, 1, 101])},
+    'reshape_op_before': {'type': 'Reshape', 'kind': 'op', 'dim': [1, 1, 101, 1]},
+    'reshape_data_before': {'kind': 'data', 'shape': np.array([1, 1, 101, 1])},
+    'tile': {'type': 'Tile', 'kind': 'op', 'infer': Tile.infer, 'axis': 1, 'tiles': 16},
+    'tile_data': {'shape': np.array([1, 16, 101, 1]), 'kind': 'data'},
+    'reshape_op_after': {'type': 'Reshape', 'kind': 'op', 'dim': [1, 16, 101]},
+    'reshape_data_after': {'kind': 'data', 'shape': np.array([1, 16, 101])},
+    'next_op': {'kind': 'op', 'op': 'SomeOp'},
+}
+edge_attributes_ref = [
+    ('previous_data', 'reshape_op_before'),
+    ('reshape_op_before', 'reshape_data_before'),
+    ('reshape_data_before', 'tile'),
+    ('tile', 'tile_data'),
+    ('tile_data', 'reshape_op_after'),
+    ('reshape_op_after', 'reshape_data_after'),
+    ('reshape_data_after', 'next_op')
+]
+
+
+class TileReshaperTests(unittest.TestCase):
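+    # The reference graph expects TileReshaper to wrap the 3D Tile with a Reshape to 4D before it and a
+    # Reshape back to 3D after it, so that Tile operates on a 4D tensor.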
+    def test_tile_reshaper(self):
+        graph = build_graph(nodes_attributes, edge_attributes)
+
+        graph_ref = build_graph(nodes_attributes_ref, edge_attributes_ref)
+
+        pattern = TileReshaper()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'next_op', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/back/insert_compatibility_l2normalization_test.py b/model-optimizer/extensions/back/insert_compatibility_l2normalization_test.py
new file mode 100644 (file)
index 0000000..a1296ac
--- /dev/null
@@ -0,0 +1,52 @@
+"""
+ Copyright (c) 2017-2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.back.insert_compatibility_l2normalization import CompatibilityL2NormalizationPattern
+from mo.utils.unittest.graph import build_graph
+
+
+class CompatibilityL2NormalizationPatternTest(unittest.TestCase):
+    nodes = {
+        'input_node': {
+            'kind': 'data'
+        },
+        'l2norm_node': {
+            'op': 'Normalize',
+            'kind': 'op',
+            'type': 'Normalize',
+        },
+        'output_node': {
+            'kind': 'data'
+        }
+    }
+
+    def test_insert_data(self):
+        graph = build_graph(self.nodes, [('input_node', 'l2norm_node'), ('l2norm_node', 'output_node')],
+                            {'input_node': {'shape': np.array([1, 10])},
+                             })
+        CompatibilityL2NormalizationPattern().find_and_replace_pattern(graph)
+        self.assertEqual(len(graph.nodes()), 4)
+        self.assertEqual(graph.node['l2norm_node_weights']['name'], 'l2norm_node_weights')
+        self.assertEqual(len(graph.node['l2norm_node_weights']['value']), 10)
+
+        expect_value = np.full([10], 1.0, np.float32)
+
+        for i, val in enumerate(expect_value):
+            self.assertEqual(graph.node['l2norm_node_weights']['value'][i], val)
diff --git a/model-optimizer/extensions/back/kaldi_remove_memory_output_test.py b/model-optimizer/extensions/back/kaldi_remove_memory_output_test.py
new file mode 100644 (file)
index 0000000..c72351c
--- /dev/null
@@ -0,0 +1,51 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.back.kaldi_remove_memory_output import KaldiRemoveMemoryOutputBackReplacementPattern
+from mo.utils.unittest.graph import build_graph
+
+
+class KaldiRemoveMemoryOutputTest(unittest.TestCase):
+    nodes = {
+        'input_node': {
+            'kind': 'data'
+        },
+        'memory_node': {
+            'op': 'Memory',
+            'kind': 'op'
+        },
+        'output_node': {
+            'kind': 'data'
+        }
+    }
+
+    def test_remove_out_data_for_memory(self):
+        graph = build_graph(self.nodes, [('input_node', 'memory_node')])
+        # Needed for matching in the pattern: the edge memory_node->output_node must contain only the attribute 'out' = 0.
+        # build_graph creates the edge memory_node->output_node with both 'in' and 'out' attributes.
+        graph.add_node('output_node', is_output=True, **self.nodes['output_node'])
+        graph.add_edge('memory_node', 'output_node', out=0)
+        KaldiRemoveMemoryOutputBackReplacementPattern().find_and_replace_pattern(graph)
+        self.assertNotIn('output_node', graph.node)
+
+    def test_do_not_remove_out_data_for_memory(self):
+        graph = build_graph(self.nodes, [('input_node', 'memory_node')])
+        graph.add_node('output_node', **self.nodes['output_node'])
+        graph.add_edge('memory_node', 'output_node', out=0)
+        KaldiRemoveMemoryOutputBackReplacementPattern().find_and_replace_pattern(graph)
+        self.assertIn('output_node', graph.node)
diff --git a/model-optimizer/extensions/back/remove_last_softmax_test.py b/model-optimizer/extensions/back/remove_last_softmax_test.py
new file mode 100644 (file)
index 0000000..29a0173
--- /dev/null
@@ -0,0 +1,51 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.back.remove_last_softmax_pattern import RemoveLastSoftMaxPattern
+from mo.utils.unittest.graph import build_graph
+
+
+class KaldiRemoveLastSoftMaxTest(unittest.TestCase):
+    nodes = {
+        'input_node': {
+            'kind': 'data'
+        },
+        'softmax_node': {
+            'op': 'SoftMax',
+            'kind': 'op'
+        },
+        'output_node': {
+            'kind': 'data'
+        }
+    }
+
+    def test_remove_last_SoftMax(self):
+        graph = build_graph(self.nodes, [
+            ('input_node', 'softmax_node'),
+            ('softmax_node', 'output_node')
+        ], {'output_node': {'is_output': True}})
+        RemoveLastSoftMaxPattern().find_and_replace_pattern(graph)
+        self.assertNotIn('softmax_node', graph.node)
+
+    def test_do_not_remove_no_last_SoftMax(self):
+        graph = build_graph(self.nodes, [
+            ('input_node', 'softmax_node'),
+            ('softmax_node', 'output_node')
+        ])
+        RemoveLastSoftMaxPattern().find_and_replace_pattern(graph)
+        self.assertIn('softmax_node', graph.node)
diff --git a/model-optimizer/extensions/front/caffe/accum_ext_test.py b/model-optimizer/extensions/front/caffe/accum_ext_test.py
new file mode 100644 (file)
index 0000000..f67e745
--- /dev/null
@@ -0,0 +1,68 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.accum_ext import AccumFrontExtractor
+from extensions.ops.accum import AccumOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeAccumProtoLayer:
+    def __init__(self, val):
+        self.accum_param = val
+
+
+class TestAccumExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Accum'] = AccumOp
+
+    def test_accum_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, AccumFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.accum_ext.collect_attributes')
+    def test_accum_ext(self, collect_attributes_mock):
+        params = {
+            'top_height': 200,
+            'top_width': 300,
+            'size_divisible_by': 3,
+            'have_reference': 'False',
+        }
+        collect_attributes_mock.return_value = {
+            **params,
+            'have_reference': 0
+        }
+
+        fake_pl = FakeAccumProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        AccumFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "Accum",
+            'top_height': 200,
+            'top_width': 300,
+            'size_divisible_by': 3,
+            'have_reference': 0,
+            'infer': AccumOp.accum_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/argmax_ext_test.py b/model-optimizer/extensions/front/caffe/argmax_ext_test.py
new file mode 100644 (file)
index 0000000..39547d1
--- /dev/null
@@ -0,0 +1,64 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.argmax_ext import ArgMaxFrontExtractor
+from extensions.ops.argmax import ArgMaxOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeArgMaxProtoLayer:
+    def __init__(self, val):
+        self.argmax_param = val
+
+
+class TestArgMaxExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['ArgMax'] = ArgMaxOp
+
+    def test_argmax_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, ArgMaxFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.argmax_ext.merge_attrs')
+    def test_argmax_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'out_max_val': True,
+            'top_k': 100,
+            'axis': 2
+        }
+        merge_attrs_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakeArgMaxProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        ArgMaxFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'out_max_val': True,
+            'top_k': 100,
+            'axis': 2,
+            'infer': ArgMaxOp.argmax_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/axpy_test.py b/model-optimizer/extensions/front/caffe/axpy_test.py
new file mode 100644 (file)
index 0000000..01e535c
--- /dev/null
@@ -0,0 +1,44 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+from extensions.front.caffe.axpy import AxpyToEltwise
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph_with_edge_attrs
+
+
+class TestAxpyReplacer(unittest.TestCase):
+    def test_axpy(self):
+        nodes = {
+            'node_1': {'kind': 'op', 'type': 'Identity', 'op': 'Placeholder'},
+            'node_2': {'kind': 'op', 'type': 'Identity', 'op': 'Placeholder'},
+            'node_3': {'kind': 'op', 'type': 'Identity', 'op': 'Placeholder'},
+            'axpy': {'type': 'Axpy', 'kind': 'op', 'op': 'Axpy'},
+            'node_4': {'kind': 'op', 'type': 'Identity', 'op': 'Placeholder'}}
+        edges = [
+            ('node_1', 'axpy', {'in': 0}),
+            ('node_2', 'axpy', {'in': 1}),
+            ('node_3', 'axpy', {'in': 2}),
+            ('axpy', 'node_4', {'in': 0})]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        node = Node(graph, 'axpy')
+        replacer = AxpyToEltwise()
+        replacer.replace_op(graph, node)
+
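+        # The Axpy layer (a*x + y) is expected to be decomposed into a ScaleShift (a*x) followed by an Eltwise sum with y.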
+        scale_node = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'ScaleShift']
+        self.assertEqual(len(scale_node), 1)
+        add_node = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'Eltwise']
+        self.assertEqual(len(add_node), 1)
diff --git a/model-optimizer/extensions/front/caffe/bn_test.py b/model-optimizer/extensions/front/caffe/bn_test.py
new file mode 100644 (file)
index 0000000..f075e50
--- /dev/null
@@ -0,0 +1,67 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import numpy as np
+import unittest
+
+from extensions.front.caffe.bn import BNToScaleShift
+from mo.graph.graph import Node
+from mo.utils.unittest.extractors import FakeParam
+from mo.utils.unittest.graph import build_graph_with_edge_attrs
+
+
+class FakeBNProtoLayer:
+    def __init__(self, val):
+        self.bn_param = val
+
+
+class FakeBNBinLayer:
+    def __init__(self, val):
+        self.blobs = val
+
+
+class TestBNReplacer(unittest.TestCase):
+    def test_bn(self):
+        bn_pb = FakeBNProtoLayer(FakeParam('eps', 0.0001))
+        mean = [1, 2.5, 3]
+        var = [0.5, 0.1, 1.2]
+        scale = [2.3, 3.4, 4.5]
+        shift = [0.8, 0.6, 0.4]
+        bn_bin = FakeBNBinLayer([FakeParam('data', mean),
+                                 FakeParam('data', var),
+                                 FakeParam('data', scale),
+                                 FakeParam('data', shift)])
+        nodes = {
+            'node_1': {'kind': 'op', 'type': 'Identity', 'op': 'Placeholder'},
+            'bn': {'type': 'BN', 'kind': 'op', 'op': 'BN',
+                   'pb': bn_pb,
+                   'model_pb': bn_bin},
+            'node_2': {'kind': 'op', 'type': 'Identity', 'op': 'Placeholder'}}
+        edges = [
+            ('node_1', 'bn', {'in': 0}),
+            ('bn', 'node_2', {'in': 0})]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        node = Node(graph, 'bn')
+        replacer = BNToScaleShift()
+        replacer.replace_op(graph, node)
+
+        scale_node = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'ScaleShift']
+        self.assertEqual(len(scale_node), 1)
+
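+        # Reference values correspond to folding the four BN blobs (read as scale, shift, mean, variance)
+        # into a ScaleShift: scale' = scale / sqrt(variance + eps), shift' = shift - mean * scale'.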
+        scale_ref = np.array([1.11796412, 3.2272172, 4.74282367])
+        shift_ref = np.array([-2.07131747, -10.87253847, -20.14270653])
+        for i in range(len(mean)):
+            self.assertAlmostEqual(graph.node[scale_node[0]]['scale'][i], scale_ref[i])
+            self.assertAlmostEqual(graph.node[scale_node[0]]['bias'][i], shift_ref[i])
diff --git a/model-optimizer/extensions/front/caffe/conv_ext_test.py b/model-optimizer/extensions/front/caffe/conv_ext_test.py
new file mode 100644 (file)
index 0000000..49c8b0b
--- /dev/null
@@ -0,0 +1,349 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+import numpy as np
+
+from extensions.front.caffe.conv_ext import ConvFrontExtractor, DeconvFrontExtractor, conv_create_attrs, conv_set_params
+from mo.front.caffe.extractors.utils import get_list_from_container
+from mo.utils.error import Error
+from mo.utils.unittest.extractors import PB, FakeParam, FakeMultiParam
+
+
+class FakeConvProtoLayer:
+    def __init__(self, val):
+        self.convolution_param = val
+        self.bottom = [0]
+
+
+class TestConvShapesParsing(unittest.TestCase):
+    def test_conv_no_pb_no_ml(self):
+        node = PB({'pb': None})
+        self.assertRaises(Error, ConvFrontExtractor.extract, node)
+
+    @patch('extensions.front.caffe.conv_ext.weights_biases')
+    @patch('extensions.front.caffe.conv_ext.layout_attrs')
+    def test_conv_ext_ideal_numbers(self, weights_biases_mock, layout_attrs_mock):
+        weights_biases_mock.return_value = {}
+        layout_attrs_mock.return_value = {}
+        params = {
+            'pad': 10,
+            'kernel_size': 11,
+            'stride': 12,
+            'dilation': 13,
+            'group': 14,
+            'num_output': 15,
+            'bias_term': True
+        }
+        node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))})
+        ConvFrontExtractor.extract(node)
+        res = node
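+        # Scalar Caffe convolution parameters are expected to be expanded into 4D NCHW-ordered attributes.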
+        exp_res = {
+            'op': 'Conv2D',
+            'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]),
+            'pad_spatial_shape': np.array([[10, 10], [10, 10]]),
+            'stride': np.array([1, 1, 12, 12]),
+            'kernel_spatial': np.array([11, 11]),
+            'dilation': np.array([1, 1, 13, 13]),
+            'group': 14,
+            'bias_addable': True,
+            'bias_term': True,
+        }
+        self.assertTrue(weights_biases_mock.called)
+        self.assertTrue(layout_attrs_mock.called)
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
+                np.testing.assert_equal(res[key], exp_res[key])
+            else:
+                self.assertEqual(res[key], exp_res[key])
+
+    @patch('extensions.front.caffe.conv_ext.weights_biases')
+    @patch('extensions.front.caffe.conv_ext.layout_attrs')
+    def test_conv_ext_empty_numbers(self, weights_biases_mock, layout_attrs_mock):
+        weights_biases_mock.return_value = {}
+        layout_attrs_mock.return_value = {}
+        params = {
+            'pad': None,
+            'kernel_size': None,
+            'stride': None,
+            'dilation': None,
+            'group': 14,
+            'num_output': 15,
+            'bias_term': True,
+            'pad_w': 3,
+            'pad_h': 4,
+            'kernel_w': 5,
+            'kernel_h': 6,
+            'stride_h': 3,
+            'stride_w': 2,
+        }
+        node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))})
+        ConvFrontExtractor.extract(node)
+        res = node
+        exp_res = {
+            'op': 'Conv2D',
+            'pad': np.array([[0, 0], [0, 0], [4, 4], [3, 3]]),
+            'pad_spatial_shape': np.array([[4, 4], [3, 3]]),
+            'stride': np.array([1, 1, 3, 2]),
+            'kernel_spatial': np.array([6, 5]),
+            'dilation': np.array([1, 1, 1, 1]),
+            'group': 14,
+            'bias_addable': True,
+            'bias_term': True,
+        }
+        self.assertTrue(weights_biases_mock.called)
+        self.assertTrue(layout_attrs_mock.called)
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
+                np.testing.assert_equal(res[key], exp_res[key])
+            else:
+                self.assertEqual(res[key], exp_res[key])
+
+    def test_attrs(self):
+        params = {
+            'type_str': 'Conv2D',
+            'padding': [10, 10],
+            'stride': [12, 12],
+            'kernel': [11, 11],
+            'dilate': [13, 13],
+            'group': 14,
+            'output': 13,
+            'bias_term': True
+        }
+
+        res = conv_create_attrs(params)
+
+        exp_res = {
+            'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]),
+            'pad_spatial_shape': np.array([[10, 10], [10, 10]]),
+            'stride': np.array([1, 1, 12, 12]),
+            'kernel_spatial': np.array([11, 11]),
+            'dilation': np.array([1, 1, 13, 13]),
+            'group': 14,
+            'bias_addable': True,
+            'bias_term': True,
+            'output_spatial_shape': None,
+            'output_shape': None,
+            'output': 13,
+        }
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
+                np.testing.assert_equal(res[key], exp_res[key])
+            else:
+                self.assertEqual(res[key], exp_res[key])
+
+    def test_get_list_from_container_no_existing_param(self):
+        res = get_list_from_container(FakeParam("p", "1"), 'prop', int)
+        self.assertEqual(res, [])
+
+    def test_get_list_from_container_no_param(self):
+        res = get_list_from_container(None, 'prop', int)
+        self.assertEqual(res, [])
+
+    def test_get_list_from_container_simple_type_match(self):
+        res = get_list_from_container(FakeParam('prop', 10), 'prop', int)
+        self.assertEqual(res, [10])
+
+    def test_get_list_from_container_list_match(self):
+        res = get_list_from_container(FakeParam('prop', [10, 11]), 'prop', int)
+        self.assertEqual(res, [10, 11])
+
+    def test_get_list_from_container_list_match_empty(self):
+        res = get_list_from_container(FakeParam('prop', []), 'prop', int)
+        self.assertEqual(res, [])
+
+    def test_params_creation(self):
+        params = {
+            'pad': None,
+            'kernel_size': None,
+            'stride': None,
+            'dilation': None,
+            'group': 14,
+            'num_output': 15,
+            'bias_term': True,
+            'pad_w': 3,
+            'pad_h': 4,
+            'kernel_w': 5,
+            'kernel_h': 6,
+            'stride_h': 3,
+            'stride_w': 2,
+        }
+        exp_res = {
+            'padding': [3, 4],
+            'stride': [2, 3],
+            'kernel': [5, 6],
+            'dilate': [1, 1],
+            'group': 14,
+            'output': 15
+        }
+        res = conv_set_params(FakeConvProtoLayer(FakeMultiParam(params)).convolution_param, 'Conv2D')
+
+        for key in exp_res.keys():
+            if key in ('padding', 'stride', 'kernel', 'dilate'):
+                np.testing.assert_equal(res[key], exp_res[key])
+            else:
+                self.assertEqual(res[key], exp_res[key])
+
+
+class TestDeconvShapesParsing(unittest.TestCase):
+    def test_deconv_no_pb_no_ml(self):
+        node = PB({'pb': None})
+        self.assertRaises(Error, DeconvFrontExtractor.extract, node)
+
+    @patch('extensions.front.caffe.conv_ext.weights_biases')
+    @patch('extensions.front.caffe.conv_ext.layout_attrs')
+    def test_deconv_ext_ideal_numbers(self, weights_biases_mock, layout_attrs_mock):
+        weights_biases_mock.return_value = {}
+        layout_attrs_mock.return_value = {}
+        params = {
+            'pad': 10,
+            'kernel_size': 11,
+            'stride': 12,
+            'dilation': 13,
+            'group': 14,
+            'num_output': 15,
+            'bias_term': True
+        }
+        node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))})
+        DeconvFrontExtractor.extract(node)
+        res = node
+        exp_res = {
+            'op': 'Deconv2D',
+            'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]),
+            'pad_spatial_shape': np.array([[10, 10], [10, 10]]),
+            'stride': np.array([1, 1, 12, 12]),
+            'kernel_spatial': np.array([11, 11]),
+            'dilation': np.array([1, 1, 13, 13]),
+            'group': 14,
+            'bias_addable': True,
+        }
+        self.assertTrue(weights_biases_mock.called)
+        self.assertTrue(layout_attrs_mock.called)
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
+                np.testing.assert_equal(res[key], exp_res[key])
+            else:
+                self.assertEqual(res[key], exp_res[key])
+
+    @patch('extensions.front.caffe.conv_ext.weights_biases')
+    @patch('extensions.front.caffe.conv_ext.layout_attrs')
+    def test_deconv_ext_false_bias_term(self, weights_biases_mock, layout_attrs_mock):
+        weights_biases_mock.return_value = {}
+        layout_attrs_mock.return_value = {}
+        params = {
+            'pad': 10,
+            'kernel_size': 11,
+            'stride': 12,
+            'dilation': 13,
+            'group': 14,
+            'num_output': 15,
+            'bias_term': False
+        }
+        node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))})
+        DeconvFrontExtractor.extract(node)
+        res = node
+        exp_res = {
+            'op': 'Deconv2D',
+            'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]),
+            'pad_spatial_shape': np.array([[10, 10], [10, 10]]),
+            'stride': np.array([1, 1, 12, 12]),
+            'kernel_spatial': np.array([11, 11]),
+            'dilation': np.array([1, 1, 13, 13]),
+            'group': 14,
+            'bias_addable': True,
+            'bias_term': False,
+        }
+        self.assertTrue(weights_biases_mock.called)
+        self.assertTrue(layout_attrs_mock.called)
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation', 'bias_term'):
+                np.testing.assert_equal(res[key], exp_res[key])
+            else:
+                self.assertEqual(res[key], exp_res[key])
+
+    @patch('extensions.front.caffe.conv_ext.weights_biases')
+    @patch('extensions.front.caffe.conv_ext.layout_attrs')
+    def test_deconv_ext_empty_numbers(self, weights_biases_mock, layout_attrs_mock):
+        weights_biases_mock.return_value = {}
+        layout_attrs_mock.return_value = {}
+        params = {
+            'pad': None,
+            'kernel_size': None,
+            'stride': None,
+            'dilation': None,
+            'group': 14,
+            'num_output': 15,
+            'bias_term': True,
+            'pad_w': 3,
+            'pad_h': 4,
+            'kernel_w': 5,
+            'kernel_h': 6,
+            'stride_h': 3,
+            'stride_w': 2,
+        }
+        node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))})
+        DeconvFrontExtractor.extract(node)
+        res = node
+        exp_res = {
+            'op': 'Deconv2D',
+            'pad': np.array([[0, 0], [0, 0], [4, 4], [3, 3]]),
+            'pad_spatial_shape': np.array([[4, 4], [3, 3]]),
+            'stride': np.array([1, 1, 3, 2]),
+            'kernel_spatial': np.array([6, 5]),
+            'dilation': np.array([1, 1, 1, 1]),
+            'group': 14,
+            'bias_addable': True,
+        }
+        self.assertTrue(weights_biases_mock.called)
+        self.assertTrue(layout_attrs_mock.called)
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
+                np.testing.assert_equal(res[key], exp_res[key])
+            else:
+                self.assertEqual(res[key], exp_res[key])
+
+    def test_attrs(self):
+        params = {
+            'type_str': 'Deconv2D',
+            'padding': [10, 10],
+            'stride': [12, 12],
+            'kernel': [11, 11],
+            'dilate': [13, 13],
+            'group': 14,
+            'output': 13,
+            'bias_term': True
+        }
+        res = conv_create_attrs(params)
+
+        exp_res = {
+            'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]),
+            'pad_spatial_shape': np.array([[10, 10], [10, 10]]),
+            'stride': np.array([1, 1, 12, 12]),
+            'kernel_spatial': np.array([11, 11]),
+            'dilation': np.array([1, 1, 13, 13]),
+            'group': 14,
+            'bias_addable': True,
+            'output_spatial_shape': None,
+            'output_shape': None,
+            'output': 13,
+        }
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
+                np.testing.assert_equal(res[key], exp_res[key])
+            else:
+                self.assertEqual(res[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/correlation_ext_test.py b/model-optimizer/extensions/front/caffe/correlation_ext_test.py
new file mode 100644 (file)
index 0000000..de4b74c
--- /dev/null
@@ -0,0 +1,77 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.correlation_ext import CorrelationFrontExtractor
+from extensions.ops.correlation import CorrelationOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeCorrProtoLayer:
+    def __init__(self, val):
+        self.correlation_param = val
+
+
+class TestCorrelationExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Correlation'] = CorrelationOp
+
+    def test_correlation_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, CorrelationFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.correlation_ext.merge_attrs')
+    def test_correlation_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'pad': 20,
+            'kernel_size': 1,
+            'max_displacement': 20,
+            'stride_1': 1,
+            'stride_2': 2,
+            'single_direction': 0,
+            'do_abs': False,
+            'correlation_type': 'caffe.CorrelationParameter.MULTIPLY'
+        }
+        merge_attrs_mock.return_value = {
+            **params,
+            'test': 54,
+            'test2': 'test3'
+        }
+
+        fake_pl = FakeCorrProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        CorrelationFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "Correlation",
+            'pad': 20,
+            'kernel_size': 1,
+            'max_displacement': 20,
+            'stride_1': 1,
+            'stride_2': 2,
+            'single_direction': 0,
+            'do_abs': False,
+            'correlation_type': 'caffe.CorrelationParameter.MULTIPLY',
+            'infer': CorrelationOp.corr_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/ctcgreedydecoder_ext_test.py b/model-optimizer/extensions/front/caffe/ctcgreedydecoder_ext_test.py
new file mode 100644 (file)
index 0000000..07b724e
--- /dev/null
@@ -0,0 +1,61 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.ctcgreedydecoder_ext import CTCGreedyDecoderFrontExtractor
+from extensions.ops.ctc_greedy_decoder import CTCGreedyDecoderOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeCTCGreedyDecoderProtoLayer:
+    def __init__(self, val):
+        self.ctc_decoder_param = val
+
+
+class TestCTCGreedyDecoderExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['CTCGreedyDecoder'] = CTCGreedyDecoderOp
+
+    def test_ctcgreedydecoder_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, CTCGreedyDecoderFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.ctcgreedydecoder_ext.merge_attrs')
+    def test_ctcgreedydecoder_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'ctc_merge_repeated': True
+        }
+        merge_attrs_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakeCTCGreedyDecoderProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        CTCGreedyDecoderFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "CTCGreedyDecoder",
+            'ctc_merge_repeated': 1,
+            'infer': CTCGreedyDecoderOp.ctc_greedy_decoder_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/data_augmentation_ext_test.py b/model-optimizer/extensions/front/caffe/data_augmentation_ext_test.py
new file mode 100644 (file)
index 0000000..0dd0aba
--- /dev/null
@@ -0,0 +1,94 @@
+"""
+ Copyright (c) 2017-2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+import numpy as np
+
+from extensions.front.caffe.data_augmentation_ext import DataAugmentationFrontExtractor
+from extensions.ops.data_augmentation import DataAugmentationOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeDAProtoLayer:
+    def __init__(self, val):
+        self.augmentation_param = val
+
+
+class TestDA(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['DataAugmentation'] = DataAugmentationOp
+
+    def test_da_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, DataAugmentationFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.data_augmentation_ext.merge_attrs')
+    def test_da_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'crop_width': 0,
+            'crop_height': 0,
+            'write_augmented': "",
+            'max_multiplier': 255.0,
+            'augment_during_test': True,
+            'recompute_mean': 0,
+            'write_mean': "",
+            'mean_per_pixel': False,
+            'mean': 0,
+            'mode': "add",
+            'bottomwidth': 0,
+            'bottomheight': 0,
+            'num': 0,
+            'chromatic_eigvec': [0.0]
+        }
+        merge_attrs_mock.return_value = {
+            **params,
+            'test': 54,
+            'test2': 'test3'
+        }
+        fake_pl = FakeDAProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        DataAugmentationFrontExtractor.extract(fake_node)
+        exp_res = {
+            'type': 'DataAugmentation',
+            'op': 'DataAugmentation',
+            'crop_width': 0,
+            'crop_height': 0,
+            'write_augmented': "",
+            'max_multiplier': 255.0,
+            'augment_during_test': 1,
+            'recompute_mean': 0,
+            'write_mean': "",
+            'mean_per_pixel': 0,
+            'mean': 0,
+            'mode': "add",
+            'bottomwidth': 0,
+            'bottomheight': 0,
+            'num': 0,
+            'chromatic_eigvec': [0.0],
+            'infer': DataAugmentationOp.data_augmentation_infer
+        }
+
+        for key in exp_res.keys():
+            if key in ('chromatic_eigvec',):
+                np.testing.assert_equal(exp_res[key], fake_node[key])
+            else:
+                self.assertEqual(exp_res[key], fake_node[key])
diff --git a/model-optimizer/extensions/front/caffe/grn_ext_test.py b/model-optimizer/extensions/front/caffe/grn_ext_test.py
new file mode 100644 (file)
index 0000000..e284a8a
--- /dev/null
@@ -0,0 +1,62 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.grn_ext import GRNFrontExtractor
+from extensions.ops.grn import GRNOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.ops.op import Op
+
+
+class FakeGRNProtoLayer:
+    def __init__(self, val):
+        self.grn_param = val
+
+
+class TestGRNExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['GRN'] = GRNOp
+
+    def test_grn_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, GRNFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.grn_ext.merge_attrs')
+    def test_grn_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'bias': 0.7
+        }
+        merge_attrs_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakeGRNProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        GRNFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "GRN",
+            'bias': 0.7,
+            'infer': copy_shape_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/interp_ext_test.py b/model-optimizer/extensions/front/caffe/interp_ext_test.py
new file mode 100644 (file)
index 0000000..ecbf114
--- /dev/null
@@ -0,0 +1,72 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.interp_ext import InterpFrontExtractor
+from extensions.ops.interp import InterpOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeInterpProtoLayer:
+    def __init__(self, val):
+        self.interp_param = val
+
+
+class TestInterpExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Interp'] = InterpOp
+
+    def test_interp_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, InterpFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.interp_ext.merge_attrs')
+    def test_interp_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'height': 1.1,
+            'width': 2.2,
+            'zoom_factor': 3.3,
+            'shrink_factor': 4.4,
+            'pad_beg': 5.5,
+            'pad_end': 6.6
+        }
+        merge_attrs_mock.return_value = {
+            **params,
+            'test': 54,
+            'test2': 'test3'
+        }
+
+        fake_pl = FakeInterpProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+        InterpFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "Interp",
+            'height': 1.1,
+            'width': 2.2,
+            'zoom_factor': 3.3,
+            'shrink_factor': 4.4,
+            'pad_beg': 5.5,
+            'pad_end': 6.6,
+            'infer': InterpOp.interp_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/normalize_ext_test.py b/model-optimizer/extensions/front/caffe/normalize_ext_test.py
new file mode 100644 (file)
index 0000000..4b2c42f
--- /dev/null
@@ -0,0 +1,66 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.normalize_ext import NormalizeFrontExtractor
+from extensions.ops.normalize import NormalizeOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.ops.op import Op
+
+
+class FakeNormalizeProtoLayer:
+    def __init__(self, val):
+        self.norm_param = val
+
+
+class TestNormalizeExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Normalize'] = NormalizeOp
+
+    def test_normalize_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, NormalizeFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.normalize_ext.collect_attributes')
+    def test_normalize_ext_ideal_numbers(self, collect_attributes_mock):
+        params = {
+            'across_spatial': 1,
+            'channel_shared': 0,
+            'eps': 0.00001
+        }
+        collect_attributes_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakeNormalizeProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        NormalizeFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "Normalize",
+            'across_spatial': 1,
+            'channel_shared': 0,
+            'eps': 0.00001,
+            'infer': copy_shape_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/pooling_ext_test.py b/model-optimizer/extensions/front/caffe/pooling_ext_test.py
new file mode 100644 (file)
index 0000000..f391d93
--- /dev/null
@@ -0,0 +1,112 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.front.caffe.pooling_ext import PoolingFrontExtractor
+from mo.front.common.extractors.utils import layout_attrs
+from mo.ops.pooling import Pooling
+from mo.utils.unittest.extractors import PB, FakeMultiParam
+
+
+class FakeProtoLayer:
+    def __init__(self, val):
+        self.pooling_param = val
+
+
+class TestPooling(unittest.TestCase):
+    def test_pooling_ext_global(self):
+        params = {
+            'kernel_size': 1,
+            'stride': 2,
+            'pad': 3,
+            'pool': 0,
+            'global_pooling': 1,
+            'ceil_mode': 1
+        }
+        node = PB({'pb': FakeProtoLayer(FakeMultiParam(params))})
+        PoolingFrontExtractor.extract(node)
+        res = node
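+        # With global_pooling enabled, the explicit kernel_size/stride/pad values are expected to be ignored.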
+        exp_res = {
+            'window': np.array([1, 1, 0, 0], dtype=np.int64),
+            'stride': np.array([1, 1, 1, 1], dtype=np.int64),
+            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
+            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
+            'pool_method': 'max',
+            'exclude_pad': 'true',
+            'infer': Pooling.infer,
+            'global_pool': 1,
+            'output_spatial_shape': None,
+            'pooling_convention': 'full',
+            'rounding_type': 'ceil'
+        }
+        exp_res.update(layout_attrs())
+        for i in exp_res.keys():
+            if i in ('window', 'stride',
+                     'pad', 'pad_spatial_shape',
+                     'spatial_dims', 'batch_dims',
+                     'channel_dims'):
+                np.testing.assert_array_equal(res[i], exp_res[i])
+            else:
+                self.assertEqual(res[i], exp_res[i])
+
+    def test_pooling_ext(self):
+        params = {
+            'kernel_size': 1,
+            'stride': 2,
+            'pad': 3,
+            'pool': 1,
+            'global_pooling': 0,
+            'ceil_mode': 0
+        }
+        node = PB({'pb': FakeProtoLayer(FakeMultiParam(params))})
+        PoolingFrontExtractor.extract(node)
+        res = node
+        exp_res = {
+            'window': np.array([1, 1, 1, 1], dtype=np.int64),
+            'stride': np.array([1, 1, 2, 2], dtype=np.int64),
+            'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]], dtype=np.int64),
+            'pad_spatial_shape': np.array([[3, 3], [3, 3]], dtype=np.int64),
+            'pool_method': 'avg',
+            'exclude_pad': 'false',
+            'infer': Pooling.infer,
+            'global_pool': 0,
+            'output_spatial_shape': None,
+            'pooling_convention': 'valid'
+        }
+        exp_res.update(layout_attrs())
+        for i in exp_res.keys():
+            if i in ('window', 'stride',
+                     'pad', 'pad_spatial_shape',
+                     'spatial_dims', 'batch_dims',
+                     'channel_dims'):
+                np.testing.assert_array_equal(res[i], exp_res[i])
+            else:
+                self.assertEqual(res[i], exp_res[i])
+
+    def test_pooling_ext_exception(self):
+        params = {
+            'kernel_size': 1,
+            'stride': 2,
+            'pad': 3,
+            'pool': 3,
+            'global_pooling': 1
+        }
+        node = PB({'pb': FakeProtoLayer(FakeMultiParam(params))})
+        self.assertRaises(ValueError, PoolingFrontExtractor.extract, node)
diff --git a/model-optimizer/extensions/front/caffe/power_file_ext_test.py b/model-optimizer/extensions/front/caffe/power_file_ext_test.py
new file mode 100644 (file)
index 0000000..da06fc6
--- /dev/null
@@ -0,0 +1,64 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.power_file_ext import PowerFileFrontExtractor
+from extensions.ops.power_file import PowerFileOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.ops.op import Op
+
+
+class FakePowerFileProtoLayer:
+    def __init__(self, val):
+        self.power_file_param = val
+
+
+class TestPowerFileExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['PowerFile'] = PowerFileOp
+
+    def test_power_file_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, PowerFileFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.power_file_ext.collect_attributes')
+    def test_power_file_ext_ideal_numbers(self, collect_attributes_mock):
+        params = {
+            'normalize_variance': 'True',
+            'across_channels': 'False',
+            'eps': 1e-9
+        }
+        collect_attributes_mock.return_value = {
+            'shift_file': 'some_file_path'
+        }
+
+        fake_pl = FakePowerFileProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        PowerFileFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "PowerFile",
+            'shift_file': 'some_file_path',
+            'infer': copy_shape_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/prelu_ext_test.py b/model-optimizer/extensions/front/caffe/prelu_ext_test.py
new file mode 100644 (file)
index 0000000..2ed4370
--- /dev/null
@@ -0,0 +1,63 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.prelu_ext import PreluFrontExtractor
+from extensions.ops.prelu import PreluOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakePReLUProtoLayer:
+    def __init__(self, val):
+        self.prelu_param = val
+
+
+class TestPreluExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['PReLU'] = PreluOp
+
+    def test_prelu_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, PreluFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.prelu_ext.merge_attrs')
+    def test_prelu_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'channel_shared': False
+        }
+
+        merge_attrs_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakePReLUProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        PreluFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': 'PReLU',
+            'op': 'PReLU',
+            'channel_shared': 0,
+            'infer': PreluOp.prelu_shape_infer,
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/priorbox_clustered_ext_test.py b/model-optimizer/extensions/front/caffe/priorbox_clustered_ext_test.py
new file mode 100644 (file)
index 0000000..4ce3e32
--- /dev/null
@@ -0,0 +1,88 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+import numpy as np
+
+from extensions.front.caffe.priorbox_clustered_ext import PriorBoxClusteredFrontExtractor
+from extensions.ops.priorbox_clustered import PriorBoxClusteredOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakePriorBoxClusteredProtoLayer:
+    def __init__(self, val):
+        self.prior_box_param = val
+
+
+class TestPriorBoxClusteredExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['PriorBoxClustered'] = PriorBoxClusteredOp
+
+    def test_priorboxclustered_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, PriorBoxClusteredFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.priorbox_clustered_ext.merge_attrs')
+    def test_priorboxclustered_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'width': '30.0',
+            'height': '60.0',
+            'clip': False,
+            'flip': True,
+            'variance': np.array(['0.2', '0.3', '0.2', '0.3']),
+            'img_size': '300',
+            'img_h': '0',
+            'img_w': '0',
+            'step': '0,5',
+            'step_h': '0',
+            'step_w': '0',
+            'offset': '0.6'
+        }
+        merge_attrs_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakePriorBoxClusteredProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        PriorBoxClusteredFrontExtractor.extract(fake_node)
+
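+        # Boolean proto fields are expected to be converted to integer attributes (clip=False -> 0, flip=True -> 1).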
+        exp_res = {
+            'op': 'PriorBoxClustered',
+            'type': 'PriorBoxClustered',
+            'width': '30.0',
+            'height': '60.0',
+            'clip': 0,
+            'flip': 1,
+            'variance': np.array(['0.2', '0.3', '0.2', '0.3']),
+            'img_size': '300',
+            'img_h': '0',
+            'img_w': '0',
+            'step': '0,5',
+            'step_h': '0',
+            'step_w': '0',
+            'offset': '0.6'
+        }
+
+        for key in exp_res.keys():
+            if key in ['width', 'height', 'variance']:
+                np.testing.assert_equal(fake_node[key], exp_res[key])
+            else:
+                self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/priorbox_ext_test.py b/model-optimizer/extensions/front/caffe/priorbox_ext_test.py
new file mode 100644 (file)
index 0000000..b93a883
--- /dev/null
@@ -0,0 +1,86 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+import numpy as np
+
+from extensions.front.caffe.priorbox_ext import PriorBoxFrontExtractor
+from extensions.ops.priorbox import PriorBoxOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakePriorBoxProtoLayer:
+    def __init__(self, val):
+        self.prior_box_param = val
+
+
+class TestPriorBoxExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['PriorBox'] = PriorBoxOp
+
+    def test_priorbox_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, PriorBoxFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.priorbox_ext.merge_attrs')
+    def test_priorbox_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'clip': False,
+            'flip': True,
+            'min_size': np.array([]),
+            'max_size': np.array([]),
+            'aspect_ratio': np.array([2, 3]),
+            'variance': np.array(['0.2', '0.3', '0.2', '0.3']),
+            'img_size': '300',
+            'img_h': '0',
+            'img_w': '0',
+            'step': '0,5',
+            'step_h': '0',
+            'step_w': '0',
+            'offset': '0.6'
+        }
+        merge_attrs_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakePriorBoxProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        PriorBoxFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'op': 'PriorBox',
+            'type': 'PriorBox',
+            'clip': 0,
+            'variance': np.array(['0.2', '0.3', '0.2', '0.3']),
+            'img_size': '300',
+            'img_h': '0',
+            'img_w': '0',
+            'step': '0,5',
+            'step_h': '0',
+            'step_w': '0',
+            'offset': '0.6'
+        }
+
+        for key in exp_res.keys():
+            if key in ['width', 'height', 'variance']:
+                np.testing.assert_equal(fake_node[key], exp_res[key])
+            else:
+                self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/proposal_ext_test.py b/model-optimizer/extensions/front/caffe/proposal_ext_test.py
new file mode 100644 (file)
index 0000000..ff41fb0
--- /dev/null
@@ -0,0 +1,75 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.proposal_ext import ProposalFrontExtractor
+from extensions.ops.proposal import ProposalOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeProposalProtoLayer:
+    def __init__(self, val):
+        self.proposal_param = val
+
+
+class TestProposalExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Proposal'] = ProposalOp
+
+    def test_proposal_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, ProposalFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.proposal_ext.merge_attrs')
+    def test_proposal_ext_ideal_numbers(self, merge_attrs):
+        params = {
+            'feat_stride': 1,
+            'base_size': 16,
+            'min_size': 16,
+            'ratio': 1,
+            'scale': 2,
+            'pre_nms_topn': 6000,
+            'post_nms_topn': 300,
+            'nms_thresh': 0.7
+        }
+        merge_attrs.return_value = {
+            **params
+        }
+
+        fake_pl = FakeProposalProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        ProposalFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "Proposal",
+            'feat_stride': 1,
+            'base_size': 16,
+            'min_size': 16,
+            'ratio': 1,
+            'scale': 2,
+            'pre_nms_topn': 6000,
+            'post_nms_topn': 300,
+            'nms_thresh': 0.7,
+            'infer': ProposalOp.proposal_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/proposal_python_ext_test.py b/model-optimizer/extensions/front/caffe/proposal_python_ext_test.py
new file mode 100644 (file)
index 0000000..d47f2b7
--- /dev/null
@@ -0,0 +1,122 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.proposal_python_ext import ProposalPythonFrontExtractor
+from extensions.ops.proposal import ProposalOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeProposalPythonProtoLayer:
+    def __init__(self, val):
+        self.python_param = val
+
+
+class TestProposalPythonExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Proposal'] = ProposalOp
+
+    def test_proposal_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, ProposalPythonFrontExtractor.extract, None)
+
+    @patch('mo.front.caffe.extractors.utils.merge_attrs')
+    def test_proposal_ext_ideal_numbers(self, merge_attrs):
+        params = {
+            'param_str': "'feat_stride': 16"
+        }
+        merge_attrs.return_value = params
+
+        fake_pl = FakeProposalPythonProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        ProposalPythonFrontExtractor.extract(fake_node)
+
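+        # Only 'feat_stride' is taken from param_str; the remaining attributes are expected to be the extractor defaults.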
+        exp_res = {
+            'type': "Proposal",
+            'feat_stride': 16,
+            'base_size': 16,
+            'min_size': 16,
+            'ratio': [0.5, 1, 2],
+            'scale': [8, 16, 32],
+            'pre_nms_topn': 6000,
+            'post_nms_topn': 300,
+            'nms_thresh': 0.7,
+            'infer': ProposalOp.proposal_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
+
+    @patch('mo.front.caffe.extractors.utils.merge_attrs')
+    def test_proposal_ext_scales(self, merge_attrs):
+        params = {
+            'param_str': "'feat_stride': 16, 'scales': [1,2,3], 'ratios':[5, 6,7]"
+        }
+        merge_attrs.return_value = params
+
+        fake_pl = FakeProposalPythonProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        ProposalPythonFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "Proposal",
+            'feat_stride': 16,
+            'base_size': 16,
+            'min_size': 16,
+            'ratio': [5, 6, 7],
+            'scale': [1, 2, 3],
+            'pre_nms_topn': 6000,
+            'post_nms_topn': 300,
+            'nms_thresh': 0.7,
+            'infer': ProposalOp.proposal_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
+
+    @patch('mo.front.caffe.extractors.utils.merge_attrs')
+    def test_proposal_ext_scale(self, merge_attrs):
+        params = {
+            'param_str': "'feat_stride': 16, 'scale': [1,2,3], 'ratio':[5, 6,7]"
+        }
+        merge_attrs.return_value = params
+
+        fake_pl = FakeProposalPythonProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        ProposalPythonFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "Proposal",
+            'feat_stride': 16,
+            'base_size': 16,
+            'min_size': 16,
+            'ratio': [5, 6, 7],
+            'scale': [1, 2, 3],
+            'pre_nms_topn': 6000,
+            'post_nms_topn': 300,
+            'nms_thresh': 0.7,
+            'infer': ProposalOp.proposal_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/psroipooling_ext_test.py b/model-optimizer/extensions/front/caffe/psroipooling_ext_test.py
new file mode 100644 (file)
index 0000000..f175278
--- /dev/null
@@ -0,0 +1,65 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.psroipooling_ext import PSROIPoolingFrontExtractor
+from extensions.ops.psroipooling import PSROIPoolingOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakePSROIPoolingProtoLayer:
+    def __init__(self, val):
+        self.psroi_pooling_param = val
+
+
+class TestPSROIPoolingExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['PSROIPooling'] = PSROIPoolingOp
+
+    def test_psroipooling_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, PSROIPoolingFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.psroipooling_ext.merge_attrs')
+    def test_psroipooling_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'spatial_scale': 4,
+            'output_dim': 20,
+            'group_size': 5,
+        }
+        merge_attrs_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakePSROIPoolingProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        PSROIPoolingFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "PSROIPooling",
+            'spatial_scale': 4,
+            'output_dim': 20,
+            'group_size': 5,
+            'infer': PSROIPoolingOp.psroipooling_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/regionyolo_ext_test.py b/model-optimizer/extensions/front/caffe/regionyolo_ext_test.py
new file mode 100644 (file)
index 0000000..8c37989
--- /dev/null
@@ -0,0 +1,79 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.regionyolo_ext import RegionYoloFrontExtractor
+from extensions.ops.regionyolo import RegionYoloOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeRegionYoloProtoLayer:
+    def __init__(self, val, val_f):
+        self.region_yolo_param = val
+        self.flatten_param = val_f
+
+
+class TestRegionYoloExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['RegionYolo'] = RegionYoloOp
+
+    def test_regionyolo_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, RegionYoloFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.regionyolo_ext.merge_attrs')
+    def test_regionyolo_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'coords': 4,
+            'classes': 20,
+            'num': 5,
+            'do_softmax': 1,
+            'anchors': 5,
+            'mask': 5,
+        }
+        params_flatten = {
+            'axis': 1,
+            'end_axis': -1
+        }
+        merge_attrs_mock.return_value = {
+            **params,
+            **params_flatten
+        }
+
+        fake_pl = FakeRegionYoloProtoLayer(FakeMultiParam(params), FakeMultiParam(params_flatten))
+        fake_node = FakeNode(fake_pl, None)
+
+        RegionYoloFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "RegionYolo",
+            'coords': 4,
+            'classes': 20,
+            'num': 5,
+            'axis': 1,
+            'end_axis': -1,
+            'do_softmax': 1,
+            'anchors': 5,
+            'mask': 5,
+            'infer': RegionYoloOp.regionyolo_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/reorgyolo_ext_test.py b/model-optimizer/extensions/front/caffe/reorgyolo_ext_test.py
new file mode 100644 (file)
index 0000000..502c5ad
--- /dev/null
@@ -0,0 +1,61 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.reorgyolo_ext import ReorgYoloFrontExtractor
+from extensions.ops.reorgyolo import ReorgYoloOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeReorgYoloProtoLayer:
+    def __init__(self, val):
+        self.reorg_yolo_param = val
+
+
+class TestReorgYoloExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['ReorgYolo'] = ReorgYoloOp
+
+    def test_reorgyolo_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, ReorgYoloFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.reorgyolo_ext.merge_attrs')
+    def test_reorgyolo_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'stride': 2
+        }
+        merge_attrs_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakeReorgYoloProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        ReorgYoloFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "ReorgYolo",
+            'stride': 2,
+            'infer': ReorgYoloOp.reorgyolo_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/resample_ext_test.py b/model-optimizer/extensions/front/caffe/resample_ext_test.py
new file mode 100644 (file)
index 0000000..c1fc3d6
--- /dev/null
@@ -0,0 +1,72 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.resample_ext import ResampleFrontExtractor
+from extensions.ops.resample import ResampleOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeResampleProtoLayer:
+    def __init__(self, val):
+        self.resample_param = val
+
+
+class TestResampleExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Resample'] = ResampleOp
+
+    def test_resample_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, ResampleFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.resample_ext.merge_attrs')
+    def test_resample_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'antialias': True,
+            'height': 384,
+            'width': 512,
+            'type': 2,
+            'factor': 1.0,
+        }
+        merge_attrs_mock.return_value = {
+            'antialias': True,
+            'height': 384,
+            'width': 512,
+            'type': 'caffe.ResampleParameter.LINEAR',
+            'factor': 1.0,
+        }
+        fake_pl = FakeResampleProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        ResampleFrontExtractor.extract(fake_node)
+
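+        # The extractor is expected to expose the merged attributes with 'type' renamed to
+        # 'resample_type'; the expected values below mirror the mocked merge_attrs return value.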
+        exp_res = {
+            'op': 'Resample',
+            'antialias': 1,
+            'height': 384,
+            'width': 512,
+            'resample_type': 'caffe.ResampleParameter.LINEAR',
+            'factor': 1.0,
+            'infer': ResampleOp.resample_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(exp_res[key], fake_node[key])
diff --git a/model-optimizer/extensions/front/caffe/simplernms_ext_test.py b/model-optimizer/extensions/front/caffe/simplernms_ext_test.py
new file mode 100644 (file)
index 0000000..06b298b
--- /dev/null
@@ -0,0 +1,74 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.simplernms_ext import SimplerNMSFrontExtractor
+from extensions.ops.simplernms import SimplerNMSOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeSimplerNMSProtoLayer:
+    def __init__(self, val):
+        self.simpler_nms_param = val
+
+
+class TestSimplerNMSExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['SimplerNMS'] = SimplerNMSOp
+
+    def test_simplernms_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, SimplerNMSFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.simplernms_ext.merge_attrs')
+    def test_simplernms_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'cls_threshold': 0.5,
+            'max_num_proposals': 300,
+            'iou_threshold': 0.7,
+            'min_bbox_size': 16,
+            'feat_stride': 16,
+            'pre_nms_topn': 6000,
+            'post_nms_topn': 150,
+            'scale': [1, 2, 3]
+        }
+        merge_attrs_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakeSimplerNMSProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        SimplerNMSFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'cls_threshold': 0.5,
+            'max_num_proposals': 300,
+            'iou_threshold': 0.7,
+            'min_bbox_size': 16,
+            'feat_stride': 16,
+            'pre_nms_topn': 6000,
+            'post_nms_topn': 150,
+            'scale': [1, 2, 3],
+            'infer': SimplerNMSOp.simplernms_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/caffe/spatial_transformer_ext_test.py b/model-optimizer/extensions/front/caffe/spatial_transformer_ext_test.py
new file mode 100644 (file)
index 0000000..9039cda
--- /dev/null
@@ -0,0 +1,81 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from extensions.front.caffe.spatial_transformer_ext import SpatialTransformFrontExtractor
+from extensions.ops.spatial_transformer import SpatialTransformOp
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+from mo.ops.op import Op
+
+
+class FakeSpatialTransformProtoLayer:
+    def __init__(self, val):
+        self.st_param = val
+
+
+class TestSpatialTransformExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['SpatialTransformer'] = SpatialTransformOp
+
+    def test_st_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, SpatialTransformFrontExtractor.extract, None)
+
+    @patch('extensions.front.caffe.spatial_transformer_ext.merge_attrs')
+    def test_st_ext_ideal_numbers(self, merge_attrs_mock):
+        params = {
+            'transform_type': "ffff",
+            'sampler_type': "gggg",
+            'output_H': 56,
+            'output_W': 78,
+            'to_compute_dU': True,
+            'theta_1_1': 0.1,
+            'theta_1_2': 0.2,
+            'theta_1_3': 0.3,
+            'theta_2_1': 0.4,
+            'theta_2_2': 0.5,
+            'theta_2_3': 0.6
+        }
+        merge_attrs_mock.return_value = {
+            **params
+        }
+
+        fake_pl = FakeSpatialTransformProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        SpatialTransformFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': "SpatialTransformer",
+            'transform_type': "ffff",
+            'sampler_type': "gggg",
+            'output_H': 56,
+            'output_W': 78,
+            'to_compute_dU': 1,
+            'theta_1_1': 0.1,
+            'theta_1_2': 0.2,
+            'theta_1_3': 0.3,
+            'theta_2_1': 0.4,
+            'theta_2_2': 0.5,
+            'theta_2_3': 0.6,
+            'infer': SpatialTransformOp.sp_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(fake_node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/eltwise_n_test.py b/model-optimizer/extensions/front/eltwise_n_test.py
new file mode 100644 (file)
index 0000000..33cedbd
--- /dev/null
@@ -0,0 +1,89 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.eltwise_n import EltwiseNReplacement
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class TestAddNFrontReplacement(unittest.TestCase):
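+    # The replacement is expected to decompose an N-input EltwiseN into N - 1 binary Eltwise
+    # nodes; the tests below check this for 2, 3 and 4 inputs.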
+    def test_replace_eltwise_n(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'add_n': {'value': None, 'operation': 'sum', 'type': None, 'kind': 'op', 'op': 'EltwiseN'},
+             'node_4': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             },
+            [('node_1', 'node_2'),
+             ('node_2', 'add_n'),
+             ('node_3', 'add_n'),
+             ('add_n', 'node_4'), ],
+        )
+
+        add_n_node = Node(graph, 'add_n')
+        rep_op = EltwiseNReplacement()
+        rep_op.replace_op(graph, add_n_node)
+        eltwise_nodes = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'Eltwise']
+        self.assertEqual(len(eltwise_nodes), 1)
+
+    def test_replace_eltwise_n_2(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'node_4': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'add_n': {'value': None, 'operation': 'sum', 'type': None, 'kind': 'op', 'op': 'EltwiseN'},
+             'node_5': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             },
+            [('node_1', 'node_2'),
+             ('node_2', 'add_n'),
+             ('node_3', 'add_n'),
+             ('node_4', 'add_n'),
+             ('add_n', 'node_5'), ],
+        )
+
+        add_n_node = Node(graph, 'add_n')
+        rep_op = EltwiseNReplacement()
+        rep_op.replace_op(graph, add_n_node)
+        eltwise_nodes = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'Eltwise']
+        self.assertEqual(len(eltwise_nodes), 2)
+
+    def test_replace_eltwise_n_3(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'node_4': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'node_5': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'add_n': {'value': None, 'operation': 'sum', 'type': None, 'kind': 'op', 'op': 'EltwiseN'},
+             'node_6': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             },
+            [('node_1', 'node_2'),
+             ('node_2', 'add_n'),
+             ('node_3', 'add_n'),
+             ('node_4', 'add_n'),
+             ('node_5', 'add_n'),
+             ('add_n', 'node_6'), ],
+        )
+
+        add_n_node = Node(graph, 'add_n')
+        rep_op = EltwiseNReplacement()
+        rep_op.replace_op(graph, add_n_node)
+        eltwise_nodes = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'Eltwise']
+        self.assertEqual(len(eltwise_nodes), 3)
diff --git a/model-optimizer/extensions/front/freeze_placeholder_value_test.py b/model-optimizer/extensions/front/freeze_placeholder_value_test.py
new file mode 100644 (file)
index 0000000..5c23291
--- /dev/null
@@ -0,0 +1,103 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.front.freeze_placeholder_value import FreezePlaceholderValue
+from mo.utils.unittest.graph import build_graph
+
+nodes_bool = {
+    '0': {'name': 'input1', 'kind': 'op', 'op': 'Placeholder', 'data_type': bool, 'shape': np.array([])},
+    '1': {'name': 'input2', 'kind': 'op', 'op': 'Placeholder', 'data_type': bool, 'shape': np.array([])},
+    '2': {'name': 'node_1', 'kind': 'op', 'op': 'NotPlaceholder'},
+    '3': {'name': 'node_2', 'kind': 'op', 'op': 'NotPlaceholder'},
+    '4': {'name': 'node_3', 'kind': 'op', 'op': 'NotPlaceholder'},
+    '5': {'name': 'node_4', 'kind': 'op', 'op': 'NotPlaceholder'},
+    '6': {'name': 'output1', 'kind': 'op', 'op': 'OpOutput', 'is_output': True},
+    '7': {'name': 'output2', 'kind': 'op', 'op': 'OpOutput', 'is_output': True}
+
+}
+edges = {
+    ('0', '2'),
+    ('2', '3'),
+    ('4', '6'),
+    ('1', '5'),
+    ('5', '7')
+}
+
+
+class TestFreezePlaceholderValue(unittest.TestCase):
+    def test_freeze_true(self):
+        graph = build_graph(nodes_bool, edges)
+        graph.graph['fw'] = 'tf'
+        tested_class = FreezePlaceholderValue()
+        tested_class.replacement_dict = {'input1': 'True'}
+        before_pattern = graph.nodes()
+        tested_class.find_and_replace_pattern(graph=graph)
+        after_pattern = graph.nodes()
+        # number of nodes in the graph didn't change
+        self.assertEqual(len(before_pattern), len(after_pattern))
+        # reach new placeholder
+        try:
+            new_ph_dict = graph.node[[u for u, v in graph.in_edges('2')][0]]
+        except Exception as e:
+            self.fail("Can't get frozen placeholder. Broken edge. Additional information: {}".format(e))
+        # check value
+        self.assertEqual('value' in new_ph_dict, True)
+        self.assertEqual(new_ph_dict['value'], True)
+
+    def test_freeze_false(self):
+        graph = build_graph(nodes_bool, edges)
+        graph.graph['fw'] = 'tf'
+        tested_class = FreezePlaceholderValue()
+        tested_class.replacement_dict = {'input1': 'False'}
+        before_pattern = graph.nodes()
+        tested_class.find_and_replace_pattern(graph=graph)
+        after_pattern = graph.nodes()
+        # number of nodes in the graph didn't change
+        self.assertEqual(len(before_pattern), len(after_pattern))
+        # reach new placeholder
+        try:
+            new_ph_dict = graph.node[[u for u, v in graph.in_edges('2')][0]]
+        except Exception as e:
+            self.fail("Can't get frozen placeholder. Broken edge. Additional information: {}".format(e))
+        # check value
+        self.assertEqual('value' in new_ph_dict, True)
+        self.assertEqual(new_ph_dict['value'], False)
+
+    def test_freeze_both(self):
+        graph = build_graph(nodes_bool, edges)
+        graph.graph['fw'] = 'tf'
+        tested_class = FreezePlaceholderValue()
+        tested_class.replacement_dict = {'input1': 'False', 'input2': 'True'}
+        before_pattern = graph.nodes()
+        tested_class.find_and_replace_pattern(graph=graph)
+        after_pattern = graph.nodes()
+        # number of nodes in the graph didn't change
+        self.assertEqual(len(before_pattern), len(after_pattern))
+        # reach new placeholder
+        try:
+            new_ph_dict_1 = graph.node[[u for u, v in graph.in_edges('2')][0]]
+            new_ph_dict_2 = graph.node[[u for u, v in graph.in_edges('5')][0]]
+        except Exception as e:
+            self.fail("Can't get frozen placeholder. Broken edge. Additional information: {}".format(e))
+        # check value
+        self.assertEqual('value' in new_ph_dict_1, True)
+        self.assertEqual('value' in new_ph_dict_2, True)
+        self.assertEqual(new_ph_dict_1['value'], False)
+        self.assertEqual(new_ph_dict_2['value'], True)
diff --git a/model-optimizer/extensions/front/image_scaler_test.py b/model-optimizer/extensions/front/image_scaler_test.py
new file mode 100644 (file)
index 0000000..2c4ec90
--- /dev/null
@@ -0,0 +1,103 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.front.image_scaler import ImageScaler
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # ImageScaler operation
+    'im_scaler': {'type': None, 'kind': 'op', 'op': 'ImageScaler'},
+    'im_scaler_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Test operation
+    'last': {'type': None, 'value': None, 'kind': 'op', 'op': None},
+    'last_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Mul and Add operations
+    'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'},
+    'mul_1_w': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'},
+    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'add_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Add'},
+    'add_1_w': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'},
+    'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+class ImageScalerTest(unittest.TestCase):
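+    # With scale == 1.0 only a bias Add is expected to remain (test1); with an all-zero bias
+    # only a scale Mul is expected to remain (test2).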
+    def test_image_scaler_test1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'im_scaler'),
+                             ('im_scaler', 'im_scaler_data'),
+                             ('im_scaler_data', 'last'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'im_scaler': {'scale': np.array(1.0), 'bias': np.reshape(np.array([1, 2, 3]), [3, 1, 1])},
+                             }, nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'last')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_w': {'shape': np.array([3, 1, 1]),
+                                             'value': np.reshape(np.array([1, 2, 3]), [3, 1, 1])},
+                                 }, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+
+        replacer = ImageScaler()
+        replacer.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'last')
+        self.assertTrue(flag, resp)
+
+    def test_image_scaler_test2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'im_scaler'),
+                             ('im_scaler', 'im_scaler_data'),
+                             ('im_scaler_data', 'last'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'im_scaler': {'scale': np.array(2.0), 'bias': np.reshape(np.array([0, 0, 0]), [3, 1, 1])},
+                             }, nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'last')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array(2.0).shape, 'value': np.array(2.0)},
+                                 }, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+
+        replacer = ImageScaler()
+        replacer.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'last')
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/front/instance_normalization_test.py b/model-optimizer/extensions/front/instance_normalization_test.py
new file mode 100644 (file)
index 0000000..90dbe1b
--- /dev/null
@@ -0,0 +1,61 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import networkx as nx
+
+from extensions.front.instance_normalization import InstanceNormalization
+from mo.utils.unittest.graph import build_graph
+from mo.middle.pattern_match import node_match
+
+
+class TestInstanceNormalization(unittest.TestCase):
+    def test_default(self):
+        nodes = {
+            'input': {'kind': 'op', 'op': 'AnyOp'},
+            'scale': {'kind': 'op', 'op': 'AnyOp'},
+            'B': {'kind': 'op', 'op': 'AnyOp'},
+            'node': {'kind': 'op', 'op': 'InstanceNormalization', 'epsilon': 0.123},
+        }
+        edges = [
+            ('input', 'node'),
+            ('scale', 'node'),
+            ('B', 'node'),
+        ]
+
+        graph = build_graph(nodes, edges)
+        tested_class = InstanceNormalization()
+        tested_class.find_and_replace_pattern(graph)
+
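+        # The pattern is expected to expand InstanceNormalization into MVN -> Mul(scale) -> Add(B),
+        # as encoded by the reference graph below.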
+        ref_nodes = {
+            'input': {'op': 'AnyOp'},
+            'scale': {'op': 'AnyOp'},
+            'B': {'op': 'AnyOp'},
+            'mvn': {'kind': 'op', 'op': 'MVN', 'name': 'node/InstanceNormalization/MVN', 'eps': 0.123},
+            'mul': {'kind': 'op', 'op': 'Mul', 'name': 'node/InstanceNormalization/Mul'},
+            'add': {'kind': 'op', 'op': 'Add', 'name': 'node/InstanceNormalization/Add'},
+        }
+        ref_edges = [
+            ('input', 'mvn'),
+            ('mvn', 'mul'),
+            ('scale', 'mul'),
+            ('mul', 'add'),
+            ('B', 'add'),
+        ]
+
+        ref_graph = build_graph(ref_nodes, ref_edges)
+        self.assertTrue(nx.is_isomorphic(graph, ref_graph, node_match))
diff --git a/model-optimizer/extensions/front/kaldi/replace_splice_node_pattern_test.py b/model-optimizer/extensions/front/kaldi/replace_splice_node_pattern_test.py
new file mode 100644 (file)
index 0000000..f967f4b
--- /dev/null
@@ -0,0 +1,57 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+from extensions.front.kaldi.replace_splice_node_pattern import ReplaceSpliceNodePattern
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+
+class ReplaceSpliceNodePatternTests(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        cls.nodes_attributes = {
+            'in_node': {'kind': 'op', 'op': 'Input', 'shape': [1, 13]},
+            'slice': {'kind': 'op', 'op': 'Splice', 'context': range(-5, 5)}
+        }
+        cls.graph = build_graph(cls.nodes_attributes,
+                                [('in_node', 'slice')])
+
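+        # The Splice node is expected to be replaced with a Memory/Crop/Concat subgraph,
+        # which the tests below inspect node by node.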
+        ReplaceSpliceNodePattern().find_and_replace_pattern(cls.graph)
+
+    def test_memory(self):
+        memory_nodes = [node for node in self.graph.nodes(data=True) if node[1]['op'] == 'Memory']
+        self.assertEqual(len(memory_nodes), 2)
+        for memory_node in memory_nodes:
+            node = Node(self.graph, memory_node[0])
+            if len(node.in_nodes()):
+                self.assertEqual(node.index, 0)
+            elif len(node.out_nodes()):
+                self.assertEqual(node.index, 1)
+        self.assertEqual(memory_nodes[0][1]['id'], memory_nodes[1][1]['id'])
+
+    def test_crop(self):
+        crop_node = [node for node in self.graph.nodes(data=True) if node[1]['op'] == 'Crop']
+        self.assertEqual(len(crop_node), 1)
+        crop_node = Node(self.graph, crop_node[0][0])
+        self.assertEqual(crop_node.offset, [13])
+        self.assertEqual(crop_node.dim, [13 * 9])
+
+    def test_concat(self):
+        concat_node = [node for node in self.graph.nodes(data=True) if node[1]['op'] == 'Concat']
+        self.assertEqual(len(concat_node), 1)
+        concat_node = Node(self.graph, concat_node[0][0])
+        self.assertEqual(concat_node.axis, 1)
diff --git a/model-optimizer/extensions/front/mxnet/check_softmax_node_inputs_test.py b/model-optimizer/extensions/front/mxnet/check_softmax_node_inputs_test.py
new file mode 100644 (file)
index 0000000..ea7da2a
--- /dev/null
@@ -0,0 +1,60 @@
+"""
+ Copyright (c) 2017-2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.mxnet.check_softmax_node_inputs import CheckSoftmaxNodeInputs
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class TestCheckSoftmaxNodeInputs(unittest.TestCase):
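+    # The transformation is expected to keep only the first input of SoftmaxOutput and
+    # SoftmaxActivation nodes; both tests check that a single input remains.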
+    def test_remove_softmax_output_input(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'node_2': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'softmax': {'type': 'SoftmaxOutput', 'value': None, 'kind': 'op', 'op': 'SoftmaxOutput'},
+             },
+            [('node_1', 'softmax'),
+             ('node_2', 'softmax')
+             ])
+
+        pattern = CheckSoftmaxNodeInputs()
+        pattern.find_and_replace_pattern(graph)
+
+        node_softmax = Node(graph, 'softmax')
+
+        self.assertEqual(len(node_softmax.in_nodes()), 1)
+
+        node_input1 = node_softmax.in_node(0)
+        self.assertEqual(node_input1.name, 'node_1')
+
+    def test_remove_softmax_activation_input(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'softmax': {'type': 'SoftmaxActivation', 'value': None, 'kind': 'op', 'op': 'SoftmaxActivation'},
+             },
+            [('node_1', 'softmax')])
+
+        pattern = CheckSoftmaxNodeInputs()
+        pattern.find_and_replace_pattern(graph)
+
+        node_softmax = Node(graph, 'softmax')
+
+        self.assertEqual(len(node_softmax.in_nodes()), 1)
+
+        node_input1 = node_softmax.in_node(0)
+        self.assertEqual(node_input1.name, 'node_1')
diff --git a/model-optimizer/extensions/front/mxnet/conv_ext_test.py b/model-optimizer/extensions/front/mxnet/conv_ext_test.py
new file mode 100644 (file)
index 0000000..ee68688
--- /dev/null
@@ -0,0 +1,152 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.front.mxnet.conv_ext import DeconvFrontExtractor
+from mo.utils.unittest.extractors import PB
+
+
+class TestDeconvShapesParsing(unittest.TestCase):
+    def test_conv_ext_ideal_numbers(self):
+        params = {'attrs': {
+            "kernel": "(4, 4)",
+            "no_bias": "True",
+            "num_filter": "21",
+            "num_group": "14",
+            "pad": "(4, 4)",
+            "stride": "(2, 2)",
+            "dilate": "(3, 3)",
+            "workspace": "1536"
+        }}
+        node = PB({'symbol_dict': params})
+        DeconvFrontExtractor.extract(node)
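+        # The MXNet string attributes are expected to be parsed into 4D pad/stride/dilation
+        # layouts as listed below.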
+        exp_res = {
+            'op': 'Deconvolution',
+            'pad': np.array([[0, 0], [0, 0], [4, 4], [4, 4]]),
+            'pad_spatial_shape': np.array([[4, 4], [4, 4]]),
+            'stride': np.array([1, 1, 2, 2]),
+            'kernel_spatial': np.array([4, 4]),
+            'dilation': np.array([1, 1, 3, 3]),
+            'group': 14,
+            'output': 21,
+            'bias_addable': True,
+            'bias_term': False,
+        }
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
+                np.testing.assert_equal(node[key], exp_res[key])
+            else:
+                self.assertEqual(node[key], exp_res[key])
+
+
+    def test_conv_ext_no_bias(self):
+        params = {'attrs': {
+            "kernel": "(4, 4)",
+            "num_filter": "21",
+            "num_group": "14",
+            "pad": "(4, 4)",
+            "stride": "(2, 2)",
+            "dilate": "(3, 3)",
+            "workspace": "1536"
+        }}
+        node = PB({'symbol_dict': params})
+        DeconvFrontExtractor.extract(node)
+        exp_res = {
+            'op': 'Deconvolution',
+            'pad': np.array([[0, 0], [0, 0], [4, 4], [4, 4]]),
+            'pad_spatial_shape': np.array([[4, 4], [4, 4]]),
+            'stride': np.array([1, 1, 2, 2]),
+            'kernel_spatial': np.array([4, 4]),
+            'dilation': np.array([1, 1, 3, 3]),
+            'group': 14,
+            'output': 21,
+            'bias_addable': True,
+            'bias_term': False,
+        }
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
+                np.testing.assert_equal(node[key], exp_res[key])
+            else:
+                self.assertEqual(node[key], exp_res[key])
+
+
+    def test_conv_ext_with_bias(self):
+        params = {'attrs': {
+            "kernel": "(4, 4)",
+            "no_bias": "False",
+            "num_filter": "21",
+            "num_group": "14",
+            "pad": "(4, 4)",
+            "stride": "(2, 2)",
+            "dilate": "(3, 3)",
+            "workspace": "1536"
+        }}
+        node = PB({'symbol_dict': params})
+        DeconvFrontExtractor.extract(node)
+        exp_res = {
+            'op': 'Deconvolution',
+            'pad': np.array([[0, 0], [0, 0], [4, 4], [4, 4]]),
+            'pad_spatial_shape': np.array([[4, 4], [4, 4]]),
+            'stride': np.array([1, 1, 2, 2]),
+            'kernel_spatial': np.array([4, 4]),
+            'dilation': np.array([1, 1, 3, 3]),
+            'group': 14,
+            'output': 21,
+            'bias_addable': True,
+            'bias_term': True,
+        }
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'):
+                np.testing.assert_equal(node[key], exp_res[key])
+            else:
+                self.assertEqual(node[key], exp_res[key])
+
+
+    def test_deconv_ext_target_shape(self):
+        params = {'attrs': {
+            "kernel": "(4, 4)",
+            "no_bias": "True",
+            "num_filter": "21",
+            "num_group": "14",
+            "pad": "(4, 4)",
+            "stride": "(2, 2)",
+            "dilate": "(3, 3)",
+            "workspace": "1536",
+            "target_shape": "(120, 120)"
+        }}
+        node = PB({'symbol_dict': params})
+        DeconvFrontExtractor.extract(node)
+        exp_res = {
+            'op': 'Deconvolution',
+            'pad': np.array([[0, 0], [0, 0], [4, 4], [4, 4]]),
+            'pad_spatial_shape': np.array([[4, 4], [4, 4]]),
+            'stride': np.array([1, 1, 2, 2]),
+            'kernel_spatial': np.array([4, 4]),
+            'dilation': np.array([1, 1, 3, 3]),
+            'group': 14,
+            'output': 21,
+            'bias_addable': True,
+            'bias_term': False,
+            'output_spatial_shape': np.array([120, 120]),
+        }
+        for key in exp_res.keys():
+            if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation', 'output_spatial_shape'):
+                np.testing.assert_equal(node[key], exp_res[key])
+            else:
+                self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/mxnet/custom_test.py b/model-optimizer/extensions/front/mxnet/custom_test.py
new file mode 100644 (file)
index 0000000..3d698cf
--- /dev/null
@@ -0,0 +1,57 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.mxnet.custom import CustomFrontExtractorOp
+from mo.utils.unittest.graph import build_graph
+from mo.front.extractor import FrontExtractorOp, MXNetCustomFrontExtractorOp
+from mo.graph.graph import Node
+
+attrs = {'test_attr': 1}
+
+
+class FakeExtractor(MXNetCustomFrontExtractorOp):
+    @staticmethod
+    def extract(node: Node):
+        return True, attrs
+
+
+class TestCustomFrontExtractorOp(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        FrontExtractorOp.registered_ops['Custom'] = CustomFrontExtractorOp
+
+    def test_extract_custom_layer(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'node_custom': {'type': 'Custom', 'value': None, 'kind': 'op', 'op': 'Custom', },
+             'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             },
+            [('node_1', 'node_2'),
+             ('node_2', 'node_custom'),
+             ('node_custom', 'node_3'),
+             ],
+            {
+                'node_custom': {'symbol_dict': {'attrs': {'op_type': 'test_type'}}},
+            })
+
+        custom_node = Node(graph, 'node_custom')
+        custom_op = FakeExtractor()
+        supported, op_attrs = custom_op.extract(custom_node)
+        self.assertTrue(supported)
+        self.assertEqual(op_attrs, attrs)
diff --git a/model-optimizer/extensions/front/mxnet/pooling_ext_test.py b/model-optimizer/extensions/front/mxnet/pooling_ext_test.py
new file mode 100644 (file)
index 0000000..43450a8
--- /dev/null
@@ -0,0 +1,50 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.front.mxnet.pooling_ext import PoolingFrontExtractor
+from mo.utils.unittest.extractors import PB
+
+
+class TestPoolingShapesParsing(unittest.TestCase):
+    def test_pooling_ext_ideal_numbers(self):
+        params = {'attrs': {
+            "kernel": "(3, 4)",
+            "stride": "(3, 2)",
+            "pad": "(7, 8)",
+            "pool_type": "max"
+        }}
+
+        node = PB({'symbol_dict': params})
+        PoolingFrontExtractor.extract(node)
+        exp_res = {
+            'op': 'Pooling',
+            'pad': np.array([[0, 0], [0, 0], [7, 7], [8, 8]]),
+            'pad_spatial_shape': np.array([[7, 7], [8, 8]]),
+            'stride': np.array([1, 1, 3, 2]),
+            'window': np.array([1, 1, 3, 4]),
+            'pool_method': 'max',
+            'exclude_pad': 'false',
+        }
+
+        for key in exp_res.keys():
+            if key in ('pad', 'stride', 'window', 'pad_spatial_shape'):
+                np.testing.assert_equal(node[key], exp_res[key])
+            else:
+                self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/mxnet/slice_channel_ext_test.py b/model-optimizer/extensions/front/mxnet/slice_channel_ext_test.py
new file mode 100644 (file)
index 0000000..080e871
--- /dev/null
@@ -0,0 +1,55 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.mxnet.slice_channel_ext import SliceChannelFrontExtractor
+from mo.utils.unittest.extractors import PB
+
+
+class TestSliceChannelParsing(unittest.TestCase):
+    def test_parse_values(self):
+        params = {'attrs': {
+            "num_outputs": "2",
+            'axis': "2",
+        }}
+
+        node = PB({'symbol_dict': params})
+        SliceChannelFrontExtractor.extract(node)
+        exp_res = {
+            'op': 'Split',
+            'axis': 2,
+            'num_split': 2,
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(node[key], exp_res[key])
+
+    def test_parse_default_values(self):
+        params = {'attrs': {
+            "num_outputs": "2",
+        }}
+
+        node = PB({'symbol_dict': params})
+        SliceChannelFrontExtractor.extract(node)
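+        # 'axis' is omitted from the attributes, so the extractor is expected to fall back
+        # to the default axis of 1.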
+        exp_res = {
+            'op': 'Split',
+            'axis': 1,
+            'num_split': 2,
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/mxnet/ssd_pattern_flatten_softmax_activation_test.py b/model-optimizer/extensions/front/mxnet/ssd_pattern_flatten_softmax_activation_test.py
new file mode 100644 (file)
index 0000000..7c9bc9e
--- /dev/null
@@ -0,0 +1,45 @@
+"""
+ Copyright (c) 2017-2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.mxnet.ssd_pattern_flatten_softmax_activation import SsdPatternFlattenSoftmaxActivation
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class TestSsdPatternFlattenSoftmaxActivation(unittest.TestCase):
+    def test_pattern_flatten_softmax_activation(self):
+        graph = build_graph({'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+                             'node_2': {'type': 'Identity', 'kind': 'op'},
+                             'node_3': {'type': 'Identity', 'kind': 'op'},
+                             'node_softmax_activation': {'type': 'SoftMax', 'kind': 'op', 'op': 'SoftMax'},
+                             'node_multi_box_detection': {'type': '_contrib_MultiBoxDetection', 'kind': 'op',
+                                                          'op': '_contrib_MultiBoxDetection'},
+                             'node_4': {'type': 'Identity', 'kind': 'op'},
+                             },
+                            [('node_1', 'node_softmax_activation'),
+                             ('node_2', 'node_multi_box_detection'),
+                             ('node_softmax_activation', 'node_multi_box_detection'),
+                             ('node_3', 'node_multi_box_detection'),
+                             ('node_multi_box_detection', 'node_4'), ],
+                            )
+
+        pattern = SsdPatternFlattenSoftmaxActivation()
+        pattern.find_and_replace_pattern(graph)
+        flatten_name = list(graph.nodes())[-1]
+        self.assertTrue(graph.has_node(flatten_name))
+        self.assertFalse(graph.has_edge(Node(graph, 'node_softmax_activation').id, Node(graph, 'node_multi_box_detection').id))
diff --git a/model-optimizer/extensions/front/mxnet/ssd_pattern_remove_flatten_test.py b/model-optimizer/extensions/front/mxnet/ssd_pattern_remove_flatten_test.py
new file mode 100644 (file)
index 0000000..dfd5708
--- /dev/null
@@ -0,0 +1,42 @@
+"""
+ Copyright (c) 2017-2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.mxnet.ssd_pattern_remove_flatten import SsdPatternRemoveFlatten
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class TestSsdPatternRemoveFlatten(unittest.TestCase):
+    def test_pattern_remove_flatten(self):
+        graph = build_graph({'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+                             'node_2': {'type': 'Identity', 'kind': 'op'},
+                             'node_multi_box_prior': {'type': '_contrib_MultiBoxPrior', 'kind': 'op',
+                                                      'op': '_contrib_MultiBoxPrior'},
+                             'node_flatten': {'type': 'Flatten', 'kind': 'op', 'op': 'Flatten'},
+                             'node_3': {'type': 'Identity', 'kind': 'op'},
+                             },
+                            [('node_1', 'node_2'),
+                             ('node_2', 'node_multi_box_prior'),
+                             ('node_multi_box_prior', 'node_flatten'),
+                             ('node_flatten', 'node_3'), ],
+                            )
+
+        pattern = SsdPatternRemoveFlatten()
+        pattern.find_and_replace_pattern(graph)
+        self.assertFalse(graph.has_node('node_flatten'))
+        self.assertTrue(graph.has_edge(Node(graph, 'node_multi_box_prior').id, Node(graph, 'node_3').id))
diff --git a/model-optimizer/extensions/front/mxnet/ssd_pattern_remove_reshape_test.py b/model-optimizer/extensions/front/mxnet/ssd_pattern_remove_reshape_test.py
new file mode 100644 (file)
index 0000000..40a7649
--- /dev/null
@@ -0,0 +1,56 @@
+"""
+ Copyright (c) 2017-2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.mxnet.ssd_pattern_remove_reshape import SsdPatternRemoveReshape
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class TestSsdPatternRemoveReshape(unittest.TestCase):
+    def test_pattern_remove_reshape(self):
+        graph = build_graph({'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+                             'node_2': {'type': 'Identity', 'kind': 'op'},
+                             'node_multi_box_prior1': {'type': '_contrib_MultiBoxPrior', 'kind': 'op',
+                                                       'op': '_contrib_MultiBoxPrior'},
+                             'node_multi_box_prior2': {'type': '_contrib_MultiBoxPrior', 'kind': 'op',
+                                                       'op': '_contrib_MultiBoxPrior'},
+                             'node_multi_box_prior3': {'type': '_contrib_MultiBoxPrior', 'kind': 'op',
+                                                       'op': '_contrib_MultiBoxPrior'},
+                             'node_concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+                             'node_reshape': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+                             'node_3': {'type': 'Identity', 'kind': 'op'},
+                             },
+                            [('node_1', 'node_2'),
+                             ('node_2', 'node_multi_box_prior1'),
+                             ('node_2', 'node_multi_box_prior2'),
+                             ('node_2', 'node_multi_box_prior3'),
+                             ('node_multi_box_prior1', 'node_concat'),
+                             ('node_multi_box_prior2', 'node_concat'),
+                             ('node_multi_box_prior3', 'node_concat'),
+                             ('node_concat', 'node_reshape'),
+                             ('node_reshape', 'node_3'), ],
+                            {
+                                'node_concat': {'symbol_dict': {'attrs': {'dim': 3}}},
+                            })
+
+        pattern = SsdPatternRemoveReshape()
+        pattern.find_and_replace_pattern(graph)
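+        # The Reshape node is expected to be removed and the Concat 'dim' attribute reduced
+        # from 3 to 2.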
+        node_concat = Node(graph, 'node_concat')
+        self.assertEqual(node_concat['symbol_dict']['attrs']['dim'], 2)
+        self.assertFalse(graph.has_node('node_reshape'))
+        self.assertTrue(graph.has_edge(Node(graph, 'node_concat').id, Node(graph, 'node_3').id))
diff --git a/model-optimizer/extensions/front/mxnet/ssd_pattern_remove_transpose_test.py b/model-optimizer/extensions/front/mxnet/ssd_pattern_remove_transpose_test.py
new file mode 100644 (file)
index 0000000..576e2f9
--- /dev/null
@@ -0,0 +1,47 @@
+"""
+ Copyright (c) 2017-2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.mxnet.ssd_pattern_remove_transpose import SsdPatternRemoveTranspose
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class TestSsdPatternRemoveTranspose(unittest.TestCase):
+    def test_pattern_remove_transpose(self):
+        graph = build_graph({'node_1': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+                             'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                             'node_4': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                             'node_transpose': {'type': 'transpose', 'value': None, 'kind': 'op', 'op': 'transpose'},
+                             'node_softmax_activation': {'type': 'SoftMax', 'value': None, 'kind': 'op',
+                                                         'op': 'SoftMax'},
+                             'node_multi_box_detection': {'type': '_contrib_MultiBoxDetection', 'value': None,
+                                                          'kind': 'op', 'op': '_contrib_MultiBoxDetection'},
+                             'node_5': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                             },
+                            [('node_1', 'node_transpose'),
+                             ('node_transpose', 'node_softmax_activation'),
+                             ('node_3', 'node_multi_box_detection'),
+                             ('node_softmax_activation', 'node_multi_box_detection'),
+                             ('node_4', 'node_multi_box_detection'),
+                             ('node_multi_box_detection', 'node_5'), ],
+                            )
+
+        pattern = SsdPatternRemoveTranspose()
+        pattern.find_and_replace_pattern(graph)
+        self.assertFalse(graph.has_node('node_transpose'))
+        self.assertTrue(graph.has_edge(Node(graph, 'node_1').id, Node(graph, 'node_softmax_activation').id))
diff --git a/model-optimizer/extensions/front/mxnet/ssd_reorder_detection_out_inputs_test.py b/model-optimizer/extensions/front/mxnet/ssd_reorder_detection_out_inputs_test.py
new file mode 100644 (file)
index 0000000..6ddde4c
--- /dev/null
@@ -0,0 +1,54 @@
+"""
+ Copyright (c) 2017-2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.front.mxnet.ssd_reorder_detection_out_inputs import SsdReorderDetectionOutInputs
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class TestSsdReorderDetectionOutInputs(unittest.TestCase):
+    def test_reorder_detection_out_inputs(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+             'node_2': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+             'node_3': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+             'multi_box_detection': {'type': '_contrib_MultiBoxDetection', 'kind': 'op',
+                                     'op': '_contrib_MultiBoxDetection'},
+             },
+            [('node_1', 'multi_box_detection'),
+             ('node_2', 'multi_box_detection'),
+             ('node_3', 'multi_box_detection')],
+            {
+                'node_1': {'shape': np.array([1, 34928])},
+                'node_2': {'shape': np.array([1, 183372])},
+                'node_3': {'shape': np.array([1, 2, 34928])},
+            })
+
+        pattern = SsdReorderDetectionOutInputs()
+        pattern.find_and_replace_pattern(graph)
+
+        node_multi_box = Node(graph, 'multi_box_detection')
+
+        node_input1 = node_multi_box.in_node(0)
+        node_input2 = node_multi_box.in_node(1)
+        node_input3 = node_multi_box.in_node(2)
+        self.assertEqual(node_input1.name, 'node_2')
+        self.assertEqual(node_input2.name, 'node_1')
+        self.assertEqual(node_input3.name, 'node_3')
diff --git a/model-optimizer/extensions/front/onnx/affine_ext_test.py b/model-optimizer/extensions/front/onnx/affine_ext_test.py
new file mode 100644 (file)
index 0000000..799e643
--- /dev/null
@@ -0,0 +1,79 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+import onnx
+
+from extensions.front.onnx.affine_ext import AffineFrontExtractor
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class AffineONNXExtractorTest(unittest.TestCase):
+    @staticmethod
+    def _create_node(attrs: dict):
+        pb = onnx.helper.make_node("Affine", ["X"], ["Y"], **attrs)
+        graph = build_graph({'node_0': {'pb': pb}}, [])
+        return Node(graph, 'node_0')
+
+    @staticmethod
+    def _base_attrs():
+        # Commonly used attributes in the tests
+        # Each test starts from these attributes and then adds/modifies/deletes particular fields
+        return (
+            # test input ONNX attributes
+            dict(
+                alpha=1.0,
+                beta=0.0
+            ),
+            # reference output Node attributes
+            dict(
+                op='ImageScaler',
+                scale=1.0,
+                bias=0.0
+            )
+        )
+
+    @staticmethod
+    def _extract(inp):
+        node = __class__._create_node(inp)
+        AffineFrontExtractor.extract(node)
+        return node.graph.node[node.id]
+
+    def _match(self, out, ref):
+        for key in ref.keys():
+            status = out[key] == ref[key]
+            if type(status) in [list, np.ndarray]:
+                status = np.all(status)
+            self.assertTrue(status, 'Mismatch for field {}, observed: {}, expected: {}'.format(key, out[key], ref[key]))
+
+    def test_default(self):
+        inp, ref = self._base_attrs()
+        out = self._extract(inp)
+        self._match(out, ref)
+
+    def test_random(self):
+        inp, ref = self._base_attrs()
+        inp['alpha'] = 123.
+        inp['beta'] = 321.
+
+        ref['scale'] = 123.
+        ref['bias'] = 321.
+
+        out = self._extract(inp)
+        self._match(out, ref)
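Note (not part of the patch): the ONNX extractor tests above and below share the same scaffolding — build a NodeProto with onnx.helper.make_node, wrap it in a one-node graph, run the extractor, and compare attribute dicts. A minimal standalone sketch of that first step, using only the public onnx.helper API (the helper name make_test_node is hypothetical):

    import onnx

    def make_test_node(op_type, attrs):
        # onnx.helper.make_node builds a NodeProto whose attributes the
        # front extractors later read; the input/output names are arbitrary here.
        return onnx.helper.make_node(op_type, ["X"], ["Y"], **attrs)

    node_pb = make_test_node("Affine", {"alpha": 1.0, "beta": 0.0})
    print(sorted((a.name, a.f) for a in node_pb.attribute))  # [('alpha', 1.0), ('beta', 0.0)]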
diff --git a/model-optimizer/extensions/front/onnx/conv_ext_test.py b/model-optimizer/extensions/front/onnx/conv_ext_test.py
new file mode 100644 (file)
index 0000000..937542a
--- /dev/null
@@ -0,0 +1,137 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+import onnx
+
+from extensions.front.onnx.conv_ext import ConvTransposeFrontExtractor
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+from mo.utils.error import Error
+
+
+class ConvTransposeONNXExtractorTest(unittest.TestCase):
+    @staticmethod
+    def _create_node(attrs: dict):
+        pb = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"], **attrs)
+        graph = build_graph({'node_0': {'pb': pb}}, [])
+        return Node(graph, 'node_0')
+
+    @staticmethod
+    def _base_attrs():
+        # Commonly used attributes in the tests
+        # Each test starts from these attributes and then adds/modifies/deletes particular fields
+        return (
+            # test input ONNX attributes
+            dict(
+                pads=[1, 2, 3, 4],
+                kernel_shape=[5, 6]
+            ),
+            # reference output Node attributes
+            dict(
+                type='Deconvolution',
+                pad=[[0, 0], [0, 0], [1, 3], [2, 4]],
+                pad_spatial_shape=[[1, 3], [2, 4]],
+                kernel_spatial=[5, 6],
+                bias_term=None,
+                output_shape=None,
+                output_padding=[0, 0, 0, 0],
+                dilation=[1, 1, 1, 1],
+                stride=[1, 1, 1, 1],
+                output_spatial_shape=None,
+                group=1
+            )
+        )
+
+    @staticmethod
+    def _extract(inp):
+        node = __class__._create_node(inp)
+        ConvTransposeFrontExtractor.extract(node)
+        return node.graph.node[node.id]
+
+    def _match(self, out, ref):
+        for key in ref.keys():
+            status = out[key] == ref[key]
+            if type(status) in [list, np.ndarray]:
+                status = np.all(status)
+            self.assertTrue(status, 'Mismatch for field {}, observed: {}, expected: {}'.format(key, out[key], ref[key]))
+
+    def test_all_valid_default(self):
+        inp, ref = self._base_attrs()
+        del inp['pads']
+        ref['pad'] = [[0, 0], [0, 0], [0, 0], [0, 0]]
+        ref['pad_spatial_shape'] = [[0, 0], [0, 0]]
+        out = self._extract(inp)
+        self._match(out, ref)
+
+    def test_most_used(self):
+        inp, ref = self._base_attrs()
+        out = self._extract(inp)
+        self._match(out, ref)
+
+    def test_dilation(self):
+        inp, ref = self._base_attrs()
+        inp['dilations'] = [10, 11]
+        ref['dilation'] = [1, 1, 10, 11]
+        out = self._extract(inp)
+        self._match(out, ref)
+
+    def test_stride(self):
+        inp, ref = self._base_attrs()
+        inp['strides'] = [12, 13]
+        ref['stride'] = [1, 1, 12, 13]
+        out = self._extract(inp)
+        self._match(out, ref)
+
+    def test_group(self):
+        inp, ref = self._base_attrs()
+        inp['group'] = 14
+        ref['group'] = 14
+        out = self._extract(inp)
+        self._match(out, ref)
+
+    def test_auto_pad_supported(self):
+        inp, ref = self._base_attrs()
+        del inp['pads']
+        inp['auto_pad'] = 'SAME_UPPER'
+
+        ref['auto_pad'] = 'same_upper'
+        ref['pad'] = [[0, 0], [0, 0], [0, 0], [0, 0]]
+        ref['pad_spatial_shape'] = [[0, 0], [0, 0]]
+
+        out = self._extract(inp)
+        self._match(out, ref)
+
+    def test_pads_not_even_invalid(self):
+        inp, ref = self._base_attrs()
+        inp['pads'] = [1, 2, 3]
+        with self.assertRaisesRegex(Error, '.*pads.*not correct.*'):
+            out = self._extract(inp)
+
+    def test_missing_kernel_shape_not_supported(self):
+        inp, ref = self._base_attrs()
+        del inp['kernel_shape']
+        with self.assertRaisesRegex(Error, '.*kernel_shape.*not supported.*'):
+            out = self._extract(inp)
+
+    def test_output_padding(self):
+        inp, ref = self._base_attrs()
+        inp['output_padding'] = [19, 20]
+        ref['output_padding'] = [0, 0, 19, 20]
+        out = self._extract(inp)
+        self._match(out, ref)
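Aside (illustrative only, not the Model Optimizer implementation): the reference values above are consistent with the standard ONNX pads layout [x1_begin, x2_begin, ..., x1_end, x2_end] being regrouped into per-axis [begin, end] pairs, with zero padding prepended for the batch and channel axes:

    def onnx_pads_to_mo_pad(pads):
        # Split begin/end halves and zip them into per-spatial-axis pairs.
        n = len(pads) // 2
        spatial = [[pads[i], pads[i + n]] for i in range(n)]
        return [[0, 0], [0, 0]] + spatial  # batch and channel axes get no padding

    assert onnx_pads_to_mo_pad([1, 2, 3, 4]) == [[0, 0], [0, 0], [1, 3], [2, 4]]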
diff --git a/model-optimizer/extensions/front/onnx/crop_ext_test.py b/model-optimizer/extensions/front/onnx/crop_ext_test.py
new file mode 100644 (file)
index 0000000..1696b69
--- /dev/null
@@ -0,0 +1,80 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+import onnx
+
+from extensions.front.onnx.crop_ext import CropFrontExtractor
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class CropONNXExtractorTest(unittest.TestCase):
+    @staticmethod
+    def _create_node(attrs: dict):
+        pb = onnx.helper.make_node("Crop", ["X"], ["Y"], **attrs)
+        graph = build_graph({'node_0': {'pb': pb}}, [])
+        return Node(graph, 'node_0')
+
+    @staticmethod
+    def _base_attrs():
+        # Commonly used attributes in the tests
+        # Each test starts from these attributes and then adds/modifies/deletes particular fields
+        return (
+            # test input ONNX attributes
+            dict(
+                border=[5, 10, 15, 20],
+            ),
+            # reference output Node attributes
+            dict(
+                op='Crop',
+                crop_begin=np.array([10, 5]),
+                crop_end=np.array([20, 15]),
+                axis=np.array([2, 3])
+            )
+        )
+
+    @staticmethod
+    def _extract(inp):
+        node = __class__._create_node(inp)
+        CropFrontExtractor.extract(node)
+        return node.graph.node[node.id]
+
+    def _match(self, out, ref):
+        for key in ref.keys():
+            status = out[key] == ref[key]
+            if type(status) in [list, np.ndarray]:
+                status = np.all(status)
+            self.assertTrue(status, 'Mismatch for field {}, observed: {}, expected: {}'.format(key, out[key], ref[key]))
+
+    def test_default(self):
+        inp, ref = self._base_attrs()
+        out = self._extract(inp)
+        self._match(out, ref)
+
+    def test_with_scale(self):
+        inp, ref = self._base_attrs()
+        inp['scale'] = np.array([34, 50])
+
+        del ref['crop_begin']
+        del ref['crop_end']
+        ref['dim'] = np.array([34, 50])
+        ref['offset'] = np.array([10, 5])
+
+        out = self._extract(inp)
+        self._match(out, ref)
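Aside (illustrative only): the Crop reference values above are consistent with the ONNX Crop border layout [left, top, right, bottom] being reordered into height-first begin/end pairs over the spatial axes [2, 3]:

    border = [5, 10, 15, 20]                # left, top, right, bottom
    crop_begin = [border[1], border[0]]     # [top, left]     -> [10, 5]
    crop_end = [border[3], border[2]]       # [bottom, right] -> [20, 15]
    assert (crop_begin, crop_end) == ([10, 5], [20, 15])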
diff --git a/model-optimizer/extensions/front/onnx/elu_ext_test.py b/model-optimizer/extensions/front/onnx/elu_ext_test.py
new file mode 100644 (file)
index 0000000..e509e4e
--- /dev/null
@@ -0,0 +1,58 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import onnx
+from generator import generator, generate
+
+from extensions.front.onnx.elu_ext import EluFrontExtractor
+from mo.ops.activation import Activation
+from mo.ops.op import Op
+from mo.utils.unittest.extractors import PB
+
+
+@generator
+class TestEluONNXExt(unittest.TestCase):
+    @staticmethod
+    def _create_elu_node(alpha=1.0):
+        pb = onnx.helper.make_node(
+            'Elu',
+            inputs=['x'],
+            outputs=['y'],
+            alpha=alpha
+        )
+        node = PB({'pb': pb})
+        return node
+
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Elu'] = Activation
+
+    @generate(*[1.0, 2.0, 3.0])
+    def test_elu_ext(self, alpha):
+        node = self._create_elu_node(alpha)
+        EluFrontExtractor.extract(node)
+
+        exp_res = {
+            'type': 'Activation',
+            'operation': 'elu',
+            'alpha': alpha,
+            'infer': Activation.infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/onnx/flatten_ext_test.py b/model-optimizer/extensions/front/onnx/flatten_ext_test.py
new file mode 100644 (file)
index 0000000..5498343
--- /dev/null
@@ -0,0 +1,57 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import onnx
+from generator import generator, generate
+
+from extensions.front.onnx.flatten_ext import FlattenFrontExtractor
+from mo.ops.flatten_onnx import FlattenONNX
+from mo.ops.op import Op
+from mo.utils.unittest.extractors import PB
+
+
+@generator
+class TestFlattenONNXExt(unittest.TestCase):
+    @staticmethod
+    def _create_flatten_node(axis):
+        pb = onnx.helper.make_node(
+            'Flatten',
+            inputs=['a'],
+            outputs=['b'],
+            axis=axis,
+        )
+        node = PB({'pb': pb})
+        return node
+
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Flatten'] = FlattenONNX
+
+    @generate(*[x for x in range(4)])
+    def test_flatten_ext(self, axis):
+        node = self._create_flatten_node(axis)
+        FlattenFrontExtractor.extract(node)
+
+        exp_res = {
+            'type': 'Reshape',
+            'axis': axis,
+            'infer': FlattenONNX.infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/onnx/gather_ext_test.py b/model-optimizer/extensions/front/onnx/gather_ext_test.py
new file mode 100644 (file)
index 0000000..d91c793
--- /dev/null
@@ -0,0 +1,57 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import onnx
+from generator import generator, generate
+
+from extensions.front.onnx.gather_ext import GatherFrontExtractor
+from extensions.ops.gather import Gather
+from mo.ops.op import Op
+from mo.utils.unittest.extractors import PB
+
+
+@generator
+class TestGatherONNXExt(unittest.TestCase):
+    @staticmethod
+    def _create_gather_node(axis=0):
+        pb = onnx.helper.make_node(
+            'Gather',
+            inputs=['data', 'indices'],
+            outputs=['y'],
+            axis=axis,
+        )
+        node = PB({'pb': pb})
+        return node
+
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Gather'] = Gather
+
+    @generate(*[0, 1, 2, 3])
+    def test_gather_ext(self, axis):
+        node = self._create_gather_node(axis)
+        GatherFrontExtractor.extract(node)
+
+        exp_res = {
+            'type': 'Gather',
+            'axis': axis,
+            'infer': Gather.infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/onnx/image_scaler_ext_test.py b/model-optimizer/extensions/front/onnx/image_scaler_ext_test.py
new file mode 100644 (file)
index 0000000..8f5fb04
--- /dev/null
@@ -0,0 +1,52 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+import onnx
+
+from extensions.front.onnx.image_scaler_ext import ImageScalerFrontExtractor
+from mo.utils.unittest.extractors import PB
+
+
+class TestImageScalerONNXExt(unittest.TestCase):
+    @staticmethod
+    def _create_image_scaler_node():
+        pb = onnx.helper.make_node(
+            'ImageScaler',
+            inputs=['a'],
+            outputs=['b'],
+            scale=1.0,
+            bias=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0],
+        )
+        node = PB({'pb': pb, 'graph': PB({'graph': {'layout': 'NCHW'}})})
+        return node
+
+    def test_image_scaler_ext(self):
+        node = self._create_image_scaler_node()
+        ImageScalerFrontExtractor.extract(node)
+
+        exp_res = {
+            'scale': 1.0,
+            'bias': [[[1.0]], [[2.0]], [[3.0]], [[4.0]], [[5.0]], [[6.0]], [[7.0]], [[8.0]]],
+        }
+
+        for key in exp_res.keys():
+            if type(node[key]) in [list, np.ndarray]:
+                self.assertTrue(np.array_equal(np.array(node[key]), np.array(exp_res[key])))
+            else:
+                self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/onnx/instance_normalization_ext_test.py b/model-optimizer/extensions/front/onnx/instance_normalization_ext_test.py
new file mode 100644 (file)
index 0000000..c38a30f
--- /dev/null
@@ -0,0 +1,44 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import onnx
+
+from extensions.front.onnx.instance_normalization_ext import InstanceNormalizationExtractor
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class TestInstanceNormalization(BaseExtractorsTestingClass):
+    @staticmethod
+    def _create_node():
+        pb = onnx.helper.make_node(
+            'InstanceNormalization',
+            inputs=['a'],
+            outputs=['b'],
+            epsilon=0.5,
+        )
+        node = PB({'pb': pb})
+        return node
+
+    def test_instance_normalization_ext(self):
+        node = self._create_node()
+        InstanceNormalizationExtractor.extract(node)
+        self.res = node
+
+        self.expected = {
+            'epsilon': 0.5,
+        }
+
+        self.compare()
diff --git a/model-optimizer/extensions/front/onnx/pad_ext_test.py b/model-optimizer/extensions/front/onnx/pad_ext_test.py
new file mode 100644 (file)
index 0000000..1f4f25d
--- /dev/null
@@ -0,0 +1,80 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import onnx
+
+from extensions.front.onnx.pad_ext import PadFrontExtractor
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class TestPad(BaseExtractorsTestingClass):
+    @staticmethod
+    def _create_node(pads=None, value=None, mode=None):
+        if pads is None:
+            pads = [1, 2, 3, 4]
+        if value is None:
+            value = 0.0
+        if mode is None:
+            mode = 'constant'
+        pb = onnx.helper.make_node(
+            'Pad',
+            pads=pads,
+            mode=mode,
+            value=value,
+            inputs=['a'],
+            outputs=['b']
+        )
+        node = PB({'pb': pb})
+        return node
+
+    def test_ok(self):
+        node = self._create_node()
+        PadFrontExtractor.extract(node)
+        self.res = node
+
+        self.expected = {
+            'pads': [[1, 3], [2, 4]],
+            'mode': 'constant',
+            'fill_value': 0
+        }
+
+        self.compare()
+
+    def test_reflect(self):
+        node = self._create_node(mode='reflect')
+        PadFrontExtractor.extract(node)
+        self.res = node
+
+        self.expected = {
+            'pads': [[1, 3], [2, 4]],
+            'mode': 'reflect',
+            'fill_value': 0
+        }
+
+        self.compare()
+
+    def test_non_zero_fill_value(self):
+        node = self._create_node(value=1.0)
+        PadFrontExtractor.extract(node)
+        self.res = node
+
+        self.expected = {
+            'pads': [[1, 3], [2, 4]],
+            'mode': 'constant',
+            'fill_value': 1.0
+        }
+
+        self.compare()
diff --git a/model-optimizer/extensions/front/onnx/sigmoid_ext_test.py b/model-optimizer/extensions/front/onnx/sigmoid_ext_test.py
new file mode 100644 (file)
index 0000000..3d25ea1
--- /dev/null
@@ -0,0 +1,62 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+import onnx
+
+from extensions.front.onnx.sigmoid_ext import SigmoidFrontExtractor
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class SigmoidONNXExtractorTest(unittest.TestCase):
+    @staticmethod
+    def _create_node():
+        pb = onnx.helper.make_node("Sigmoid", ["X"], ["Y"])
+        graph = build_graph({'node_0': {'pb': pb}}, [])
+        return Node(graph, 'node_0')
+
+    @staticmethod
+    def _base_attrs():
+        # Commonly used attributes in the tests
+        # Each test starts from these attributes and then adds/modifies/deletes particular fields
+        return (
+            # reference output Node attributes
+            dict(
+                op='Activation',
+                operation='sigmoid'
+            )
+        )
+
+    @staticmethod
+    def _extract():
+        node = __class__._create_node()
+        SigmoidFrontExtractor.extract(node)
+        return node.graph.node[node.id]
+
+    def _match(self, out, ref):
+        for key in ref.keys():
+            status = out[key] == ref[key]
+            if type(status) in [list, np.ndarray]:
+                status = np.all(status)
+            self.assertTrue(status, 'Mismatch for field {}, observed: {}, expected: {}'.format(key, out[key], ref[key]))
+
+    def test_default(self):
+        ref = self._base_attrs()
+        out = self._extract()
+        self._match(out, ref)
diff --git a/model-optimizer/extensions/front/onnx/slice_ext_test.py b/model-optimizer/extensions/front/onnx/slice_ext_test.py
new file mode 100644 (file)
index 0000000..74ab96a
--- /dev/null
@@ -0,0 +1,75 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+import onnx
+from generator import generator, generate
+
+from extensions.front.onnx.slice_ext import SliceFrontExtractor
+from mo.ops.op import Op
+from mo.ops.slice import Slice
+from mo.utils.unittest.extractors import PB
+
+
+@generator
+class TestSliceONNXExt(unittest.TestCase):
+    @staticmethod
+    def _create_slice_node(axes, starts, ends):
+        if axes is None:
+            pb = onnx.helper.make_node(
+                'Slice',
+                inputs=['x'],
+                outputs=['y'],
+                starts=starts,
+                ends=ends,
+            )
+        else:
+            pb = onnx.helper.make_node(
+                'Slice',
+                inputs=['x'],
+                outputs=['y'],
+                axes=axes,
+                starts=starts,
+                ends=ends,
+            )
+
+        node = PB({'pb': pb})
+        return node
+
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Slice'] = Slice
+
+    @generate(*[([0, 1], [0, 0], [28, 28]), (None, [0, 0], [28, 28])])
+    def test_slice_ext(self, axes, starts, ends):
+        node = self._create_slice_node(axes, starts, ends)
+        SliceFrontExtractor.extract(node)
+
+        exp_res = {
+            'op': 'Slice',
+            'axis': axes,
+            'start': starts,
+            'end': ends,
+            'infer': Slice.infer
+        }
+
+        for key in exp_res.keys():
+            if type(node[key]) in [list, np.ndarray]:
+                self.assertTrue(np.array_equal(np.array(node[key]), np.array(exp_res[key])))
+            else:
+                self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/onnx/squeeze_ext_test.py b/model-optimizer/extensions/front/onnx/squeeze_ext_test.py
new file mode 100644 (file)
index 0000000..5c69728
--- /dev/null
@@ -0,0 +1,68 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+import onnx
+from generator import generator, generate
+
+from extensions.front.onnx.squeeze_ext import SqueezeFrontExtractor
+from mo.ops.op import Op
+from mo.ops.squeeze import Squeeze
+from mo.utils.unittest.extractors import PB
+
+
+@generator
+class TestSqueezeONNXExt(unittest.TestCase):
+    @staticmethod
+    def _create_squeeze_node(axes):
+        if axes is None:
+            pb = onnx.helper.make_node(
+                'Squeeze',
+                inputs=['x'],
+                outputs=['y'],
+            )
+        else:
+            pb = onnx.helper.make_node(
+                'Squeeze',
+                inputs=['x'],
+                outputs=['y'],
+                axes=axes,
+            )
+
+        node = PB({'pb': pb})
+        return node
+
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Squeeze'] = Squeeze
+
+    @generate(*[[0, 1, 2, 3], [1], None])
+    def test_squeeze_ext(self, axes):
+        node = self._create_squeeze_node(axes)
+        SqueezeFrontExtractor.extract(node)
+
+        exp_res = {
+            'type': 'Reshape',
+            'squeeze_dims': axes,
+        }
+
+        for key in exp_res.keys():
+            if type(node[key]) in [list, np.ndarray]:
+                self.assertTrue(np.array_equal(np.array(node[key]), np.array(exp_res[key])))
+            else:
+                self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/onnx/tanh_ext_test.py b/model-optimizer/extensions/front/onnx/tanh_ext_test.py
new file mode 100644 (file)
index 0000000..25b8586
--- /dev/null
@@ -0,0 +1,62 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+import onnx
+
+from extensions.front.onnx.tanh_ext import TanhFrontExtractor
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+
+
+class TanhONNXExtractorTest(unittest.TestCase):
+    @staticmethod
+    def _create_node():
+        pb = onnx.helper.make_node("Tanh", ["X"], ["Y"])
+        graph = build_graph({'node_0': {'pb': pb}}, [])
+        return Node(graph, 'node_0')
+
+    @staticmethod
+    def _base_attrs():
+        # Commonly used attributes in the tests
+        # Each test starts from these attributes and then adds/modifies/deletes particular fields
+        return (
+            # reference output Node attributes
+            dict(
+                op='Activation',
+                operation='tanh'
+            )
+        )
+
+    @staticmethod
+    def _extract():
+        node = __class__._create_node()
+        TanhFrontExtractor.extract(node)
+        return node.graph.node[node.id]
+
+    def _match(self, out, ref):
+        for key in ref.keys():
+            status = out[key] == ref[key]
+            if type(status) in [list, np.ndarray]:
+                status = np.all(status)
+            self.assertTrue(status, 'Mismatch for field {}, observed: {}, expected: {}'.format(key, out[key], ref[key]))
+
+    def test_default(self):
+        ref = self._base_attrs()
+        out = self._extract()
+        self._match(out, ref)
diff --git a/model-optimizer/extensions/front/onnx/transpose_ext_test.py b/model-optimizer/extensions/front/onnx/transpose_ext_test.py
new file mode 100644 (file)
index 0000000..2880c2d
--- /dev/null
@@ -0,0 +1,75 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import itertools
+import unittest
+
+import numpy as np
+import onnx
+from generator import generator, generate
+
+from extensions.front.onnx.transpose_ext import TransposeFrontExtractor
+from mo.ops.op import Op
+from mo.ops.permute import Permute
+from mo.utils.unittest.extractors import PB
+
+
+@generator
+class TestTransposeONNXExt(unittest.TestCase):
+    @staticmethod
+    def _create_transpose_node(order: list):
+        if order is None:
+            # Default transpose
+            pb = onnx.helper.make_node(
+                'Transpose',
+                inputs=['data'],
+                outputs=['transposed'],
+            )
+        else:
+            # Transpose with order
+            pb = onnx.helper.make_node(
+                'Transpose',
+                inputs=['data'],
+                outputs=['transposed'],
+                perm=order
+            )
+        node = PB({'pb': pb})
+        return node
+
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Permute'] = Permute
+        pass
+
+    # This generator generates all permutations for [0,1,2,3] and [0,1,2] orders
+    @generate(*[list(order) for order in list(itertools.permutations(np.arange(4)))] +
+               [list(order) for order in list(itertools.permutations(np.arange(3)))] + [None])
+    def test_transpose_ext(self, order):
+        node = self._create_transpose_node(order)
+        TransposeFrontExtractor.extract(node)
+
+        exp_res = {
+            'type': 'Permute',
+            'order': order,
+            'infer': Permute.infer
+        }
+
+        for key in exp_res.keys():
+            if isinstance(exp_res[key], list):
+                self.assertTrue(np.array_equal(node[key], exp_res[key]),
+                                "Orders are not the same: {} and {}".format(node[key], exp_res[key]))
+            else:
+                self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/onnx/unsqueeze_ext_test.py b/model-optimizer/extensions/front/onnx/unsqueeze_ext_test.py
new file mode 100644 (file)
index 0000000..7cdcdae
--- /dev/null
@@ -0,0 +1,60 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+import onnx
+from generator import generator, generate
+
+from extensions.front.onnx.unsqueeze_ext import UnsqueezeFrontExtractor
+from mo.ops.op import Op
+from mo.ops.unsqueeze import Unsqueeze
+from mo.utils.unittest.extractors import PB
+
+
+@generator
+class TestUnsqueezeONNXExt(unittest.TestCase):
+    @staticmethod
+    def _create_unsqueeze_node(axes):
+        pb = onnx.helper.make_node(
+            'Unsqueeze',
+            inputs=['x'],
+            outputs=['y'],
+            axes=axes,
+        )
+
+        node = PB({'pb': pb})
+        return node
+
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Unsqueeze'] = Unsqueeze
+
+    @generate(*[[0, 1, 2, 3], [1]])
+    def test_unsqueeze_ext(self, axes):
+        node = self._create_unsqueeze_node(axes)
+        UnsqueezeFrontExtractor.extract(node)
+
+        exp_res = {
+            'unsqueeze_dims': axes,
+        }
+
+        for key in exp_res.keys():
+            if type(node[key]) in [list, np.ndarray]:
+                self.assertTrue(np.array_equal(np.array(node[key]), np.array(exp_res[key])))
+            else:
+                self.assertEqual(node[key], exp_res[key])
diff --git a/model-optimizer/extensions/front/onnx/upsample_ext_test.py b/model-optimizer/extensions/front/onnx/upsample_ext_test.py
new file mode 100644 (file)
index 0000000..e363417
--- /dev/null
@@ -0,0 +1,104 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import onnx
+
+from extensions.front.onnx.upsample_ext import UpsampleFrontExtractor
+from mo.utils.unittest.graph import build_graph
+from mo.graph.graph import Node
+from mo.utils.error import Error
+from mo.utils.unittest.extractors import BaseExtractorsTestingClass
+
+
+class UpsampleONNXExtractorTest(BaseExtractorsTestingClass):
+    @staticmethod
+    def _create_node(attrs: dict):
+        pb = onnx.helper.make_node("Upsample", ["X"], ["Y"], **attrs)
+        graph = build_graph({'node_0': {'pb': pb}}, [])
+        return Node(graph, 'node_0')
+
+    @staticmethod
+    def _base_attrs():
+        # Commonly used attributes in the tests
+        # Each test starts from these attributes and then adds/modifies/deletes particular fields
+        return (
+            # test input ONNX attributes
+            dict(
+                mode='nearest',
+                width_scale=2.0,
+                height_scale=2.0,
+            ),
+            # reference output Node attributes
+            dict(
+                type='Resample',
+                resample_type='caffe.ResampleParameter.NEAREST',
+                factor=2,
+                antialias=0,
+            )
+        )
+
+    @staticmethod
+    def _extract(inp):
+        node = __class__._create_node(inp)
+        UpsampleFrontExtractor.extract(node)
+        return node
+
+    def _match(self, out, ref):
+        self.res = out
+        self.expected = ref
+        self.compare()
+
+    def test_all_valid_default(self):
+        inp, ref = self._base_attrs()
+        out = self._extract(inp)
+        self._match(out, ref)
+
+    def test_invalid_mode(self):
+        inp, ref = self._base_attrs()
+        inp['mode'] = 'invalid_mode'
+        with self.assertRaisesRegex(Error, '.*decoding Upsample.*supported modes.*'):
+            out = self._extract(inp)
+
+    def test_unsupported_linear(self):
+        inp, ref = self._base_attrs()
+        inp['mode'] = 'linear'
+        with self.assertRaisesRegex(Error, '.*Only nearest is supported.*'):
+            out = self._extract(inp)
+
+    def test_unsupported_scale(self):
+        inp, ref = self._base_attrs()
+        inp['scales'] = [2.0, 2.0]
+        with self.assertRaisesRegex(Error, '.*Only scale_width and scale_height are supported.*'):
+            out = self._extract(inp)
+
+    def test_missing_width_scale(self):
+        inp, ref = self._base_attrs()
+        del inp['width_scale']
+        with self.assertRaisesRegex(Error, '.*One/both of widths_scale.*and height_scale.*is not defined.*'):
+            out = self._extract(inp)
+
+    def test_missing_height_scale(self):
+        inp, ref = self._base_attrs()
+        del inp['height_scale']
+        with self.assertRaisesRegex(Error, '.*One/both of widths_scale.*and height_scale.*is not defined.*'):
+            out = self._extract(inp)
+
+    def test_different_scales(self):
+        inp, ref = self._base_attrs()
+        inp['height_scale'] = 2.0
+        inp['width_scale'] = 3.0
+        with self.assertRaisesRegex(Error, '.*different widths_scale.*and height_scale.*not supported.*'):
+            out = self._extract(inp)
diff --git a/model-optimizer/extensions/front/reciprocal_test.py b/model-optimizer/extensions/front/reciprocal_test.py
new file mode 100644 (file)
index 0000000..527cb7e
--- /dev/null
@@ -0,0 +1,55 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.reciprocal import ReciprocalReplacer
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+
+class ReciprocalReplacerTests(unittest.TestCase):
+    @staticmethod
+    def _create_graphs():
+        return (
+            build_graph(
+                {'placeholder': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+                 'reciprocal': {'kind': 'op', 'op': 'Reciprocal'}},
+                [('placeholder', 'reciprocal')]),
+
+            build_graph(
+                {'placeholder': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+                 'power': {'type': 'Power', 'kind': 'op', 'op': 'Power', 'scale': 1, 'power': -1, 'shift': 0}},
+                [('placeholder', 'power')])
+        )
+
+    def test_replace_reciprocal(self):
+        graph, graph_ref = __class__._create_graphs()
+
+        pattern = ReciprocalReplacer()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'reciprocal/power_', last_node_ref='power', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_neg_replace_reciprocal(self):
+        graph, graph_ref = __class__._create_graphs()
+        graph_ref.node['power']['power'] = 0
+
+        pattern = ReciprocalReplacer()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'reciprocal/power_', last_node_ref='power', check_op_attrs=True)
+        self.assertTrue(not flag)
\ No newline at end of file
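Aside (not part of the patch): the reference graph above replaces Reciprocal with a Power layer carrying scale=1, power=-1, shift=0, i.e. it relies on the identity 1/x == x**(-1). A quick numeric check:

    import numpy as np

    x = np.array([0.5, 2.0, 4.0])
    assert np.allclose(1.0 / x, np.power(x, -1.0))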
diff --git a/model-optimizer/extensions/front/tf/ObjectDetectionAPI_test.py b/model-optimizer/extensions/front/tf/ObjectDetectionAPI_test.py
new file mode 100644 (file)
index 0000000..d9056ef
--- /dev/null
@@ -0,0 +1,127 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import networkx as nx
+
+from extensions.front.tf.ObjectDetectionAPI import calculate_shape_keeping_aspect_ratio, \
+    calculate_placeholder_spatial_shape
+from mo.front.subgraph_matcher import SubgraphMatch
+from mo.utils.custom_replacement_config import CustomReplacementDescriptor
+from mo.utils.error import Error
+
+
+class FakePipelineConfig:
+    def __init__(self, model_params: dict):
+        self._model_params = model_params
+
+    def get_param(self, param: str):
+        if param not in self._model_params:
+            return None
+        return self._model_params[param]
+
+
+class TestCalculateShape(unittest.TestCase):
+    min_size = 600
+    max_size = 1024
+
+    def test_calculate_shape_1(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(100, 300, self.min_size, self.max_size), (341, 1024))
+
+    def test_calculate_shape_2(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(100, 600, self.min_size, self.max_size), (171, 1024))
+
+    def test_calculate_shape_3(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(100, 3000, self.min_size, self.max_size), (34, 1024))
+
+    def test_calculate_shape_4(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(300, 300, self.min_size, self.max_size), (600, 600))
+
+    def test_calculate_shape_5(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(300, 400, self.min_size, self.max_size), (600, 800))
+
+    def test_calculate_shape_6(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(300, 600, self.min_size, self.max_size), (512, 1024))
+
+    def test_calculate_shape_7(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(1000, 2500, self.min_size, self.max_size),
+                              (410, 1024))
+
+    def test_calculate_shape_8(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(1800, 2000, self.min_size, self.max_size),
+                              (600, 667))
+
+    def test_calculate_shape_11(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(300, 100, self.min_size, self.max_size), (1024, 341))
+
+    def test_calculate_shape_12(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(600, 100, self.min_size, self.max_size), (1024, 171))
+
+    def test_calculate_shape_13(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(3000, 100, self.min_size, self.max_size), (1024, 34))
+
+    def test_calculate_shape_15(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(400, 300, self.min_size, self.max_size), (800, 600))
+
+    def test_calculate_shape_16(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(600, 300, self.min_size, self.max_size), (1024, 512))
+
+    def test_calculate_shape_17(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(2500, 1000, self.min_size, self.max_size),
+                              (1024, 410))
+
+    def test_calculate_shape_18(self):
+        self.assertTupleEqual(calculate_shape_keeping_aspect_ratio(2000, 1800, self.min_size, self.max_size),
+                              (667, 600))
+
+
+class TestCalculatePlaceholderSpatialShape(unittest.TestCase):
+    def setUp(self):
+        self.graph = nx.MultiDiGraph()
+        self.graph.graph['user_shapes'] = None
+        self.replacement_desc = CustomReplacementDescriptor('dummy_id', {})
+        self.match = SubgraphMatch(self.graph, self.replacement_desc, [], [], [], '')
+        self.pipeline_config = FakePipelineConfig({})
+
+    def test_default_fixed_shape_resizer(self):
+        self.pipeline_config._model_params['resizer_image_height'] = 300
+        self.pipeline_config._model_params['resizer_image_width'] = 600
+        self.assertTupleEqual((300, 600),
+                              calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config))
+
+    def test_fixed_shape_resizer_overridden_by_user(self):
+        self.pipeline_config._model_params['resizer_image_height'] = 300
+        self.pipeline_config._model_params['resizer_image_width'] = 600
+        self.graph.graph['user_shapes'] = {'image_tensor': [{'shape': [1, 400, 500, 3]}]}
+        self.assertTupleEqual((400, 500),
+                              calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config))
+
+    def test_default_keep_aspect_ratio_resizer(self):
+        self.pipeline_config._model_params['resizer_min_dimension'] = 600
+        self.pipeline_config._model_params['resizer_max_dimension'] = 1024
+        self.assertTupleEqual((600, 600),
+                              calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config))
+
+    def test_keep_aspect_ratio_resizer_overridden_by_user(self):
+        self.pipeline_config._model_params['resizer_min_dimension'] = 600
+        self.pipeline_config._model_params['resizer_max_dimension'] = 1024
+        self.graph.graph['user_shapes'] = {'image_tensor': [{'shape': [1, 400, 300, 3]}]}
+        self.assertTupleEqual((800, 600),
+                              calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config))
+
+    def test_missing_input_shape_information(self):
+        self.assertRaises(Error, calculate_placeholder_spatial_shape, self.graph, self.match, self.pipeline_config)
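Aside (a sketch under stated assumptions, not the Model Optimizer code): the expectations in TestCalculateShape above are consistent with the TensorFlow keep-aspect-ratio resizer rule — scale the image so the shorter side reaches min_size, then clamp the scale so the longer side does not exceed max_size:

    def keep_aspect_ratio_shape(height, width, min_size=600, max_size=1024):
        # Scale up to the minimum size, but never let the longer side pass max_size.
        ratio = min_size / min(height, width)
        if max(height, width) * ratio > max_size:
            ratio = max_size / max(height, width)
        return int(round(height * ratio)), int(round(width * ratio))

    assert keep_aspect_ratio_shape(100, 300) == (341, 1024)
    assert keep_aspect_ratio_shape(300, 300) == (600, 600)
    assert keep_aspect_ratio_shape(1800, 2000) == (600, 667)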
diff --git a/model-optimizer/extensions/front/tf/concat_ext_test.py b/model-optimizer/extensions/front/tf/concat_ext_test.py
new file mode 100644 (file)
index 0000000..9cf9021
--- /dev/null
@@ -0,0 +1,34 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from extensions.front.tf.concat_ext import ConcatFrontExtractor
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class ConcatExtractorTest(BaseExtractorsTestingClass):
+    def test_concat(self):
+        node = PB({'pb': PB({'attr': {'N': PB({'i': 4})}})})
+        self.expected = {
+            'N': 4,
+            'simple_concat': True,
+            'type': 'Concat',
+            'op': 'Concat',
+            'kind': 'op',
+            'axis': 1
+        }
+        ConcatFrontExtractor.extract(node)
+        self.res = node
+        self.compare()
diff --git a/model-optimizer/extensions/front/tf/concat_test.py b/model-optimizer/extensions/front/tf/concat_test.py
new file mode 100644 (file)
index 0000000..abee3b0
--- /dev/null
@@ -0,0 +1,47 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.tf.concat import Concat
+from mo.utils.unittest.graph import build_graph_with_edge_attrs
+
+
+class TestConcatEdgesReshuffler(unittest.TestCase):
+    def test_concat_edges_reshuffle(self):
+        graph = build_graph_with_edge_attrs(
+            {'axis': {},
+             'input_1': {},
+             'input_2': {},
+             'input_3': {},
+             'concat': {'op': 'Concat', 'simple_concat': True, 'axis': 1},
+             },
+            [('axis', 'concat', {'in': 0}),
+             ('input_1', 'concat', {'in': 1}),
+             ('input_2', 'concat', {'in': 2}),
+             ('input_3', 'concat', {'in': 3})],
+        )
+        Concat().find_and_replace_pattern(graph=graph)
+        for u, v, attrs in graph.in_edges('concat', data=True):
+            if attrs['in'] == 0:
+                self.assertEqual(u, 'input_1')
+            if attrs['in'] == 1:
+                self.assertEqual(u, 'input_2')
+            if attrs['in'] == 2:
+                self.assertEqual(u, 'input_3')
+            if attrs['in'] == 3:
+                self.assertEqual(u, 'axis')
+        self.assertTrue('axis' not in graph.node['concat'])
diff --git a/model-optimizer/extensions/front/tf/conv_ext_test.py b/model-optimizer/extensions/front/tf/conv_ext_test.py
new file mode 100644 (file)
index 0000000..d420f91
--- /dev/null
@@ -0,0 +1,140 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from extensions.front.tf.conv_ext import Conv2DFrontExtractor, DepthwiseConv2dNativeFrontExtractor
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class ConvExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.strides = [1, 2, 3, 4]
+        cls.dilations = [1, 1, 1, 1]
+
+    def test_conv_2d_defaults(self):
+        node = PB({'pb': PB({'attr': {
+            'data_format': PB({
+                's': b"NHWC"
+            }),
+            'strides': PB({
+                'list': PB({"i": self.strides})
+            }),
+            'padding': PB({
+                's': b'VALID'
+            }),
+            'dilations': PB({
+                'list': PB({"i": [1, 1, 1, 1]})
+            })
+        }})})
+        self.expected = {
+            'bias_addable': True,
+            'dilation': np.array([1, 1, 1, 1], dtype=np.int8),
+            'type': 'Convolution',
+            'layout': 'NHWC',
+        }
+        Conv2DFrontExtractor.extract(node)
+        self.res = node
+        self.expected_call_args = (None, False)
+        self.compare()
+
+    def test_conv2d_nhwc(self):
+        node = PB({'pb': PB({'attr': {
+            'data_format': PB({
+                's': b"NHWC"
+            }),
+            'strides': PB({
+                'list': PB({"i": self.strides})
+            }),
+            'padding': PB({
+                's': b'VALID'
+            }),
+            'dilations': PB({
+                'list': PB({"i": [1, 1, 1, 1]})
+            })
+        }})})
+        self.expected = {
+            # spatial_dims = [1, 2] will be detected in infer function
+            "channel_dims": [3],
+            "batch_dims": [0],
+            "input_feature_channel": 2,
+            "output_feature_channel": 3,
+            'dilation': np.array([1, 1, 1, 1], dtype=np.int8),
+            'stride': np.array(self.strides, dtype=np.int8),
+        }
+        Conv2DFrontExtractor.extract(node)
+        self.res = node
+        self.expected_call_args = (None, False)
+        self.compare()
+
+    def test_conv2d_nchw(self):
+        node = PB({'pb': PB({'attr': {
+            'data_format': PB({
+                's': b"NCHW"
+            }),
+            'strides': PB({
+                'list': PB({"i": self.strides})
+            }),
+            'padding': PB({
+                's': b'VALID'
+            }),
+            'dilations': PB({
+                'list': PB({"i": [1, 1, 1, 1]})
+            })
+        }})})
+        self.expected = {
+            # spatial_dims = [2, 3] will be detected in infer function
+            "channel_dims": [1],
+            "batch_dims": [0],
+            "input_feature_channel": 2,
+            "output_feature_channel": 3,
+            'dilation': np.array([1, 1, 1, 1], dtype=np.int8),
+            'stride': np.array(self.strides, dtype=np.int8),
+        }
+        Conv2DFrontExtractor.extract(node)
+        self.res = node
+        self.expected_call_args = (None, False)
+        self.compare()
+
+    def test_conv2d_depthwise(self):
+        node = PB({'pb': PB({'attr': {
+            'data_format': PB({
+                's': b"NHWC"
+            }),
+            'strides': PB({
+                'list': PB({"i": self.strides}),
+            }),
+            'dilations': PB({
+                'list': PB({"i": self.dilations}),
+            }),
+            'padding': PB({
+                's': b'VALID'
+            })
+        }})})
+        self.expected = {
+            # spatial_dims = [1, 2] will be detected in infer function
+            "channel_dims": [3],
+            "batch_dims": [0],
+            "input_feature_channel": 2,
+            "output_feature_channel": 2,
+            'dilation': np.array([1, 1, 1, 1], dtype=np.int8),
+            'stride': np.array(self.strides, dtype=np.int8),
+        }
+        DepthwiseConv2dNativeFrontExtractor.extract(node)
+        self.res = node
+        self.expected_call_args = (None, True)
+        self.compare()
diff --git a/model-optimizer/extensions/front/tf/deconv_ext_test.py b/model-optimizer/extensions/front/tf/deconv_ext_test.py
new file mode 100644 (file)
index 0000000..c11d4da
--- /dev/null
@@ -0,0 +1,100 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from extensions.front.tf.deconv_ext import Conv2DBackpropInputFrontExtractor
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class DeconvolutionExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.strides = [1, 2, 3, 4]
+
+    def test_deconv2d_defaults(self):
+        node = PB({'pb': PB({'attr': {
+            'data_format': PB({
+                's': b"NHWC"
+            }),
+            'strides': PB({
+                'list': PB({"i": self.strides})
+            }),
+            'padding': PB({
+                's': b'VALID'
+            })
+        }})})
+        self.expected = {
+            'bias_addable': True,
+            'pad': None,  # will be inferred when input shape is known
+            'pad_spatial_shape': None,
+            'output_spatial_shape': None,
+            'output_shape': None,
+            'group': None,
+        }
+        Conv2DBackpropInputFrontExtractor.extract(node)
+        self.res = node
+        self.expected_call_args = (None, False)
+        self.compare()
+
+    def test_deconv2d_nhwc(self):
+        node = PB({'pb': PB({'attr': {
+            'data_format': PB({
+                's': b"NHWC"
+            }),
+            'strides': PB({
+                'list': PB({"i": self.strides})
+            }),
+            'padding': PB({
+                's': b'VALID'
+            })
+        }})})
+
+        self.expected = {
+            "spatial_dims": [1, 2],
+            "channel_dims": [3],
+            "batch_dims": [0],
+            'stride': np.array(self.strides, dtype=np.int8),
+        }
+
+        Conv2DBackpropInputFrontExtractor.extract(node)
+        self.res = node
+        self.expected_call_args = (None, False)
+        self.compare()
+
+    def test_deconv2d_nchw(self):
+        node = PB({'pb': PB({'attr': {
+            'data_format': PB({
+                's': b"NCHW"
+            }),
+            'strides': PB({
+                'list': PB({"i": self.strides})
+            }),
+            'padding': PB({
+                's': b'VALID'
+            })
+        }})})
+        self.expected = {
+            "spatial_dims": [2, 3],
+            "channel_dims": [1],
+            "batch_dims": [0],
+            'stride': np.array(self.strides, dtype=np.int8),
+        }
+
+        Conv2DBackpropInputFrontExtractor.extract(node)
+        self.res = node
+        self.expected_call_args = (None, False)
+        self.compare()
diff --git a/model-optimizer/extensions/front/tf/fifo_replacer_test.py b/model-optimizer/extensions/front/tf/fifo_replacer_test.py
new file mode 100644 (file)
index 0000000..e1150c2
--- /dev/null
@@ -0,0 +1,79 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.front.tf.fifo_replacer import FIFOQueue
+from mo.utils.unittest.graph import build_graph_with_edge_attrs
+
+
+class TestFIFOQueueReplacement(unittest.TestCase):
+    def test_fifo_with_label_batch(self):
+        nodes = {
+            'placeholder': {'op': 'Placeholder', 'data_type': np.int32, 'kind': 'op', 'shape': np.array(1)},
+            'batch_join/fifo_queue': {'op': 'FIFOQueueV2', 'name': 'batch_join/fifo_queue',
+                                      'shapes': np.array([[1, 2, 3]]), 'kind': 'op'},
+            'batch_join': {'op': 'QueueDequeueUpToV2', 'kind': 'op'},
+            'image_batch': {'op': 'Identity', 'data_type': np.float32, 'kind': 'op'},
+            'label_batch': {'op': 'Identity', 'kind': 'op'},
+            'label_batch_op_output': {'op': 'OpOutput', 'kind': 'op'},
+        }
+        edges = [
+            ('placeholder', 'batch_join', {'out': 0, 'in': 0}),
+            ('batch_join/fifo_queue', 'batch_join', {'out': 0, 'in': 1}),
+            ('batch_join', 'image_batch', {'out': 0, 'in': 0}),
+            ('batch_join', 'label_batch', {'out': 1, 'in': 0}),
+            ('label_batch', 'label_batch_op_output', {'out': 0, 'in': 0})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        tested_class = FIFOQueue()
+        tested_class.find_and_replace_pattern(graph=graph)
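+        # The replacer is expected to collapse the FIFO queue sub-graph into a single placeholder feeding
+        # 'image_batch', leaving only two nodes and propagating the shape stored on the FIFOQueueV2 node.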
+        after_pattern = graph.nodes()
+        self.assertEqual(2, len(after_pattern))
+        try:
+            new_ph_dict = graph.node[[u for u, v in graph.in_edges('image_batch')][0]]
+        except Exception as e:
+            self.fail("Can't get new placeholder. Broken edge. Additional information: {}".format(e))
+        self.assertEqual(new_ph_dict['name'], 'batch_join/fifo_queue')
+        self.assertTrue(np.array_equal(new_ph_dict['shape'], [1, 2, 3]))
+
+    def test_fifo_with_out_label_batch(self):
+        nodes_no_label = {
+            'placeholder': {'op': 'Placeholder', 'data_type': np.int32, 'kind': 'op', 'shape': np.array(0)},
+            'batch_join/fifo_queue': {'op': 'FIFOQueueV2', 'name': 'batch_join/fifo_queue',
+                                      'shapes': np.array([[1, 2, 3]]), 'kind': 'op'},
+            'batch_join': {'op': 'QueueDequeueUpToV2', 'kind': 'op'},
+            'image_batch': {'op': 'Identity', 'data_type': np.float32, 'kind': 'op'},
+        }
+        edges_no_label = [
+            ('placeholder', 'batch_join', {'out': 0}),
+            ('batch_join/fifo_queue', 'batch_join', {'out': 0}),
+            ('batch_join', 'image_batch', {'out': 0})
+        ]
+
+        graph = build_graph_with_edge_attrs(nodes_no_label, edges_no_label)
+        tested_class = FIFOQueue()
+        tested_class.find_and_replace_pattern(graph=graph)
+        after_pattern = graph.nodes()
+        self.assertEqual(2, len(after_pattern))
+        try:
+            new_ph_dict = graph.node[[u for u, v in graph.in_edges('image_batch')][0]]
+        except Exception as e:
+            self.fail("Can't get new placeholder. Broken edge. Additional information: {}".format(e))
+        self.assertEqual(new_ph_dict['name'], 'batch_join/fifo_queue')
+        self.assertTrue(np.array_equal(new_ph_dict['shape'], np.array([1, 2, 3])))
diff --git a/model-optimizer/extensions/front/tf/mvn_unrolled_test.py b/model-optimizer/extensions/front/tf/mvn_unrolled_test.py
new file mode 100644 (file)
index 0000000..de9618b
--- /dev/null
@@ -0,0 +1,69 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+from extensions.front.tf.mvn_unrolled import MVNUnrolled
+from mo.ops.op import Op
+from mo.utils.unittest.graph import compare_graphs, build_graph_with_attrs
+from extensions.ops.mvn import MVN
+
+
+class MVNUnrolledMatchingTests(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['MVN'] = MVN
+
+    def test(self):
+        pattern_matcher = MVNUnrolled()
+        pattern = pattern_matcher.pattern()
+        graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], update_edge_attrs=None,
+                                       new_nodes_with_attrs=[('reduction_indices', {'kind': 'data'}),
+                                                             ('conv2d', {'kind': 'op'}),
+                                                             ('variance_reduction', {'kind': 'data'}),
+                                                             ('pow2', {'kind': 'data'}),
+                                                             ('eps', {'kind': 'data'}),
+                                                             ('next_op', {'kind': 'op'})],
+                                       new_edges_with_attrs=[('reduction_indices', 'mean', {'in': 1}),
+                                                             ('conv2d', 'mean', {'in': 0, 'out': 1}),
+                                                             ('variance_reduction', 'variance', {'in': 1}),
+                                                             ('pow2', 'pow', {'in': 1}),
+                                                             ('eps', 'add'), ('truediv', 'next_op')])
+        graph.graph['layout'] = 'NHWC'
+        pattern_matcher.find_and_replace_pattern(graph)
+
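+        # Reference graph: the unrolled mean/variance/normalization sub-graph is fused into a single MVN node
+        # that consumes the same data nodes and feeds 'next_op'.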
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'][:-1],
+                                           edges_with_attrs=pattern['edges'][:-2], update_edge_attrs=None,
+                                           new_nodes_with_attrs=[('reduction_indices', {'kind': 'data'}),
+                                                                 ('conv2d', {'kind': 'op'}),
+                                                                 ('variance_reduction', {'kind': 'data'}),
+                                                                 ('pow2', {'kind': 'data'}),
+                                                                 ('eps', {'kind': 'data'}),
+                                                                 ('mvn', {'kind': 'op', 'op': 'MVN'}),
+                                                                 ('next_op', {'kind': 'op'})],
+                                           new_edges_with_attrs=[('reduction_indices', 'mean', {'in': 1}),
+                                                                 ('conv2d', 'mean', {'in': 0}),
+                                                                 ('variance_reduction', 'variance', {'in': 1}),
+                                                                 ('pow2', 'pow', {'in': 1}),
+                                                                 ('eps', 'add'),
+                                                                 ('conv2d', 'mvn', {'in': 0}),
+                                                                 ('reduction_indices', 'mvn', {'in': 1}),
+                                                                 ('variance_reduction', 'mvn', {'in': 2}),
+                                                                 ('pow2', 'mvn', {'in': 3}),
+                                                                 ('eps', 'mvn', {'in': 4}),
+                                                                 ('mvn', 'next_op')])
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'next_op', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/front/tf/next_iteration_ext_test.py b/model-optimizer/extensions/front/tf/next_iteration_ext_test.py
new file mode 100644 (file)
index 0000000..98e0ab6
--- /dev/null
@@ -0,0 +1,30 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from extensions.front.tf.next_iteration_ext import NextIterationExtractor
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class TestNextIteration(BaseExtractorsTestingClass):
+    def test_is_cyclic(self):
+        pb = PB({})
+        node = PB({'pb': pb})
+        NextIterationExtractor.extract(node)
+        self.expected = {
+            'is_cyclic': True,
+        }
+        self.res = node
+        self.compare()
diff --git a/model-optimizer/extensions/front/tf/pad_ext_test.py b/model-optimizer/extensions/front/tf/pad_ext_test.py
new file mode 100644 (file)
index 0000000..138b4f0
--- /dev/null
@@ -0,0 +1,27 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.front.tf.pad_ext import PadFrontExtractor
+from mo.utils.unittest.extractors import PB
+
+
+class TestPad(unittest.TestCase):
+    def test_no_pads(self):
+        node = PB({})
+        PadFrontExtractor.extract(node)
+        self.assertTrue('pads' not in node or node['pads'] is None)
diff --git a/model-optimizer/extensions/front/tf/pooling_ext_test.py b/model-optimizer/extensions/front/tf/pooling_ext_test.py
new file mode 100644 (file)
index 0000000..a03095e
--- /dev/null
@@ -0,0 +1,154 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from extensions.front.tf.pooling_ext import AvgPoolFrontExtractor, MaxPoolFrontExtractor
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class PoolingExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.strides = [1, 2, 3, 4]
+        cls.ksize = [1, 3, 3, 1]
+        cls.patcher = 'mo.ops.pooling.Pooling.infer'
+
+    def test_pool_defaults(self):
+        pb = PB({'attr': {
+            'data_format': PB({
+                's': b"NHWC"
+            }),
+            'strides': PB({
+                'list': PB({
+                    "i": self.strides
+                })
+            }),
+            'ksize': PB({
+                'list': PB({"i": self.ksize})
+            }),
+            'padding': PB({
+                's': b'VALID'
+            })
+        }})
+        self.expected = {
+            'pad': None,  # will be inferred when input shape is known
+            'pad_spatial_shape': None,
+            'type': 'Pooling',
+            'exclude_pad': 'true',
+        }
+        node = PB({'pb': pb})
+        AvgPoolFrontExtractor.extract(node)
+        self.res = node
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = (None, None)
+        self.compare()
+
+    def test_avg_pool_nhwc(self):
+        pb = PB({'attr': {
+            'data_format': PB({
+                's': b"NHWC"
+            }),
+            'strides': PB({
+                'list': PB({"i": self.strides})
+            }),
+            'ksize': PB({
+                'list': PB({"i": self.ksize})
+            }),
+            'padding': PB({
+                's': b'VALID'
+            })
+        }})
+        self.expected = {
+            'window': np.array(self.ksize, dtype=np.int8),
+            'spatial_dims': [1, 2],
+            'stride': np.array(self.strides, dtype=np.int8),
+            'pool_method': "avg",
+        }
+        node = PB({'pb': pb})
+        AvgPoolFrontExtractor.extract(node)
+        self.res = node
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = (None, "avg")
+        self.compare()
+
+    def test_avg_pool_nchw(self):
+        pb = PB({'attr': {
+            'data_format': PB({
+                's': b"NCHW"
+            }),
+            'strides': PB({
+                'list': PB({
+                    "i": self.strides
+                })
+            }),
+            'ksize': PB({
+                'list': PB({
+                    "i": self.ksize
+                })
+            }),
+            'padding': PB({
+                's': b'VALID'
+            })
+        }})
+        self.expected = {
+            'window': np.array(self.ksize, dtype=np.int8),
+            'spatial_dims': [2, 3],
+            'stride': np.array(self.strides, dtype=np.int8),
+            'pool_method': "avg",
+        }
+        node = PB({'pb': pb})
+        AvgPoolFrontExtractor.extract(node)
+        self.res = node
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = (None, "avg")
+        self.compare()
+
+    def test_max_pool_nhwc(self):
+        pb = PB({'attr': {
+            'data_format': PB({
+                's': b"NHWC"
+            }),
+            'strides': PB({
+                'list': PB({
+                    "i": self.strides
+                })
+            }),
+            'ksize': PB({
+                'list': PB({
+                    "i": self.ksize
+                })
+            }),
+            'padding': PB({
+                's': b'VALID'
+            })
+        }})
+        self.expected = {
+            'window': np.array(self.ksize, dtype=np.int8),
+            'spatial_dims': [1, 2],
+            'stride': np.array(self.strides, dtype=np.int64),
+            'pool_method': "max",
+        }
+        node = PB({'pb': pb})
+        MaxPoolFrontExtractor.extract(node)
+        self.res = node
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = (None, "max")
+        self.compare()
diff --git a/model-optimizer/extensions/front/tf/stop_gradient_ext_test.py b/model-optimizer/extensions/front/tf/stop_gradient_ext_test.py
new file mode 100644 (file)
index 0000000..6030393
--- /dev/null
@@ -0,0 +1,46 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from extensions.front.tf.stop_gradient_ext import StopGradientExtractor
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class StopGradientTest(BaseExtractorsTestingClass):
+
+    def test_stop_gradient(self):
+        node = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'op': 'StopGradient'
+        }
+        StopGradientExtractor().extract(node)
+        self.res = node
+        self.compare()
+
diff --git a/model-optimizer/extensions/middle/AddIsCyclicAttribute_test.py b/model-optimizer/extensions/middle/AddIsCyclicAttribute_test.py
new file mode 100644 (file)
index 0000000..81f4ba7
--- /dev/null
@@ -0,0 +1,50 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.middle.AddIsCyclicAttribute import AddIsCyclicAttribute
+from mo.utils.unittest.graph import build_graph_with_attrs
+
+
+class AddIsCyclicAttributeTest(unittest.TestCase):
+    nodes = [('node_1', {}),
+             ('node_2', {})]
+    edges = [('node_1', 'node_2')]
+
+    def test_1(self):
+        """
+        Acyclic case => graph.graph['is_cyclic'] should be False.
+        """
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes,
+                                       edges_with_attrs=self.edges)
+        tested_pass = AddIsCyclicAttribute()
+        tested_pass.find_and_replace_pattern(graph)
+
+        assert graph.graph['is_cyclic'] is False
+
+    def test_2(self):
+        """
+        Cyclic case => graph.graph['is_cyclic'] should be True.
+        """
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes,
+                                       edges_with_attrs=self.edges,
+                                       new_edges_with_attrs=[('node_2', 'node_1')])
+        tested_pass = AddIsCyclicAttribute()
+        tested_pass.find_and_replace_pattern(graph)
+
+        assert graph.graph['is_cyclic'] is True
diff --git a/model-optimizer/extensions/middle/AddReshapeAfterStridedSlice_test.py b/model-optimizer/extensions/middle/AddReshapeAfterStridedSlice_test.py
new file mode 100644 (file)
index 0000000..a834d99
--- /dev/null
@@ -0,0 +1,312 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+import unittest
+
+from extensions.middle.AddReshapeAfterStridedSlice import AddReshapeAfterStridedSlice
+from mo.graph.graph import Node
+from mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs
+from mo.middle.passes.eliminate_test import build_graph
+
+# The dictionary with node attributes used to build various graphs. The key is the name of the node and the value is
+# the dictionary with the node's attributes.
+nodes_attributes_test = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_2': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_2_data': {'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_begin_data': {'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_end_data': {'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_stride_data': {'shape': None, 'kind': 'data', 'data_type': None},
+    # StridedSlice layers
+    'sslice_1': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice', 'slices': None,
+                 'shrink_axis_mask': np.array([False, False, True, False]),
+                 'new_axis_mask': np.array([False, False, False, False])},
+    'sslice_1_data': {'shape': None, 'kind': 'data'},
+    'sslice_2': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice', 'slices': None,
+                 'shrink_axis_mask': np.array([False, False, True, False]),
+                 'new_axis_mask': np.array([False, False, False, False])},
+    'sslice_2_data': {'shape': None, 'kind': 'data'}}
+
+nodes_reshape = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_2': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_2_data': {'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_begin_data': {'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_end_data': {'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_stride_data': {'shape': None, 'kind': 'data', 'data_type': None},
+    # StridedSlice layers
+    'sslice_1': {'type': 'StridedSlice', 'value': None, 'kind': 'op', 'op': 'StridedSlice', 'slices': None,
+                 'shrink_axis_mask': np.array([False, False, True, False]),
+                 'new_axis_mask': np.array([False, False, False, False])},
+    'sslice_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'sslice_2': {'type': 'StridedSlice', 'value': None, 'kind': 'op', 'op': 'StridedSlice', 'slices': None,
+                 'shrink_axis_mask': np.array([False, False, True, False]),
+                 'new_axis_mask': np.array([False, False, False, False])},
+    'sslice_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Reshape layer
+    'sslice_1/Reshape_shrink': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},
+    'sslice_1/Reshape_shrink_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'sslice_2/Reshape_shrink': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},
+    'sslice_2/Reshape_shrink_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'sslice_2/Reshape_new': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},
+    'sslice_2/Reshape_new_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+class AddReshapeAfterStridedSliceTests(unittest.TestCase):
+    def test_ss_1_shrink_last(self):
+        graph = build_graph(nodes_attributes_test,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_1'),
+                             ('placeholder_begin_data', 'sslice_1'),
+                             ('placeholder_end_data', 'sslice_1'),
+                             ('placeholder_stride_data', 'sslice_1'),
+                             ('sslice_1', 'sslice_1_data')],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                             'sslice_1': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)])},
+                             'sslice_1_data': {'shape': np.array([1, 227, 54]), 'is_output': True},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
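+        # Reference graph: the StridedSlice keeps its full 4-D output and a Reshape drops the shrunk axis
+        # instead, while shrink_axis_mask on the StridedSlice is cleared.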
+        graph_ref = build_graph(nodes_reshape,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'sslice_1'),
+                                 ('placeholder_begin_data', 'sslice_1'),
+                                 ('placeholder_end_data', 'sslice_1'),
+                                 ('placeholder_stride_data', 'sslice_1'),
+                                 ('sslice_1', 'sslice_1/Reshape_shrink_data'),
+                                 ('sslice_1/Reshape_shrink_data', 'sslice_1/Reshape_shrink'),
+                                 ('sslice_1/Reshape_shrink', 'sslice_1_data')],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                                 'sslice_1': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]),
+                                     'shrink_axis_mask': np.array([False, False, False, False]),
+                                     'new_axis_mask': np.array([False, False, False, False])},
+                                 'sslice_1_data': {'shape': np.array([1, 227, 54]), 'is_output': True},
+                                 'sslice_1/Reshape_shrink': {'dim': np.array([1, 227, 54])},
+                                 'sslice_1/Reshape_shrink_data': {'shape': np.array([1, 227, 1, 54])}
+                                 })
+
+        pattern = AddReshapeAfterStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_1_data', check_op_attrs=True)
+        graph.clear()
+        graph_ref.clear()
+        self.assertTrue(flag, resp)
+
+    def test_ss_1_shrink(self):
+        graph = build_graph(nodes_attributes_test,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('placeholder_begin_data', 'sslice_2'),
+                             ('placeholder_end_data', 'sslice_2'),
+                             ('placeholder_stride_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('sslice_2_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'), ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]), },
+                             'sslice_2_data': {'shape': np.array([1, 227, 54]), 'is_output': True}
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_reshape,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'sslice_2'),
+                                 ('placeholder_begin_data', 'sslice_2'),
+                                 ('placeholder_end_data', 'sslice_2'),
+                                 ('placeholder_stride_data', 'sslice_2'),
+                                 ('sslice_2', 'sslice_2/Reshape_shrink_data'),
+                                 ('sslice_2/Reshape_shrink_data', 'sslice_2/Reshape_shrink'),
+                                 ('sslice_2/Reshape_shrink', 'sslice_2_data'),
+                                 ('sslice_2_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data')],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                                 'sslice_2': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]),
+                                     'shrink_axis_mask': np.array([False, False, False, False]),
+                                     'new_axis_mask': np.array([False, False, False, False])},
+                                 'sslice_2_data': {'shape': np.array([1, 227, 54])},
+                                 'sslice_2/Reshape_shrink': {'dim': np.array([1, 227, 54])},
+                                 'sslice_2/Reshape_shrink_data': {'shape': np.array([1, 227, 1, 54])},
+                                 })
+
+        pattern = AddReshapeAfterStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True)
+        graph.clear()
+        graph_ref.clear()
+        self.assertTrue(flag, resp)
+
+    def test_ss_2_shrink(self):
+        graph = build_graph(nodes_attributes_test,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('placeholder_begin_data', 'sslice_2'),
+                             ('placeholder_end_data', 'sslice_2'),
+                             ('placeholder_stride_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('sslice_2_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'), ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                             'sslice_2': {
+                                 'slices': np.array([slice(0, 1, 1), slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1)]),
+                                 'shrink_axis_mask': np.array([False, True, False, True])},
+                             'sslice_2_data': {'shape': np.array([1, 227]), 'is_output': True}
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_reshape,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'sslice_2'),
+                                 ('placeholder_begin_data', 'sslice_2'),
+                                 ('placeholder_end_data', 'sslice_2'),
+                                 ('placeholder_stride_data', 'sslice_2'),
+                                 ('sslice_2', 'sslice_2/Reshape_shrink_data'),
+                                 ('sslice_2/Reshape_shrink_data', 'sslice_2/Reshape_shrink'),
+                                 ('sslice_2/Reshape_shrink', 'sslice_2_data'),
+                                 ('sslice_2_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data')],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                                 'sslice_2': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1)]),
+                                     'shrink_axis_mask': np.array([False, False, False, False]),
+                                     'new_axis_mask': np.array([False, False, False, False])},
+                                 'sslice_2_data': {'shape': np.array([1, 227])},
+                                 'sslice_2/Reshape_shrink': {'dim': np.array([1, 227])},
+                                 'sslice_2/Reshape_shrink_data': {'shape': np.array([1, 1, 227, 1])},
+                                 })
+
+        pattern = AddReshapeAfterStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True)
+        graph.clear()
+        graph_ref.clear()
+        self.assertTrue(flag, resp)
+
+    def test_ss_1_new(self):
+        graph = build_graph(nodes_attributes_test,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('placeholder_begin_data', 'sslice_2'),
+                             ('placeholder_end_data', 'sslice_2'),
+                             ('placeholder_stride_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('sslice_2_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'), ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 54, 1)]),
+                                 'shrink_axis_mask': np.array([False, False, False, False, False]),
+                                 'new_axis_mask': np.array([False, True, False, False, False])},
+                             'sslice_2_data': {'shape': np.array([1, 1, 227, 227, 54])}
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_reshape,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'sslice_2'),
+                                 ('placeholder_begin_data', 'sslice_2'),
+                                 ('placeholder_end_data', 'sslice_2'),
+                                 ('placeholder_stride_data', 'sslice_2'),
+                                 ('sslice_2', 'sslice_2/Reshape_new_data'),
+                                 ('sslice_2/Reshape_new_data', 'sslice_2/Reshape_new'),
+                                 ('sslice_2/Reshape_new', 'sslice_2_data'),
+                                 ('sslice_2_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data')],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                                 'sslice_2': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1),
+                                      slice(0, 54, 1)]),
+                                     'shrink_axis_mask': np.array([False, False, False, False, False]),
+                                     'new_axis_mask': np.array([False, False, False, False, False])},
+                                 'sslice_2_data': {'shape': np.array([1, 1, 227, 227, 54])},
+                                 'sslice_2/Reshape_new': {'dim': np.array([1, 1, 227, 227, 54])},
+                                 'sslice_2/Reshape_new_data': {'shape': np.array([1, 227, 227, 54])},
+                                 })
+
+        pattern = AddReshapeAfterStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True)
+        graph.clear()
+        graph_ref.clear()
+        self.assertTrue(flag, resp)
+
+    def test_ss_shrink_new(self):
+        graph = build_graph(nodes_attributes_test,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('placeholder_begin_data', 'sslice_2'),
+                             ('placeholder_end_data', 'sslice_2'),
+                             ('placeholder_stride_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('sslice_2_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'), ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]),
+                                 'shrink_axis_mask': np.array([False, False, False, True, False]),
+                                 'new_axis_mask': np.array([False, True, False, False, False])},
+                             'sslice_2_data': {'shape': np.array([1, 1, 227, 54]), 'is_output': True}
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_reshape,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'sslice_2'),
+                                 ('placeholder_begin_data', 'sslice_2'),
+                                 ('placeholder_end_data', 'sslice_2'),
+                                 ('placeholder_stride_data', 'sslice_2'),
+                                 ('sslice_2', 'sslice_2/Reshape_new_data'),
+                                 ('sslice_2/Reshape_new_data', 'sslice_2/Reshape_new'),
+                                 ('sslice_2/Reshape_new', 'sslice_2/Reshape_shrink_data'),
+                                 ('sslice_2/Reshape_shrink_data', 'sslice_2/Reshape_shrink'),
+                                 ('sslice_2/Reshape_shrink', 'sslice_2_data'),
+                                 ('sslice_2_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data')],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                                 'sslice_2': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1),
+                                      slice(0, 54, 1)]),
+                                     'shrink_axis_mask': np.array([False, False, False, False, False]),
+                                     'new_axis_mask': np.array([False, False, False, False, False])},
+                                 'sslice_2_data': {'shape': np.array([1, 1, 227, 54])},
+                                 'sslice_2/Reshape_new': {'dim': np.array([1, 1, 227, 1, 54])},
+                                 'sslice_2/Reshape_new_data': {'shape': np.array([1, 227, 1, 54])},
+                                 'sslice_2/Reshape_shrink': {'dim': np.array([1, 1, 227, 54])},
+                                 'sslice_2/Reshape_shrink_data': {'shape': np.array([1, 1, 227, 1, 54])},
+                                 })
+
+        pattern = AddReshapeAfterStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True)
+        graph.clear()
+        graph_ref.clear()
+        self.assertTrue(flag, resp)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/model-optimizer/extensions/middle/ConvertGroupedStridedSlice_test.py b/model-optimizer/extensions/middle/ConvertGroupedStridedSlice_test.py
new file mode 100644 (file)
index 0000000..0ebdb38
--- /dev/null
@@ -0,0 +1,530 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.middle.ConvertGroupedStridedSlice import ConvertGroupedStridedSlice
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    'placeholder_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # StridedSlice layers
+    'sslice_1': {'type': None, 'kind': 'op', 'op': 'StridedSlice', 'slices': None,
+                 'shrink_axis_mask': np.array([False, False, False, False])},
+    'sslice_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'sslice_2': {'type': None, 'kind': 'op', 'op': 'StridedSlice', 'slices': None,
+                 'shrink_axis_mask': np.array([False, False, False, False])},
+    'sslice_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'sslice_3': {'type': None, 'kind': 'op', 'op': 'StridedSlice', 'slices': None,
+                 'shrink_axis_mask': np.array([False, False, False, False])},
+    'sslice_3_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Split layer
+    'split_1': {'type': 'Split', 'kind': 'op', 'op': 'SplitV'},
+    'split_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'split_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'split_3_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'split_4_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Concat1 operation
+    'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+    'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+class ConvertGroupedStridedSliceTests(unittest.TestCase):
+    def test_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_1'),
+                             ('sslice_1', 'sslice_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('placeholder_1_data', 'sslice_3'),
+                             ('sslice_3', 'sslice_3_data'),
+                             ('sslice_1_data', 'concat_1'),
+                             ('sslice_2_data', 'concat_1'),
+                             ('sslice_3_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+
+                             'sslice_1': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 18, 1)])},
+                             'sslice_1_data': {'shape': np.array([1, 227, 227, 18])},
+
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(18, 36, 1)])},
+                             'sslice_2_data': {'shape': np.array([1, 227, 227, 18])},
+
+                             'sslice_3': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(36, 54, 1)])},
+                             'sslice_3_data': {'shape': np.array([1, 227, 227, 18])},
+
+                             'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
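+        # Reference graph: the three StridedSlice ops that together cover the whole channel dimension are
+        # replaced by a single SplitV along axis 3 feeding the same Concat.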
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'split_1'),
+                                 ('split_1', 'split_1_data'),
+                                 ('split_1', 'split_2_data'),
+                                 ('split_1', 'split_3_data'),
+                                 ('split_1_data', 'concat_1'),
+                                 ('split_2_data', 'concat_1'),
+                                 ('split_3_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                                 'split_1': {'axis': 3},
+                                 'split_1_data': {'shape': np.array([1, 227, 227, 18])},
+                                 'split_2_data': {'shape': np.array([1, 227, 227, 18])},
+                                 'split_3_data': {'shape': np.array([1, 227, 227, 18])},
+                                 'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                                 })
+
+        pattern = ConvertGroupedStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_1'),
+                             ('sslice_1', 'sslice_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('placeholder_1_data', 'sslice_3'),
+                             ('sslice_3', 'sslice_3_data'),
+                             ('sslice_1_data', 'concat_1'),
+                             ('sslice_2_data', 'concat_1'),
+                             ('sslice_3_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+
+                             'sslice_1': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 37, 1)])},
+                             'sslice_1_data': {'shape': np.array([1, 227, 227, 18])},
+
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 54, 1)])},
+                             'sslice_2_data': {'shape': np.array([1, 227, 227, 17])},
+
+                             'sslice_3': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 19, 1)])},
+                             'sslice_3_data': {'shape': np.array([1, 227, 227, 19])},
+
+                             'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'split_1'),
+                                 ('split_1', 'split_1_data'),
+                                 ('split_1', 'split_2_data'),
+                                 ('split_1', 'split_3_data'),
+                                 ('split_1_data', 'concat_1'),
+                                 ('split_2_data', 'concat_1'),
+                                 ('split_3_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                                 'split_1': {'axis': 3},
+                                 'split_1_data': {'shape': np.array([1, 227, 227, 18])},
+                                 'split_2_data': {'shape': np.array([1, 227, 227, 17])},
+                                 'split_3_data': {'shape': np.array([1, 227, 227, 19])},
+                                 'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                                 })
+
+        pattern = ConvertGroupedStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    # Intersection of split ranges in feature dimension
+    def test_3_neg(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_1'),
+                             ('sslice_1', 'sslice_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('placeholder_1_data', 'sslice_3'),
+                             ('sslice_3', 'sslice_3_data'),
+                             ('sslice_1_data', 'concat_1'),
+                             ('sslice_2_data', 'concat_1'),
+                             ('sslice_3_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+
+                             'sslice_1': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 39, 1)])},
+                             'sslice_1_data': {'shape': np.array([1, 227, 227, 20])},
+
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 54, 1)])},
+                             'sslice_2_data': {'shape': np.array([1, 227, 227, 17])},
+
+                             'sslice_3': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 19, 1)])},
+                             'sslice_3_data': {'shape': np.array([1, 227, 227, 19])},
+
+                             'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'sslice_1'),
+                                 ('sslice_1', 'sslice_1_data'),
+                                 ('placeholder_1_data', 'sslice_2'),
+                                 ('sslice_2', 'sslice_2_data'),
+                                 ('placeholder_1_data', 'sslice_3'),
+                                 ('sslice_3', 'sslice_3_data'),
+                                 ('sslice_1_data', 'concat_1'),
+                                 ('sslice_2_data', 'concat_1'),
+                                 ('sslice_3_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+
+                                 'sslice_1': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 39, 1)])},
+                                 'sslice_1_data': {'shape': np.array([1, 227, 227, 20])},
+
+                                 'sslice_2': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 54, 1)])},
+                                 'sslice_2_data': {'shape': np.array([1, 227, 227, 17])},
+
+                                 'sslice_3': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 19, 1)])},
+                                 'sslice_3_data': {'shape': np.array([1, 227, 227, 19])},
+
+                                 'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                                 })
+
+        pattern = ConvertGroupedStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    # Split range overflow in feature dimension
+    def test_4_neg(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_1'),
+                             ('sslice_1', 'sslice_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('placeholder_1_data', 'sslice_3'),
+                             ('sslice_3', 'sslice_3_data'),
+                             ('sslice_1_data', 'concat_1'),
+                             ('sslice_2_data', 'concat_1'),
+                             ('sslice_3_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+
+                             'sslice_1': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 37, 1)])},
+                             'sslice_1_data': {'shape': np.array([1, 227, 227, 18])},
+
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 55, 1)])},
+                             'sslice_2_data': {'shape': np.array([1, 227, 227, 18])},
+
+                             'sslice_3': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 19, 1)])},
+                             'sslice_3_data': {'shape': np.array([1, 227, 227, 19])},
+
+                             'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'sslice_1'),
+                                 ('sslice_1', 'sslice_1_data'),
+                                 ('placeholder_1_data', 'sslice_2'),
+                                 ('sslice_2', 'sslice_2_data'),
+                                 ('placeholder_1_data', 'sslice_3'),
+                                 ('sslice_3', 'sslice_3_data'),
+                                 ('sslice_1_data', 'concat_1'),
+                                 ('sslice_2_data', 'concat_1'),
+                                 ('sslice_3_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+
+                                 'sslice_1': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 37, 1)])},
+                                 'sslice_1_data': {'shape': np.array([1, 227, 227, 18])},
+
+                                 'sslice_2': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 55, 1)])},
+                                 'sslice_2_data': {'shape': np.array([1, 227, 227, 18])},
+
+                                 'sslice_3': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 19, 1)])},
+                                 'sslice_3_data': {'shape': np.array([1, 227, 227, 19])},
+
+                                 'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                                 })
+
+        pattern = ConvertGroupedStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    # Split(1,H,W,54)--->Fake_data (1,H,W,1)
+    #       |`---->Sslice1_out (1,H,W,18)
+    #       |`---->Sslice2_out (1,H,W,17)
+    #       `----->Sslice3_out (1,H,W,18)
+    def test_5(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_1'),
+                             ('sslice_1', 'sslice_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('placeholder_1_data', 'sslice_3'),
+                             ('sslice_3', 'sslice_3_data'),
+                             ('sslice_1_data', 'concat_1'),
+                             ('sslice_2_data', 'concat_1'),
+                             ('sslice_3_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+
+                             'sslice_1': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 37, 1)])},
+                             'sslice_1_data': {'shape': np.array([1, 227, 227, 18])},
+
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 54, 1)])},
+                             'sslice_2_data': {'shape': np.array([1, 227, 227, 17])},
+
+                             'sslice_3': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(1, 19, 1)])},
+                             'sslice_3_data': {'shape': np.array([1, 227, 227, 18])},
+
+                             'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'split_1'),
+                                 ('split_1', 'split_1_data'),
+                                 ('split_1', 'split_2_data'),
+                                 ('split_1', 'split_3_data'),
+                                 ('split_1', 'split_4_data'),
+                                 ('split_2_data', 'concat_1'),
+                                 ('split_3_data', 'concat_1'),
+                                 ('split_4_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                                 'split_1': {'axis': 3},
+                                 'split_1_data': {'shape': np.array([1, 227, 227, 1])},
+                                 'split_2_data': {'shape': np.array([1, 227, 227, 18])},
+                                 'split_3_data': {'shape': np.array([1, 227, 227, 17])},
+                                 'split_4_data': {'shape': np.array([1, 227, 227, 18])},
+                                 'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                                 })
+
+        pattern = ConvertGroupedStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    # Split(1,H,W,54)
+    #       |`---->Sslice1_out (1,H,W,(0,18))
+    #       |`---->Fake_data (1,H,W,(18,27))
+    #       |`---->Sslice2_out (1,H,W,(27,45))
+    #       `----->Fake_data (1,H,W,(45,54))
+    def test_6(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_1'),
+                             ('sslice_1', 'sslice_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('sslice_1_data', 'concat_1'),
+                             ('sslice_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+
+                             'sslice_1': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 18, 1)])},
+                             'sslice_1_data': {'shape': np.array([1, 227, 227, 18])},
+
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(27, 45, 1)])},
+                             'sslice_2_data': {'shape': np.array([1, 227, 227, 18])},
+
+                             'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'split_1'),
+                                 ('split_1', 'split_1_data'),
+                                 ('split_1', 'split_2_data'),
+                                 ('split_1', 'split_3_data'),
+                                 ('split_1', 'split_4_data'),
+                                 ('split_1_data', 'concat_1'),
+                                 ('split_3_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+                                 'split_1': {'axis': 3},
+                                 'split_1_data': {'shape': np.array([1, 227, 227, 18])},
+                                 'split_2_data': {'shape': np.array([1, 227, 227, 9])},
+                                 'split_3_data': {'shape': np.array([1, 227, 227, 18])},
+                                 'split_4_data': {'shape': np.array([1, 227, 227, 9])},
+                                 'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                                 })
+
+        pattern = ConvertGroupedStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
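+    # Each slice cuts more than one dimension (H as well as the feature dimension), so no
+    # conversion to Split is expected: the reference graph is identical to the input graph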
+    def test_7_neg(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_1'),
+                             ('sslice_1', 'sslice_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('sslice_1_data', 'concat_1'),
+                             ('sslice_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+
+                             'sslice_1': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 10, 1), slice(0, 227, 1), slice(0, 18, 1)])},
+                             'sslice_1_data': {'shape': np.array([1, 10, 227, 18])},
+
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(10, 227, 1), slice(0, 227, 1), slice(27, 45, 1)])},
+                             'sslice_2_data': {'shape': np.array([1, 217, 227, 18])},
+
+                             'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'sslice_1'),
+                                 ('sslice_1', 'sslice_1_data'),
+                                 ('placeholder_1_data', 'sslice_2'),
+                                 ('sslice_2', 'sslice_2_data'),
+                                 ('sslice_1_data', 'concat_1'),
+                                 ('sslice_2_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])},
+
+                                 'sslice_1': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(0, 10, 1), slice(0, 227, 1), slice(0, 18, 1)])},
+                                 'sslice_1_data': {'shape': np.array([1, 10, 227, 18])},
+
+                                 'sslice_2': {'slices': np.array(
+                                     [slice(0, 1, 1), slice(10, 227, 1), slice(0, 227, 1), slice(27, 45, 1)])},
+                                 'sslice_2_data': {'shape': np.array([1, 217, 227, 18])},
+
+                                 'concat_1_data': {'shape': np.array([1, 227, 227, 54]), 'is_output': True},
+                                 })
+
+        pattern = ConvertGroupedStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    # Split(1,54,W,C)
+    #       |`---->Sslice1_out (1,(0,18),W,C)
+    #       |`---->Sslice2_out (1,(18,36),W,C)
+    #       `----->Fake_data (1,(36,54),W,C)
+    def test_8(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'sslice_1'),
+                             ('sslice_1', 'sslice_1_data'),
+                             ('placeholder_1_data', 'sslice_2'),
+                             ('sslice_2', 'sslice_2_data'),
+                             ('sslice_1_data', 'concat_1'),
+                             ('sslice_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 54, 54, 3])},
+
+                             'sslice_1': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(0, 18, 1), slice(0, 54, 1), slice(0, 3, 1)])},
+                             'sslice_1_data': {'shape': np.array([1, 18, 54, 3])},
+
+                             'sslice_2': {'slices': np.array(
+                                 [slice(0, 1, 1), slice(18, 36, 1), slice(0, 54, 1), slice(0, 3, 1)])},
+                             'sslice_2_data': {'shape': np.array([1, 18, 54, 3])},
+
+                             'concat_1_data': {'shape': np.array([1, 54, 54, 3]), 'is_output': True},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'split_1'),
+                                 ('split_1', 'split_1_data'),
+                                 ('split_1', 'split_2_data'),
+                                 ('split_1', 'split_3_data'),
+                                 ('split_1_data', 'concat_1'),
+                                 ('split_3_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 54, 54, 3])},
+                                 'split_1': {'axis': 1},
+                                 'split_1_data': {'shape': np.array([1, 18, 54, 3])},
+                                 'split_2_data': {'shape': np.array([1, 18, 54, 3])},
+                                 'split_3_data': {'shape': np.array([1, 18, 54, 3])},
+                                 'concat_1_data': {'shape': np.array([1, 54, 54, 3]), 'is_output': True},
+                                 })
+
+        pattern = ConvertGroupedStridedSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/model-optimizer/extensions/middle/EltwiseInputNormalization_test.py b/model-optimizer/extensions/middle/EltwiseInputNormalization_test.py
new file mode 100644 (file)
index 0000000..829b13b
--- /dev/null
@@ -0,0 +1,178 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.middle.EltwiseInputNormalization import EltwiseInputNormalize
+from extensions.middle.EltwiseInputReshape import EltwiseInputReshape
+from mo.middle.passes.eliminate_test import build_graph
+from mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs
+
+# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the
+# dictionary with node attributes.
+nodes_attributes = {
+    # Placeholder layers
+    'placeholder_1': {'value': None, 'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+
+    # Reshape layers
+    'reshape_1': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},
+    'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'reshape_2': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},
+    'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    # Eltwise consumes layers
+    'eltwise_1': {'type': 'Eltwise', 'value': None, 'kind': 'op', 'op': 'Eltwise'},
+    'eltwise_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'eltwise_2': {'type': 'Eltwise', 'value': None, 'kind': 'op', 'op': 'Eltwise'},
+    'eltwise_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'eltwise_3': {'type': 'Eltwise', 'value': None, 'kind': 'op', 'op': 'Eltwise'},
+    'eltwise_3_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'eltwise_4': {'type': 'Eltwise', 'value': None, 'kind': 'op', 'op': 'Eltwise'},
+    'eltwise_4_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    # Concat
+    'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+}
+
+
+class EltwiseInputNormalizationTest(unittest.TestCase):
+    def test1_not_constant(self):
+        #
+        #   data1(1,3,64,64)----.                                                   data(1,3,64,64)-------.
+        #   data2(1,64,1)-------->Eltwise-->data(1,3,64,64)   =>    data(1,64,1)->Reshape->data(1,1,64,1)-->Eltwise->...
+        #   data3(64,1)------'                                       data(64,1)->Reshape->data(1,1,64,1)-'
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'eltwise_1'),
+                             ('placeholder_2_data', 'eltwise_1'),
+                             ('placeholder_3_data', 'eltwise_1'),
+                             ('eltwise_1', 'eltwise_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
+                             'placeholder_2_data': {'shape': np.array([1, 64, 1])},
+                             'placeholder_3_data': {'shape': np.array([64, 1])},
+                             'eltwise_1_data': {'shape': np.array([1, 3, 64, 64])}
+                             }, nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'eltwise_1'),
+                                 ('placeholder_2_data', 'reshape_1'),
+                                 ('placeholder_3_data', 'reshape_2'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_2', 'reshape_2_data'),
+                                 ('reshape_1_data', 'eltwise_1'),
+                                 ('reshape_2_data', 'eltwise_1'),
+                                 ('eltwise_1', 'eltwise_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
+                                 'reshape_1': {'dim': np.array([1, 1, 64, 1])},
+                                 'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},
+                                 'reshape_2': {'dim': np.array([1, 1, 64, 1])},
+                                 'reshape_2_data': {'shape': np.array([1, 1, 64, 1])},
+                                 'eltwise_1_data': {'shape': np.array([1, 3, 64, 64])}
+                                 }, nodes_with_edges_only=True)
+
+        pattern = EltwiseInputNormalize()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'eltwise_1', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_mega_hardcore(self):
+        #   ORIGINAL GRAPH
+        #
+        #   data1(1,3,64,64)---,->Eltwise1->data(1,3,64,64)-----,->Eltwise2->data(1,3,64,64)---,->Eltwise4->data(1,3,64,64)
+        #                     /\                               /\                             /\
+        #   data2(64,1)-----,-'--------------------------------'------------------------------'
+        #                  \/                                 /
+        #   data3(64,1)----`-->Eltwise3->data(64,1)----------'
+        #
+        #   REFERENCE GRAPH AFTER TRANSFORMATION
+        #
+        #   data1(1,3,64,64)---,->Eltwise1->data(1,3,64,64)-----,->Eltwise2->data(1,3,64,64)---,->Eltwise4->data(1,3,64,64)
+        #                     /\                               /\                              /\
+        #   data2(1,1,64,1)---'--------------------------------'-------------------------------'
+        #                                                     /
+        #   data4(64,1)-------,                        Reshape(1,1,64,1)
+        #                    \/                           |
+        #   data3(64,1)------`---->Eltwise3->data(64,1)---'
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'eltwise_1'),
+                             ('placeholder_2_data', 'eltwise_1'),
+                             ('eltwise_1', 'eltwise_1_data'),
+                             ('eltwise_1_data', 'eltwise_2'),
+                             ('placeholder_2_data', 'eltwise_3'),
+                             ('placeholder_3_data', 'eltwise_3'),
+                             ('eltwise_3', 'eltwise_3_data'),
+                             ('eltwise_3_data', 'eltwise_2'),
+                             ('eltwise_2', 'eltwise_2_data'),
+                             ('eltwise_2_data', 'eltwise_4'),
+                             ('placeholder_2_data', 'eltwise_4'),
+                             ('eltwise_4', 'eltwise_4_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
+                             'placeholder_2_data': {'shape': np.array([64, 1]), 'value': np.ones([64, 1])},
+                             'placeholder_3_data': {'shape': np.array([64, 1])},
+                             'eltwise_1_data': {'shape': np.array([1, 3, 64, 64])},
+                             'eltwise_2_data': {'shape': np.array([1, 3, 64, 64])},
+                             'eltwise_3_data': {'shape': np.array([64, 1])},
+                             'eltwise_4_data': {'shape': np.array([1, 3, 64, 64])}
+                             }, nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'eltwise_1'),
+                             ('placeholder_2_data', 'eltwise_1'),
+                             ('eltwise_1', 'eltwise_1_data'),
+                             ('eltwise_1_data', 'eltwise_2'),
+                             ('placeholder_4_data', 'eltwise_3'),
+                             ('placeholder_3_data', 'eltwise_3'),
+                             ('eltwise_3', 'eltwise_3_data'),
+                             ('eltwise_3_data', 'reshape_1'),
+                             ('reshape_1', 'reshape_1_data'),
+                             ('reshape_1_data', 'eltwise_2'),
+                             ('eltwise_2', 'eltwise_2_data'),
+                             ('eltwise_2_data', 'eltwise_4'),
+                             ('placeholder_2_data', 'eltwise_4'),
+                             ('eltwise_4', 'eltwise_4_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
+                             'placeholder_2_data': {'shape': np.array([1, 1, 64, 1]), 'value': np.ones([1, 1, 64, 1])},
+                             'placeholder_3_data': {'shape': np.array([64, 1])},
+                             'placeholder_4_data': {'shape': np.array([64, 1]), 'value': np.ones([64, 1])},
+                             'reshape_1': {'dim': np.array([1, 1, 64, 1])},
+                             'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},
+                             'eltwise_1_data': {'shape': np.array([1, 3, 64, 64])},
+                             'eltwise_2_data': {'shape': np.array([1, 3, 64, 64])},
+                             'eltwise_3_data': {'shape': np.array([64, 1])},
+                             'eltwise_4_data': {'shape': np.array([1, 3, 64, 64])}
+                             }, nodes_with_edges_only=True)
+
+        pattern = EltwiseInputNormalize()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'eltwise_1', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/EltwiseInputReshape_test.py b/model-optimizer/extensions/middle/EltwiseInputReshape_test.py
new file mode 100644 (file)
index 0000000..24c727d
--- /dev/null
@@ -0,0 +1,225 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.middle.EltwiseInputReshape import EltwiseInputReshape
+from mo.middle.passes.eliminate_test import build_graph
+from mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs
+
+# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the
+# dictionary with node attributes.
+nodes_attributes = {
+    # Placeholder layers
+    'placeholder_1': {'value': None, 'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+
+    # Reshape layers
+    'reshape_1': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},
+    'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'reshape_2': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},
+    'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    # Fake consumes layers
+    'consumer_1': {'type': 'Consumer', 'value': None, 'kind': 'op', 'op': 'Consumer'},
+    'consumer_2': {'type': 'Consumer', 'value': None, 'kind': 'op', 'op': 'Consumer'},
+    'consumer_3': {'type': 'Consumer', 'value': None, 'kind': 'op', 'op': 'Consumer'},
+
+    # Concat
+    'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+}
+
+
+class EltwiseInputReshapeTest(unittest.TestCase):
+    def test1_not_constant(self):
+        #        ,-------------->consumer3                 ,------------>consumer3
+        #   data---(new_shape1)-->consumer1      =>    data---->Reshape-->consumer1
+        #        `-(new_shape2)-->consumer2                 `-->Reshape-->consumer2
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'consumer_1', {'new_shape': [1, 3, 1, 1]}),
+                             ('placeholder_1_data', 'consumer_2', {'new_shape': [1, 1, 3]}),
+                             ('placeholder_1_data', 'consumer_3'),
+                             ('consumer_1', 'concat'),
+                             ('consumer_2', 'concat'),
+                             ('consumer_3', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3])}}, nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'reshape_1'),
+                                 ('placeholder_1_data', 'reshape_2'),
+                                 ('placeholder_1_data', 'consumer_3'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_2', 'reshape_2_data'),
+                                 ('reshape_1_data', 'consumer_1'),
+                                 ('reshape_2_data', 'consumer_2'),
+                                 ('consumer_1', 'concat'),
+                                 ('consumer_2', 'concat'),
+                                 ('consumer_3', 'concat'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3])},
+                                 'reshape_1': {'dim': np.array([1, 3, 1, 1])},
+                                 'reshape_1_data': {'shape': np.array([1, 3, 1, 1])},
+                                 'reshape_2': {'dim': np.array([1, 1, 3])},
+                                 'reshape_2_data': {'shape': np.array([1, 1, 3])},
+                                 }, nodes_with_edges_only=True)
+
+        pattern = EltwiseInputReshape()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test2_not_constant(self):
+        #        ,--------------->consumer3                ,----------->consumer3
+        #   data---(new_shape1)-->consumer1      =>    data-->Reshape-->consumer1
+        #        `-(new_shape1)-->consumer2                         `-->consumer2
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'consumer_1', {'new_shape': [1, 3, 1, 1]}),
+                             ('placeholder_1_data', 'consumer_2', {'new_shape': [1, 3, 1, 1]}),
+                             ('placeholder_1_data', 'consumer_3'),
+                             ('consumer_1', 'concat'),
+                             ('consumer_2', 'concat'),
+                             ('consumer_3', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3])}}, nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'reshape_1'),
+                                 ('placeholder_1_data', 'consumer_3'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'consumer_1'),
+                                 ('reshape_1_data', 'consumer_2'),
+                                 ('consumer_1', 'concat'),
+                                 ('consumer_2', 'concat'),
+                                 ('consumer_3', 'concat'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3])},
+                                 'reshape_1': {'dim': np.array([1, 3, 1, 1])},
+                                 'reshape_1_data': {'shape': np.array([1, 3, 1, 1])},
+                                 }, nodes_with_edges_only=True)
+
+        pattern = EltwiseInputReshape()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test3_constant(self):
+        #        ,--------------->consumer3            data-->consumer3
+        #   data---(new_shape1)-->consumer1      =>    data-->consumer1
+        #        `-(new_shape2)-->consumer2            data-->consumer2
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'consumer_1', {'new_shape': [1, 3, 1, 1]}),
+                             ('placeholder_1_data', 'consumer_2', {'new_shape': [1, 1, 3]}),
+                             ('placeholder_1_data', 'consumer_3'),
+                             ('consumer_1', 'concat'),
+                             ('consumer_2', 'concat'),
+                             ('consumer_3', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3]), 'value': np.ones([1, 3])}},
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'consumer_1'),
+                                 ('placeholder_2_data', 'consumer_2'),
+                                 ('placeholder_3_data', 'consumer_3'),
+                                 ('consumer_1', 'concat'),
+                                 ('consumer_2', 'concat'),
+                                 ('consumer_3', 'concat'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.ones([1, 3, 1, 1])},
+                                 'placeholder_2_data': {'shape': np.array([1, 1, 3]), 'value': np.ones([1, 1, 3])},
+                                 'placeholder_3_data': {'shape': np.array([1, 3]), 'value': np.ones([1, 3])},
+                                 }, nodes_with_edges_only=True)
+
+        pattern = EltwiseInputReshape()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test4_constant(self):
+        #        ,--------------->consumer3                 ,-->consumer3
+        #   data---(new_shape1)-->consumer1      =>    data-->consumer1
+        #        `-(new_shape2)-->consumer2                 `->consumer2
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'consumer_1', {'new_shape': [3, 1, 1]}),
+                             ('placeholder_1_data', 'consumer_2', {'new_shape': [3, 1, 1]}),
+                             ('placeholder_1_data', 'consumer_3', {'new_shape': [3, 1, 1]}),
+                             ('consumer_1', 'concat'),
+                             ('consumer_2', 'concat'),
+                             ('consumer_3', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3]), 'value': np.ones([1, 3])}},
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'consumer_1'),
+                                 ('placeholder_1_data', 'consumer_2'),
+                                 ('placeholder_1_data', 'consumer_3'),
+                                 ('consumer_1', 'concat'),
+                                 ('consumer_2', 'concat'),
+                                 ('consumer_3', 'concat'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([3, 1, 1]), 'value': np.ones([3, 1, 1])}
+                                 }, nodes_with_edges_only=True)
+
+        pattern = EltwiseInputReshape()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test5_not_constant(self):
+        #        ,--------------->consumer3                ,->consumer3
+        #   data---(new_shape1)-->consumer1      =>    data----->consumer1
+        #        `-(new_shape1)-->consumer2                `-->consumer2
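+        #   (new_shape matches the original data shape [1, 3], so no Reshape is expected to be inserted)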
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'consumer_1', {'new_shape': [1, 3]}),
+                             ('placeholder_1_data', 'consumer_2', {'new_shape': [1, 3]}),
+                             ('placeholder_1_data', 'consumer_3'),
+                             ('consumer_1', 'concat'),
+                             ('consumer_2', 'concat'),
+                             ('consumer_3', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3])}}, nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'consumer_1', {'new_shape': [1, 3]}),
+                             ('placeholder_1_data', 'consumer_2', {'new_shape': [1, 3]}),
+                             ('placeholder_1_data', 'consumer_3'),
+                             ('consumer_1', 'concat'),
+                             ('consumer_2', 'concat'),
+                             ('consumer_3', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3])}}, nodes_with_edges_only=True)
+
+        pattern = EltwiseInputReshape()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/FusePermutesSequence_test.py b/model-optimizer/extensions/middle/FusePermutesSequence_test.py
new file mode 100644 (file)
index 0000000..850cf17
--- /dev/null
@@ -0,0 +1,118 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.middle.FusePermutesSequence import FusePermutesSequence
+from mo.middle.passes.eliminate_test import build_graph
+from mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs
+
+# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the
+# dictionary with node attributes.
+nodes_attributes = {
+    'placeholder_1': {'name': 'placeholder_1', 'value': None, 'shape': None, 'type': 'Placeholder', 'kind': 'op',
+                      'op': 'Placeholder'},
+    'placeholder_1_data': {'name': 'placeholder_1_data', 'value': None, 'shape': None, 'kind': 'data',
+                           'data_type': None},
+    # Permute layers
+    'permute_1': {'type': 'Permute', 'value': None, 'kind': 'op', 'op': 'Permute'},
+    'permute_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'permute_2': {'type': 'Permute', 'value': None, 'kind': 'op', 'op': 'Permute'},
+    'permute_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'permute_3': {'type': 'Permute', 'value': None, 'kind': 'op', 'op': 'Permute'},
+    'permute_3_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+class FusePermutesSequenceTest(unittest.TestCase):
+    def test_1(self):
+        #
+        #    NHWC         NCHW           NHWC
+        #   Input->DATA->Permute->DATA->Permute->DATA  => Input->DATA
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'permute_1'),
+                             ('permute_1', 'permute_1_data'),
+                             ('permute_1_data', 'permute_2'),
+                             ('permute_2', 'permute_2_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+
+                             'permute_1': {'order': np.array([0, 3, 1, 2])},
+                             'permute_1_data': {'shape': np.array([1, 3, 227, 227])},
+
+                             'permute_2': {'order': np.array([0, 2, 3, 1])},
+                             'permute_2_data': {'shape': np.array([1, 227, 227, 3]), 'is_output': True},
+                             }, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data')],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}}, nodes_with_edges_only=True)
+
+        pattern = FusePermutesSequence()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_2(self):
+        #
+        #   Input->DATA->Permute->DATA->Permute->DATA  => Input->DATA->Permute->DATA
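+        #   (the second Permute order [0, 1, 2, 3] is an identity, so only the first Permute remains)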
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'permute_1'),
+                             ('permute_1', 'permute_1_data'),
+                             ('permute_1_data', 'permute_2'),
+                             ('permute_2', 'permute_2_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+
+                             'permute_1': {'order': np.array([0, 3, 1, 2])},
+                             'permute_1_data': {'shape': np.array([1, 3, 227, 227])},
+
+                             'permute_2': {'order': np.array([0, 1, 2, 3])},
+                             'permute_2_data': {'shape': np.array([1, 3, 227, 227]), 'is_output': True},
+                             }, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'permute_1'),
+                             ('permute_1', 'permute_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'permute_1': {'order': np.array([0, 3, 1, 2])},
+                             'permute_1_data': {'shape': np.array([1, 3, 227, 227])},
+                             }, nodes_with_edges_only=True)
+
+        pattern = FusePermutesSequence()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/model-optimizer/extensions/middle/MinumumMiddleReplacer_test.py b/model-optimizer/extensions/middle/MinumumMiddleReplacer_test.py
new file mode 100644 (file)
index 0000000..eb04cda
--- /dev/null
@@ -0,0 +1,84 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.middle.MinimumMiddleReplacer import MinimumMiddleReplacer
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    'placeholder_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+
+    'placeholder_2': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # minimum node:
+    'minimum': {'type': 'Minimum', 'kind': 'op', 'op': 'Minimum'},
+    # negates
+    'negate_1': {'type': 'Power', 'kind': 'op', 'op': 'Power', 'power': 1, 'scale': -1, 'shift': 0},
+    'negate_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+
+    'negate_2': {'type': 'Power', 'kind': 'op', 'op': 'Power', 'power': 1, 'scale': -1, 'shift': 0},
+    'negate_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+
+    'negate_output': {'type': 'Power', 'kind': 'op', 'op': 'Power', 'power': 1, 'scale': -1, 'shift': 0},
+    'negate_output_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+
+    # Maximum
+    'maximum': {'type': 'Eltwise', 'kind': 'op', 'op': 'Max'},
+    'maximum_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # output
+    'output_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+}
+
+
+class MinumumMiddleReplacer_test(unittest.TestCase):
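+    # Minimum(a, b) is expected to be replaced with -Maximum(-a, -b): both inputs are negated
+    # with Power(scale=-1), fed into a Maximum, and the result is negated again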
+    def test_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_1_data', 'minimum'),
+                             ('placeholder_2_data', 'minimum'),
+                             ('minimum', 'output_data')
+                             ],
+                            {'placeholder_1_data': {'value': 3, 'shape': np.array([])},
+                             'placeholder_2_data': {'value': None, 'shape': np.array([5, 5])},
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_1_data', 'negate_1'),
+                                 ('placeholder_2_data', 'negate_2'),
+                                 ('negate_1', 'negate_1_data'),
+                                 ('negate_2', 'negate_2_data'),
+                                 ('negate_1_data', 'maximum'),
+                                 ('negate_2_data', 'maximum'),
+                                 ('maximum', 'maximum_data'),
+                                 ('maximum_data', 'negate_output'),
+                                 ('negate_output', 'negate_output_data')
+                                 ])
+
+        graph.graph['layout'] = 'NHWC'
+
+        tested_class = MinimumMiddleReplacer()
+        tested_class.find_and_replace_pattern(graph=graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'minimum/negate_out_', last_node_ref='negate_output',
+                                      check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/NormalizeFullyConnected_test.py b/model-optimizer/extensions/middle/NormalizeFullyConnected_test.py
new file mode 100644 (file)
index 0000000..de6a73a
--- /dev/null
@@ -0,0 +1,119 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.middle.FusePermutesSequence import FusePermutesSequence
+from extensions.middle.NormalizeFullyConnected import NormalizeFullyConnected
+from mo.middle.passes.eliminate_test import build_graph
+from mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs
+
+# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the
+# dictionary with node attributes.
+nodes_attributes = {
+    'placeholder_1': {'name': 'placeholder_1', 'value': None, 'shape': None, 'type': 'Placeholder', 'kind': 'op',
+                      'op': 'Placeholder'},
+    'placeholder_1_data': {'name': 'placeholder_1_data', 'value': None, 'shape': None, 'kind': 'data',
+                           'data_type': None},
+    'reshape_1': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},
+    'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'fc': {'type': 'FullyConnected', 'value': None, 'kind': 'op', 'op': 'MatMul'},
+    'fc_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'fc_weights': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'reshape_2': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'},
+    'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+class NormalizeFullyConnectedTest(unittest.TestCase):
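+    # A FullyConnected with a 3D input [1, 16, 512] is expected to be wrapped with Reshapes:
+    # the input is flattened to [16, 512] and the FC output [16, 101] is reshaped back to [1, 16, 101]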
+    def test_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'fc'),
+                             ('fc_weights', 'fc'),
+                             ('fc', 'fc_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 16, 512])},
+                             'fc': {'out-size': 101},
+                             'fc_weights': {'shape': np.array([512, 101]), 'value': np.ones([512, 101]), 'input_channel_dim': 1},
+                             'fc_data': {'shape': np.array([1, 16, 101])},
+                             }, nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'reshape_1'),
+                             ('reshape_1', 'reshape_1_data'),
+                             ('reshape_1_data', 'fc'),
+                             ('fc_weights', 'fc'),
+                             ('fc', 'fc_data'),
+                             ('fc_data', 'reshape_2'),
+                             ('reshape_2', 'reshape_2_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 16, 512])},
+                             'reshape_1_data': {'shape': np.array([16, 512])},
+                             'reshape_2_data': {'shape': np.array([1, 16, 101])},
+                             'fc_weights': {'shape': np.array([512, 101]), 'value': np.ones([512, 101])},
+                             'fc': {'out-size': 101},
+                             'fc_data': {'shape': np.array([16, 101])},
+                             }, nodes_with_edges_only=True)
+
+        pattern = NormalizeFullyConnected()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data', 'placeholder_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+
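+    # The same normalization for a 4D input [2, 32, 16, 512]: the input is flattened to
+    # [2*32*16, 512] before the FullyConnected and reshaped back to [2, 32, 16, 101] afterwards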
+    def test_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'fc'),
+                             ('fc_weights', 'fc'),
+                             ('fc', 'fc_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([2, 32, 16, 512])},
+                             'fc': {'out-size': 101},
+                             'fc_weights': {'shape': np.array([512,101]), 'value': np.ones([512, 101]), 'input_channel_dim': 1},
+                             'fc_data': {'shape': np.array([2, 32, 16, 101])},
+                             }, nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'reshape_1'),
+                             ('reshape_1', 'reshape_1_data'),
+                             ('reshape_1_data', 'fc'),
+                             ('fc_weights', 'fc'),
+                             ('fc', 'fc_data'),
+                             ('fc_data', 'reshape_2'),
+                             ('reshape_2', 'reshape_2_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([2, 32, 16, 512])},
+                             'reshape_1_data': {'shape': np.array([2 * 32 * 16, 512])},
+                             'reshape_2_data': {'shape': np.array([2, 32, 16, 101])},
+                             'fc_weights': {'shape': np.array([512,101]), 'value': np.ones([512, 101])},
+                             'fc': {'out-size': 101},
+                             'fc_data': {'shape': np.array([2 * 32 * 16, 101])},
+                             }, nodes_with_edges_only=True)
+
+        pattern = NormalizeFullyConnected()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data', 'placeholder_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/PixelLinkReshape_test.py b/model-optimizer/extensions/middle/PixelLinkReshape_test.py
new file mode 100644 (file)
index 0000000..e281f60
--- /dev/null
@@ -0,0 +1,110 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.middle.PixelLinkReshape import PixelLinkReshape
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Reshape layers
+    'reshape_pack': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_pack_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'reshape_split': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_split_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'reshape_unpack': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_unpack_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice'},
+    'strided_slice_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Transpose layer
+    'reshape_split/Permute_before': {'type': 'Permute', 'kind': 'op', 'op': 'Permute'},
+    'reshape_split/Permute_before_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'reshape_pack/Permute_after': {'type': 'Permute', 'kind': 'op', 'op': 'Permute'},
+    'reshape_pack/Permute_after_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Softmax layer
+    'softmax_1': {'type': 'SoftMax', 'kind': 'op', 'op': 'SoftMax'},
+    'softmax_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+class PixelLinkReshapeTests(unittest.TestCase):
+    def test_1(self):
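+        # For the NHWC Reshape->Reshape->SoftMax->Reshape->StridedSlice chain, Permute nodes are
+        # inserted before reshape_split and after reshape_pack, and the final Reshape/StridedSlice
+        # pair is swapped so that the slice runs before the unpacking Reshape.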
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'reshape_split'),
+                             ('reshape_split', 'reshape_split_data'),
+                             ('reshape_split_data', 'reshape_pack'),
+                             ('reshape_pack', 'reshape_pack_data'),
+                             ('reshape_pack_data', 'softmax_1'),
+                             ('softmax_1', 'softmax_1_data'),
+                             ('softmax_1_data', 'reshape_unpack'),
+                             ('reshape_unpack', 'reshape_unpack_data'),
+                             ('reshape_unpack_data', 'strided_slice'),
+                             ('strided_slice', 'strided_slice_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 16])},
+                             'reshape_split': {'dim': np.array([1, 227, 227, 8, 2])},
+                             'reshape_split_data': {'shape': np.array([1, 227, 227, 8, 2])},
+                             'softmax_1_data': {'shape': np.array([1 * 227 * 227 * 8, 2])},
+                             'reshape_pack': {'dim': np.array([1 * 227 * 227 * 8, 2])},
+                             'reshape_pack_data': {'shape': np.array([1 * 227 * 227 * 8, 2])},
+                             'reshape_unpack': {'dim': np.array([1, 227, 227, 8, 2])},
+                             'reshape_unpack_data': {'shape': np.array([1, 227, 227, 8, 2])},
+                             'strided_slice': {
+                                 'slices': [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 8, 1),
+                                            slice(1, 2, 1)],
+                                 'shrink_axis_mask': [False, False, False, False, True],
+                                 'new_axis_mask': [False, False, False, False, False]},
+                             'strided_slice_data': {'shape': np.array([1, 227, 227, 8])},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'reshape_split/Permute_before'),
+                                 ('reshape_split/Permute_before', 'reshape_split/Permute_before_data'),
+                                 ('reshape_split/Permute_before_data', 'reshape_split'),
+                                 ('reshape_split', 'reshape_split_data'),
+                                 ('reshape_split_data', 'reshape_pack'),
+                                 ('reshape_pack', 'reshape_pack/Permute_after_data'),
+                                 ('reshape_pack/Permute_after_data', 'reshape_pack/Permute_after'),
+                                 ('reshape_pack/Permute_after', 'reshape_pack_data'),
+                                 ('reshape_pack_data', 'softmax_1'),
+                                 ('softmax_1', 'softmax_1_data'),
+                                 ('softmax_1_data', 'strided_slice'),
+                                 ('strided_slice', 'reshape_unpack_data'),
+                                 ('reshape_unpack_data', 'reshape_unpack'),
+                                 ('reshape_unpack', 'strided_slice_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 16])},
+                                 'reshape_split/Permute_before_data': {'shape': np.array([1, 227, 16, 227])},
+                                 'reshape_split_data': {'shape': np.array([1, 227, 227, 8, 2])},
+                                 'reshape_pack_data': {'shape': np.array([1, 2, 1 * 227 * 227 * 8])},
+                                 'reshape_pack/Permute_after_data': {'shape': np.array([1, 227 * 227 * 8, 2])},
+                                 'softmax_1_data': {'shape': np.array([1, 2, 1 * 227 * 227 * 8])},
+                                 'reshape_unpack_data': {'shape': np.array([1, 1, 227 * 227 * 8])},
+                                 'strided_slice_data': {'shape': np.array([1, 227, 227, 8])}
+                                 })
+
+        pattern = PixelLinkReshape()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'strided_slice_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/Reduce_test.py b/model-optimizer/extensions/middle/Reduce_test.py
new file mode 100644 (file)
index 0000000..1925df1
--- /dev/null
@@ -0,0 +1,320 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.middle.Reduce import ReduceReplacer
+from mo.middle.passes.eliminate_test import build_graph
+from mo.middle.passes.fusing.fuse_linear_ops_test import compare_graphs
+
+# The dictionary with node attributes used to build various graphs. A key is the name of a node and the value is a
+# dictionary with that node's attributes.
+nodes_attributes = {
+    # Placeholder layers
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+
+    # Reduce layer
+    'reduce_1': {'type': 'Reduce', 'kind': 'op', 'op': 'Reduce'},
+    'reduce_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    # Reshape layers
+    'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    # Pooling
+    'pooling': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling'},
+    'pooling_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    # Power
+    'power': {'type': 'Power', 'kind': 'op', 'op': 'Power'},
+    'power_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    # Concat
+    'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+}
+
+
+class ReduceReplacerTest(unittest.TestCase):
+    def test1(self):
+        #   Original graph
+        #   data(1,64,1)-->Reduce(axis=1,keep_dims=True)-->data(1,1,1)
+        #
+        #   Reference graph
+        #   data(1,64,1)->Reshape(1,1,64,1)->Pool(1,1,1,1)->Reshape(1,1,1)
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'reduce_1'),
+                             ('reduce_1', 'reduce_1_data'),
+                             ('reduce_1_data', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 64, 1])},
+                             'reduce_1': {'axis': np.array([1]), 'keep_dims': True, 'reduce_type': 'Mean'},
+                             'reduce_1_data': {'shape': np.array([1, 1, 1])},
+                             }, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'pooling'),
+                                 ('pooling', 'pooling_data'),
+                                 ('pooling_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data'),
+                                 ('reshape_2_data', 'concat'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 64, 1])},
+                                 'reshape_1': {'dim': np.array([1, 1, 64, 1])},
+                                 'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},
+                                 'pooling': {'window': np.array([1, 1, 64, 1])},
+                                 'pooling_data': {'shape': np.array([1, 1, 1, 1])},
+                                 'reshape_2': {'dim': np.array([1, 1, 1])},
+                                 'reshape_2_data': {'shape': np.array([1, 1, 1])},
+                                 }, nodes_with_edges_only=True)
+
+        pattern = ReduceReplacer()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test2(self):
+        #   Original graph
+        #   data(1,3,64,64)-->Reduce(axis=2,keep_dims=True)-->data(1,3,1,64)
+        #
+        #   Reference graph
+        #   data(1,3,64,64)->Reshape(1,3,64,64)->Pool(1,3,1,64)->Reshape(1,3,1,64)
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'reduce_1'),
+                             ('reduce_1', 'reduce_1_data'),
+                             ('reduce_1_data', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
+                             'reduce_1': {'axis': np.array([2]), 'keep_dims': True, 'reduce_type': 'Mean'},
+                             'reduce_1_data': {'shape': np.array([1, 3, 1, 64])},
+                             }, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'pooling'),
+                                 ('pooling', 'pooling_data'),
+                                 ('pooling_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data'),
+                                 ('reshape_2_data', 'concat'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
+                                 'reshape_1': {'dim': np.array([1, 3, 64, 64])},
+                                 'reshape_1_data': {'shape': np.array([1, 3, 64, 64])},
+                                 'pooling': {'window': np.array([1, 1, 64, 1])},
+                                 'pooling_data': {'shape': np.array([1, 3, 1, 64])},
+                                 'reshape_2': {'dim': np.array([1, 3, 1, 64])},
+                                 'reshape_2_data': {'shape': np.array([1, 3, 1, 64])},
+                                 }, nodes_with_edges_only=True)
+
+        pattern = ReduceReplacer()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test3(self):
+        #   Original graph
+        #   data(1,3,64,64)-->Reduce(axis=[2,3],keep_dims=True)-->data(1,3,1,1)
+        #
+        #   Reference graph
+        #   data(1,3,64,64)->Reshape(1,3,64*64,1)->Pool(1,3,1,1)->Reshape(1,3,1,1)
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'reduce_1'),
+                             ('reduce_1', 'reduce_1_data'),
+                             ('reduce_1_data', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
+                             'reduce_1': {'axis': np.array([2, 3]), 'keep_dims': True, 'reduce_type': 'Mean'},
+                             'reduce_1_data': {'shape': np.array([1, 3, 1, 1])},
+                             }, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'pooling'),
+                                 ('pooling', 'pooling_data'),
+                                 ('pooling_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data'),
+                                 ('reshape_2_data', 'concat'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])},
+                                 'reshape_1': {'dim': np.array([1, 3, 64 * 64, 1])},
+                                 'reshape_1_data': {'shape': np.array([1, 3, 64 * 64, 1])},
+                                 'pooling': {'window': np.array([1, 1, 64 * 64, 1])},
+                                 'pooling_data': {'shape': np.array([1, 3, 1, 1])},
+                                 'reshape_2': {'dim': np.array([1, 3, 1, 1])},
+                                 'reshape_2_data': {'shape': np.array([1, 3, 1, 1])},
+                                 }, nodes_with_edges_only=True)
+
+        pattern = ReduceReplacer()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test4(self):
+        #   Original graph
+        #   data(2,3,64,64)-->Reduce(axis=[1,2,3],keep_dims=False)-->data(2)
+        #
+        #   Reference graph
+        #   data(2,3,64,64)->Reshape(2,1,3*64*64,1)->Pool(2,1,1,1)->Reshape(2)
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'reduce_1'),
+                             ('reduce_1', 'reduce_1_data'),
+                             ('reduce_1_data', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},
+                             'reduce_1': {'axis': np.array([1, 2, 3]), 'keep_dims': False, 'reduce_type': 'Mean'},
+                             'reduce_1_data': {'shape': np.array([2])},
+                             }, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'pooling'),
+                                 ('pooling', 'pooling_data'),
+                                 ('pooling_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data'),
+                                 ('reshape_2_data', 'concat'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([2, 3, 64, 64])},
+                                 'reshape_1': {'dim': np.array([2, 1, 3 * 64 * 64, 1])},
+                                 'reshape_1_data': {'shape': np.array([2, 1, 3 * 64 * 64, 1])},
+                                 'pooling': {'window': np.array([1, 1, 3 * 64 * 64, 1])},
+                                 'pooling_data': {'shape': np.array([2, 1, 1, 1])},
+                                 'reshape_2': {'dim': np.array([2])},
+                                 'reshape_2_data': {'shape': np.array([2])},
+                                 }, nodes_with_edges_only=True)
+
+        pattern = ReduceReplacer()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test5(self):
+        #   Original graph
+        #   data(1, 16, 64, 64, 64, 4)-->Reduce(axis=[5],keep_dims=False)-->data(1, 16, 64, 64, 64)
+        #
+        #   Reference graph
+        #   data(1, 16, 64, 64, 64, 4)->Reshape(1*16*64*64, 64, 4, 1)->Pool(1*16*64*64, 64, 1, 1)->Reshape(1, 16, 64, 64, 64)
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'reduce_1'),
+                             ('reduce_1', 'reduce_1_data'),
+                             ('reduce_1_data', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},
+                             'reduce_1': {'axis': np.array([5]), 'keep_dims': False, 'reduce_type': 'max'},
+                             'reduce_1_data': {'shape': np.array([1, 16, 64, 64, 64])},
+                             }, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'pooling'),
+                                 ('pooling', 'pooling_data'),
+                                 ('pooling_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data'),
+                                 ('reshape_2_data', 'concat'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 16, 64, 64, 64, 4])},
+                                 'reshape_1': {'dim': np.array([65536, 64, 4, 1])},
+                                 'reshape_1_data': {'shape': np.array([65536, 64, 4, 1])},
+                                 'pooling': {'window': np.array([1, 1, 4, 1])},
+                                 'pooling_data': {'shape': np.array([65536, 64, 1, 1])},
+                                 'reshape_2': {'dim': np.array([1, 16, 64, 64, 64])},
+                                 'reshape_2_data': {'shape': np.array([1, 16, 64, 64, 64])},
+                                 }, nodes_with_edges_only=True)
+
+        pattern = ReduceReplacer()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test6(self):
+        #   Original graph
+        #   data(1,64,1)-->Reduce(axis=-2,keep_dims=True, reduce_type=Sum)-->data(1,1,1)
+        #
+        #   Reference graph
+        #   data(1,64,1)->Reshape(1,1,64,1)->Pool(1,1,1,1)->Reshape(1,1,1)->Power(scale=64)
+        #
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'reduce_1'),
+                             ('reduce_1', 'reduce_1_data'),
+                             ('reduce_1_data', 'concat'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 64, 1])},
+                             'reduce_1': {'axis': np.array([-2]), 'keep_dims': True, 'reduce_type': 'Sum'},
+                             'reduce_1_data': {'shape': np.array([1, 1, 1])},
+                             }, nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'pooling'),
+                                 ('pooling', 'pooling_data'),
+                                 ('pooling_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data'),
+                                 ('reshape_2_data', 'power'),
+                                 ('power', 'power_data'),
+                                 ('power_data', 'concat'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 64, 1])},
+                                 'reshape_1': {'dim': np.array([1, 1, 64, 1])},
+                                 'reshape_1_data': {'shape': np.array([1, 1, 64, 1])},
+                                 'pooling': {'window': np.array([1, 1, 64, 1])},
+                                 'pooling_data': {'shape': np.array([1, 1, 1, 1])},
+                                 'reshape_2': {'dim': np.array([1, 1, 1])},
+                                 'reshape_2_data': {'shape': np.array([1, 1, 1])},
+                                 'power': {'scale': 64.0},
+                                 'power_data': {'shape': np.array([1, 1, 1])},
+                                 }, nodes_with_edges_only=True)
+
+        pattern = ReduceReplacer()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/ShuffleChannel_test.py b/model-optimizer/extensions/middle/ShuffleChannel_test.py
new file mode 100644 (file)
index 0000000..4b1e7e4
--- /dev/null
@@ -0,0 +1,53 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+from extensions.middle.ShuffleChannel import ShuffleChannel
+from mo.utils.unittest.graph import build_graph_with_attrs, compare_graphs
+
+
+class ShuffleChannelTests(unittest.TestCase):
+    def test_1(self):
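+        # ShuffleChannel with group=2 on a [1, 10, 128, 128] input is decomposed into
+        # Reshape [1, 2, 5, -1] -> Permute [0, 2, 1, 3] -> Reshape back to [1, 10, 128, 128].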
+        graph = build_graph_with_attrs(
+                    nodes_with_attrs=[('data', {'shape': [1, 10, 128, 128], 'kind': 'data'}),
+                                      ('shuffle', {'type': 'ShuffleChannel', 'kind': 'op', 'op': 'ShuffleChannel', 'group': 2}),
+                                      ('out_data', {'shape': [1, 10, 128, 128], 'kind': 'data'}),
+                                      ],
+                    edges_with_attrs=[('data', 'shuffle'), ('shuffle', 'out_data')]
+                )
+        graph.graph['layout'] = 'NCHW'
+
+        graph_ref = build_graph_with_attrs(
+                        nodes_with_attrs=[('data', {'shape': [1, 10, 128, 128], 'kind': 'data'}),
+                                          ('split', {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape', 'dim': [1, 2, 5, -1]}),
+                                          ('split_data', {'shape': [1, 2, 5, 128*128], 'kind': 'data'}),
+                                          ('transpose', {'type': 'Permute', 'kind': 'op', 'op': 'Permute', 'order': [0, 2, 1, 3]}),
+                                          ('transpose_data', {'shape': [1, 5, 2, 128*128], 'kind': 'data'}),
+                                          ('concat', {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape', 'dim': [1, 10, 128, 128]}),
+                                          ('out_data', {'shape': [1, 10, 128, 128], 'kind': 'data'}),
+                                          ],
+                        edges_with_attrs=[('data', 'split'),
+                                          ('split', 'split_data'),
+                                          ('split_data', 'transpose'),
+                                          ('transpose', 'transpose_data'),
+                                          ('transpose_data', 'concat'),
+                                          ('concat', 'out_data')],
+                    )
+        pattern = ShuffleChannel()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'out_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/ShufflenetReshape_test.py b/model-optimizer/extensions/middle/ShufflenetReshape_test.py
new file mode 100644 (file)
index 0000000..d75c83d
--- /dev/null
@@ -0,0 +1,200 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.middle.ShufflenetReshape import FeatureShuffleReshape, ReshapeSoftmaxReshape
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Reshape layers
+    'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape', 'dim': None},
+    'reshape_1_data': {'name': 'reshape_1_data', 'value': None, 'shape': None, 'kind': 'data'},
+    'reshape_2': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_2_data': {'name': 'reshape_2_data', 'value': None, 'shape': None, 'kind': 'data'},
+    'reshape_3': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_3_data': {'name': 'reshape_3_data', 'value': None, 'shape': None, 'kind': 'data'},
+    # Transpose layer
+    'transpose_1': {'type': 'Permute', 'kind': 'op', 'op': 'Transpose'},
+    'transpose_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Softmax layer
+    'softmax_1': {'type': 'SoftMax', 'kind': 'op', 'op': 'SoftMax'},
+    'softmax_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+class FeatureShuffleReshapeTests(unittest.TestCase):
+    def test_1(self):
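+        # NHWC case: the channel shuffle Reshape->Transpose->Reshape sequence is rewritten to work
+        # on a 4D [1, 4, 28, 227 * 227] shape with a matching transpose order.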
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'reshape_1'),
+                             ('reshape_1', 'reshape_1_data'),
+                             ('reshape_1_data', 'transpose_1'),
+                             ('transpose_1', 'transpose_1_data'),
+                             ('transpose_1_data', 'reshape_2'),
+                             ('reshape_2', 'reshape_2_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])},
+                             'reshape_1_data': {'shape': np.array([227, 227, 4, 28])},
+                             'transpose_1': {'order': np.array([0, 1, 3, 2])},
+                             'transpose_1_data': {'shape': np.array([227, 227, 28, 4])},
+                             'reshape_2_data': {'shape': np.array([1, 227, 227, 112])},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'transpose_1'),
+                                 ('transpose_1', 'transpose_1_data'),
+                                 ('transpose_1_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 112])},
+                                 'reshape_1_data': {'shape': np.array([1, 4, 28, 227 * 227])},
+                                 'transpose_1': {'order': np.array([0, 2, 1, 3])},
+                                 'transpose_1_data': {'shape': np.array([1, 28, 4, 227 * 227])},
+                                 'reshape_2_data': {'shape': np.array([1, 227, 227, 112])},
+                                 'reshape_3_data': {'shape': np.array([1, 227, 227, 112])},
+                                 })
+
+        pattern = FeatureShuffleReshape()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'reshape_2_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_2(self):
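+        # NCHW case: the 5D Reshape->Transpose->Reshape shuffle is collapsed to the same
+        # 4D [1, 4, 28, 227 * 227] form used in the NHWC test.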
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'reshape_1'),
+                             ('reshape_1', 'reshape_1_data'),
+                             ('reshape_1_data', 'transpose_1'),
+                             ('transpose_1', 'transpose_1_data'),
+                             ('transpose_1_data', 'reshape_2'),
+                             ('reshape_2', 'reshape_2_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 112, 227, 227])},
+                             'reshape_1_data': {'shape': np.array([1, 4, 28, 227, 227])},
+                             'transpose_1': {'order': np.array([0, 2, 1, 3, 4])},
+                             'transpose_1_data': {'shape': np.array([1, 28, 4, 227, 227])},
+                             'reshape_2_data': {'shape': np.array([1, 112, 227, 227])},
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'transpose_1'),
+                                 ('transpose_1', 'transpose_1_data'),
+                                 ('transpose_1_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 112, 227, 227])},
+                                 'reshape_1_data': {'shape': np.array([1, 4, 28, 227 * 227])},
+                                 'transpose_1': {'order': np.array([0, 2, 1, 3])},
+                                 'transpose_1_data': {'shape': np.array([1, 28, 4, 227 * 227])},
+                                 'reshape_2_data': {'shape': np.array([1, 112, 227, 227])},
+                                 })
+
+        pattern = FeatureShuffleReshape()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'reshape_2_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+
+class ReshapeSoftmaxReshapeTests(unittest.TestCase):
+    def test_1(self):
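+        # NHWC case: the Reshape before the SoftMax is changed to produce [1, 2, 227 * 227]
+        # (channels in dimension 1) and an extra Reshape is inserted after the SoftMax.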
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'reshape_1'),
+                             ('reshape_1', 'reshape_1_data'),
+                             ('reshape_1_data', 'softmax_1'),
+                             ('softmax_1', 'softmax_1_data'),
+                             ('softmax_1_data', 'reshape_2'),
+                             ('reshape_2', 'reshape_2_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 2])},
+                             'reshape_1': {'dim': np.array([1, 227 * 227, 2])},
+                             'reshape_1_data': {'shape': np.array([1 * 227 * 227, 2])},
+                             'reshape_2_data': {'shape': np.array([1, 227, 227, 2])},
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'softmax_1'),
+                                 ('softmax_1', 'softmax_1_data'),
+                                 ('softmax_1_data', 'reshape_3'),
+                                 ('reshape_3', 'reshape_3_data'),
+                                 ('reshape_3_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 2])},
+                                 'reshape_1_data': {'shape': np.array([1, 2, 227 * 227])},
+                                 'reshape_2_data': {'shape': np.array([1, 227, 227, 2])},
+                                 })
+
+        pattern = ReshapeSoftmaxReshape()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'reshape_2_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_2(self):
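+        # Negative case: the reference graph is identical to the input one, so the pattern is
+        # expected to leave the graph unchanged.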
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'reshape_1'),
+                             ('reshape_1', 'reshape_1_data'),
+                             ('reshape_1_data', 'softmax_1'),
+                             ('softmax_1', 'softmax_1_data'),
+                             ('softmax_1_data', 'reshape_2'),
+                             ('reshape_2', 'reshape_2_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 2])},
+                             'reshape_1_data': {'shape': np.array([1 * 227 * 227, 2])},
+                             'reshape_2_data': {'shape': np.array([1, 227, 227, 2])},
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data'),
+                                 ('reshape_1_data', 'softmax_1'),
+                                 ('softmax_1', 'softmax_1_data'),
+                                 ('softmax_1_data', 'reshape_2'),
+                                 ('reshape_2', 'reshape_2_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 2])},
+                                 'reshape_1_data': {'shape': np.array([1 * 227 * 227, 2])},
+                                 'reshape_2_data': {'shape': np.array([1, 227, 227, 2])},
+                                 })
+
+        pattern = ReshapeSoftmaxReshape()
+        pattern.find_and_replace_pattern(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'reshape_2_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/SliceConvert_test.py b/model-optimizer/extensions/middle/SliceConvert_test.py
new file mode 100644 (file)
index 0000000..f282d5e
--- /dev/null
@@ -0,0 +1,124 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+import numpy as np
+
+from extensions.middle.SliceConverter import ConvertSlice
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph, compare_graphs
+from mo.ops.slice import Slice
+
+nodes_attributes = {
+    # input data
+    'placeholder_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Slice layer
+    'slice': {'type': 'Slice', 'kind': 'op', 'op': 'Slice'},
+    'slice_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Output operation
+    'output_op': {'type': 'Const', 'value': None, 'kind': 'op', 'op': 'Const'},
+    'output_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Crop layer
+    'crop': {'type': 'Crop', 'kind': 'op', 'op': 'Crop', 'axis': None, 'offset': None, 'dim': None},
+    'dim': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # StridedSlice layer
+    'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice', 'slices': None,
+                      'shrink_axis_mask': None}
+}
+
+
+class ConvertSliceTests(unittest.TestCase):
+    def test_1(self):
+        """
+        Testing case with a non-constant path and multiple slicing dimensions.
+        """
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'slice'),
+                             ('slice', 'slice_data'),
+                             ('slice_data', 'output_op'),
+                             ('output_op', 'output_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([4, 5, 6])},
+                             'slice': {'start': np.array([1, 2, 3]), 'end': np.array([3, 4, 4]), 'axis': None},
+                             'output_op': {'is_output': True},
+                             }
+                            )
+        slice_node = Node(graph, 'slice')
+        Slice.infer(slice_node)
+
+        pattern = ConvertSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'crop'),
+                                 ('crop', 'slice_data'),
+                                 ('slice_data', 'output_op'),
+                                 ('output_op', 'output_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([4, 5, 6])},
+                                 'crop': {'axis': np.array([0, 1, 2]), 'offset': np.array([1, 2, 3]),
+                                          },
+                                 'output_op': {'is_output': True},
+                                 'dim': {'dim': np.array([2, 2, 1])},
+                                 }
+                                )
+        (flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_2(self):
+        """
+        Testing case with a constant path and a single slicing dimension.
+        """
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'slice'),
+                             ('slice', 'slice_data'),
+                             ('slice_data', 'output_op'),
+                             ('output_op', 'output_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([4, 5, 6])},
+                             'slice': {'start': np.array([1]), 'end': np.array([3]), 'axis': None},
+                             'output_op': {'is_output': True}
+                             }
+                            )
+        slice_node = Node(graph, 'slice')
+        Slice.infer(slice_node)
+
+        pattern = ConvertSlice()
+        pattern.find_and_replace_pattern(graph)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'strided_slice'),
+                                 ('strided_slice', 'slice_data'),
+                                 ('slice_data', 'output_op'),
+                                 ('output_op', 'output_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([4, 5, 6])},
+                                 'strided_slice': {'slices': np.array([slice(1, 3, 1),slice(0, 5, 1),slice(0, 6, 1)]),
+                                                   'shrink_axis_mask': np.array([False, False, False])},
+                                 'output_op': {'is_output': True}
+                                 }
+                                )
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/TensorIteratorBackEdge_test.py b/model-optimizer/extensions/middle/TensorIteratorBackEdge_test.py
new file mode 100644 (file)
index 0000000..c4482c4
--- /dev/null
@@ -0,0 +1,84 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from extensions.middle.TensorIteratorBackEdge import BackEdgesMatching
+from mo.utils.unittest.graph import compare_graphs, build_graph_with_attrs
+
+
+class BackEdgesMatchingTests(unittest.TestCase):
+    def test_no_exit(self):
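+        # The matched back-edge subgraph (without an Exit node) is replaced by a single
+        # TensorIteratorBackEdge node fed by the enter, body and condition data nodes.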
+        pattern_matcher = BackEdgesMatching()
+        pattern = pattern_matcher.pattern()
+        graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], update_edge_attrs=None,
+                                       new_nodes_with_attrs=[('from_body_data', {'kind':'data'})],
+                                       new_edges_with_attrs=[('from_body_data', 'NextIteration')])
+
+        pattern_matcher.find_and_replace_pattern(graph)
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=[('condition', {'kind': 'op', 'op':'TensorIteratorCondition'}),
+                                                      ('condition_data', {'kind': 'data'}),
+                                                      ('back_edge', {'kind': 'op', 'op': 'TensorIteratorBackEdge'}),
+                                                      ('enter_data', {'kind': 'data'}),
+                                                      ('from_body_data', {'kind': 'data'}),
+                                                      ('Identity_1_data', {'kind': 'data'}),],
+                                    edges_with_attrs=[('condition', 'condition_data'),
+                                           ('enter_data', 'back_edge', {'in': 0}),
+                                           ('condition_data', 'back_edge', {'in': 2}),  # {in:2}
+                                           ('from_body_data', 'back_edge', {'in': 1}),
+                                           ('back_edge', 'Identity_1_data')],
+                                    update_edge_attrs=None,
+                                    new_nodes_with_attrs=[],
+                                    new_edges_with_attrs=[],
+                                )
+        (flag, resp) = compare_graphs(graph, graph_ref, 'Identity_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_with_exit(self):
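+        # When the matched subgraph also contains an Exit node, a TensorIteratorOutput node
+        # is created in addition to the TensorIteratorBackEdge node.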
+        pattern_matcher = BackEdgesMatching()
+        pattern = pattern_matcher.pattern()
+        graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], update_edge_attrs=None,
+                                new_nodes_with_attrs=[('from_body_data', {'kind':'data'}),
+                                           ('exit', {'kind': 'op', 'op': 'Exit', 'name': 'exit'}),
+                                           ('exit_data', {'kind':'data'})],
+                                new_edges_with_attrs=[('from_body_data', 'NextIteration'),
+                                           ('Switch_1', 'exit', {'out': 0}),
+                                           ('exit', 'exit_data')])
+
+        pattern_matcher.find_and_replace_pattern(graph)
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=[('condition', {'kind': 'op', 'op':'TensorIteratorCondition'}),
+                                                      ('condition_data', {'kind': 'data'}),
+                                                      ('back_edge', {'kind': 'op', 'op': 'TensorIteratorBackEdge'}),
+                                                      ('enter_data', {'kind': 'data'}),
+                                                      ('from_body_data', {'kind': 'data'}),
+                                                      ('Identity_1_data', {'kind': 'data'}),
+                                                      ('output', {'kind':'op', 'op':'TensorIteratorOutput'}),
+                                                      ('exit_data', {'kind': 'data'})
+                                                        ],
+                                            edges_with_attrs=[('condition', 'condition_data'),
+                                                   ('enter_data', 'back_edge', {'in': 0}),
+                                                   ('condition_data', 'back_edge', {'in': 2}),
+                                                   ('from_body_data', 'back_edge', {'in': 1}),
+                                                   ('back_edge', 'Identity_1_data'),
+                                                   ('condition_data', 'output'),
+                                                   ('output', 'exit_data'),
+                                                   ('from_body_data', 'output')],
+                                            update_edge_attrs=None,
+                                            new_nodes_with_attrs=[],
+                                            new_edges_with_attrs=[],
+                                )
+        (flag, resp) = compare_graphs(graph, graph_ref, 'Identity_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
\ No newline at end of file
diff --git a/model-optimizer/extensions/middle/TensorIteratorCondition_test.py b/model-optimizer/extensions/middle/TensorIteratorCondition_test.py
new file mode 100644 (file)
index 0000000..8ebd9dd
--- /dev/null
@@ -0,0 +1,70 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+import numpy as np
+
+from extensions.middle.TensorIteratorCondition import LoopConditionMatcher
+from mo.utils.unittest.graph import build_graph_with_attrs, compare_graphs
+
+
+class TensorIteratorConditionTests(unittest.TestCase):
+    def test(self):
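+        # The matched loop condition subgraph is collapsed into a single TensorIteratorCondition
+        # node fed by the StridedSlice and Minimum data nodes.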
+        pattern_matcher = LoopConditionMatcher()
+        pattern = pattern_matcher.pattern()
+
+        graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'],
+                                       new_nodes_with_attrs=[('maximum', {'kind':'op', 'op': 'Maximum'}),
+                                                             ('maximum_data', {'kind': 'data'})],
+                                       new_edges_with_attrs=[('maximum', 'maximum_data'),
+                                                             ('maximum_data', 'minimum', {'in':1})],
+                                       update_nodes_attributes=[('init_1_data', {'value': np.array([0])}),
+                                                                ('init_2_data', {'value': np.array([0])}),
+                                                                ('add_1_y_data', {'value': np.array(1)}),
+                                                                ('add_2_y_data', {'value': np.array(1)}),
+                                                                ('loop_cond_data', {'value': None}),
+                                                                ('Identity_2_data', {'value': None}),
+                                                                ],
+                                       update_edge_attrs={('Strided_slice_data', 'minimum',0): {'in': 0}})
+
+        pattern_matcher.find_and_replace_pattern(graph)
+        graph_ref = build_graph_with_attrs(
+            nodes_with_attrs=[('TensorIteratorCondition', {'kind': 'op', 'op': 'TensorIteratorCondition'}),
+                              ('loop_cond_data', {'kind': 'data'}),
+                              ('identity_data', {'kind': 'data'}),
+                              ('StridedSlice', {'kind': 'op', 'op':'StridedSlice'}),
+                              ('StridedSlice_data', {'kind': 'data'}),
+                              ('Maximum', {'kind': 'op', 'op': 'Maximum'}),
+                              ('Maximum_data', {'kind': 'data'}),
+                              ('minimum', {'kind': 'op', 'op': 'Minimum'}),
+                              ('minimum_data', {'kind': 'data'}),
+                              ],
+            edges_with_attrs=[('Maximum', 'Maximum_data'),
+                              ('Maximum_data', 'minimum'),
+                              ('StridedSlice', 'StridedSlice_data'),
+                              ('StridedSlice_data', 'TensorIteratorCondition', {'in':0}),
+                              ('StridedSlice_data', 'minimum'),
+                              ('minimum', 'minimum_data'),
+                              ('minimum_data', 'TensorIteratorCondition', {'in':1}),
+                              ('TensorIteratorCondition', 'loop_cond_data'),
+                              ('TensorIteratorCondition', 'identity_data'),
+                              ],
+            update_edge_attrs=None,
+            new_nodes_with_attrs=[],
+            new_edges_with_attrs=[],
+            )
+        (flag, resp) = compare_graphs(graph, graph_ref, 'loop_cond_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
\ No newline at end of file
diff --git a/model-optimizer/extensions/middle/TensorIteratorInput_test.py b/model-optimizer/extensions/middle/TensorIteratorInput_test.py
new file mode 100644 (file)
index 0000000..efd560c
--- /dev/null
@@ -0,0 +1,163 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+import numpy as np
+
+from extensions.middle.TensorIteratorInput import SmartInputMatcher, SimpleInputMatcher, BackEdgeSimpleInputMatcher
+from mo.utils.unittest.graph import build_graph_with_attrs, compare_graphs
+
+
+class SmartInputMatcherTests(unittest.TestCase):
+    def test(self):
+        pattern_matcher = SmartInputMatcher()
+        pattern = pattern_matcher.pattern()
+
+        graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'],
+                                       update_edge_attrs={('range_data', 'TensorArrayScatter', 0): {'in': 1},
+                                                          ('TensorArray_handle', 'TensorArrayScatter', 0): {'in': 0},
+                                                          ('TensorArray_flow', 'TensorArrayScatter', 0): {'in': 3}},
+                                       new_nodes_with_attrs=[('ta_size', {'kind': 'data'}),
+                                                             ('ta_size_op', {'kind': 'op'}),
+                                                             ('value', {'kind': 'data'}),
+                                                             ],
+                                       new_edges_with_attrs=[
+                                           ('ta_size_op', 'ta_size'),
+                                           ('ta_size', 'TensorArray'),
+                                           ('value', 'TensorArrayScatter', {'in':2}),
+                                                             ],
+                                       update_nodes_attributes=[('Enter_data', {'value': np.array([1])}),
+                                                                ('stack_data', {'value': np.array([0])}),
+                                                                ('stack_1_data', {'value': np.array([1])}),
+                                                                ('stack_2_data', {'value': np.array([1])}),
+                                                                ('start_data', {'value': np.array([0])}),
+                                                                ('delta_data', {'value': np.array([1])})
+                                                                ])
+
+        pattern_matcher.find_and_replace_pattern(graph)
+        graph_ref = build_graph_with_attrs(
+            nodes_with_attrs=[('condition_data', {'kind': 'data'}),
+                              ('TensorIteratorInput', {'kind': 'op', 'op': 'TensorIteratorInput'}),
+                              ('TensorArrayRead_data', {'kind': 'data'}),
+                              ('value', {'kind': 'data'}),
+                              ('ta_size', {'kind': 'data'}),
+                              ('ta_size_op', {'kind': 'op'})],
+            edges_with_attrs=[('ta_size', 'TensorIteratorInput', {'in': 0}),
+                              ('condition_data', 'TensorIteratorInput', {'in': 2}),
+                              ('value', 'TensorIteratorInput', {'in': 1}),
+                              ('TensorIteratorInput', 'TensorArrayRead_data'),
+                              ('ta_size_op', 'ta_size')],
+            update_edge_attrs=None,
+            new_nodes_with_attrs=[],
+            new_edges_with_attrs=[],
+            )
+        (flag, resp) = compare_graphs(graph, graph_ref, 'TensorArrayRead_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+
+class SimpleInputMatcherTest(unittest.TestCase):
+    def test(self):
+        pattern_matcher = SimpleInputMatcher()
+        pattern = pattern_matcher.pattern()
+
+        graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'],
+                                       update_edge_attrs=None,
+                                       new_nodes_with_attrs=[('in_node', {'kind': 'data'}),
+                                                             ('Enter_data', {'kind': 'data'})],
+                                       new_edges_with_attrs=[('in_node', 'Enter'), ('Enter', 'Enter_data')],
+                                       update_nodes_attributes=[])
+
+        pattern_matcher.find_and_replace_pattern(graph)
+
+        graph_ref = build_graph_with_attrs(
+            nodes_with_attrs=[('TensorIteratorInput', {'kind': 'op', 'op': 'TensorIteratorInput'}),
+                              ('in_node', {'kind': 'data'}),
+                              ('Enter_data', {'kind': 'data'})
+                              ],
+            edges_with_attrs=[('in_node', 'TensorIteratorInput'), ('TensorIteratorInput', 'Enter_data')],
+        )
+        (flag, resp) = compare_graphs(graph, graph_ref, 'Enter_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+
+class BackEdgeInputMatcherTest(unittest.TestCase):
+    def test1(self):
+        """
+        Case with constant input to init
+        """
+        pattern_matcher = BackEdgeSimpleInputMatcher()
+        pattern = pattern_matcher.pattern()
+
+        graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'],
+                                       new_nodes_with_attrs=[('cycle_data', {'kind': 'data'}),
+                                                             ('condition', {'kind': 'data'}),
+                                                             ('init', {'kind': 'data', 'shape': np.array([1,3])}),
+                                                             ],
+                                       new_edges_with_attrs=[('condition', 'BackEdge', {'in': 2}),
+                                                             ('init', 'BackEdge', {'in': 0}),
+                                                             ('cycle_data', 'BackEdge', {'in': 1})],)
+
+        pattern_matcher.find_and_replace_pattern(graph)
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'],
+                                       new_nodes_with_attrs=[('cycle_data', {'kind': 'data'}),
+                                                             ('condition', {'kind': 'data'}),
+                                                             ('init', {'kind': 'data', 'shape': np.array([1,3])}),
+                                                             ('TensorIteratorInput', {'kind': 'op', 'op': 'TensorIteratorInput'}),
+                                                             ('TensorIteratorInput_data', {'kind': 'data', 'shape': np.array([1,3])}),
+                                                             ],
+                                       new_edges_with_attrs=[('TensorIteratorInput_data', 'TensorIteratorInput'),
+                                                             ('TensorIteratorInput', 'init'),
+                                                            ('condition', 'BackEdge', {'in': 2}),
+                                                             ('init', 'BackEdge', {'in': 0}),
+                                                             ('cycle_data', 'BackEdge', {'in': 1})],)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'BackEdge', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test2(self):
+        """
+        Case with non-constant input to init.
+        Nothing should happen to the graph.
+        """
+        pattern_matcher = BackEdgeSimpleInputMatcher()
+        pattern = pattern_matcher.pattern()
+
+        graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'],
+                                       new_nodes_with_attrs=[('cycle_data', {'kind': 'data'}),
+                                                             ('condition', {'kind': 'data'}),
+                                                             ('init', {'kind': 'data', 'shape': np.array([1, 3])}),
+                                                             ('Enter', {'kind': 'op', 'op': 'Enter'}),
+                                                             ],
+                                       new_edges_with_attrs=[('Enter', 'init'),
+                                                             ('condition', 'BackEdge', {'in': 2}),
+                                                             ('init', 'BackEdge', {'in': 0}),
+                                                             ('cycle_data', 'BackEdge', {'in': 1})])
+
+        pattern_matcher.find_and_replace_pattern(graph)
+
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'],
+                                       new_nodes_with_attrs=[('cycle_data', {'kind': 'data'}),
+                                                             ('condition', {'kind': 'data'}),
+                                                             ('init', {'kind': 'data', 'shape': np.array([1, 3])}),
+                                                             ('Enter', {'kind': 'op', 'op': 'Enter'}),
+                                                             ],
+                                       new_edges_with_attrs=[('Enter', 'init'),
+                                                             ('condition', 'BackEdge', {'in': 2}),
+                                                             ('init', 'BackEdge', {'in': 0}),
+                                                             ('cycle_data', 'BackEdge', {'in': 1})], )
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'BackEdge', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/TensorIteratorOutput_test.py b/model-optimizer/extensions/middle/TensorIteratorOutput_test.py
new file mode 100644 (file)
index 0000000..d6aa940
--- /dev/null
@@ -0,0 +1,63 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+import numpy as np
+
+from extensions.middle.TensorIteratorOutput import SmartOutputMatcher
+from mo.utils.unittest.graph import build_graph_with_attrs, compare_graphs
+
+
+class SmartOutputMatcherTests(unittest.TestCase):
+    def test(self):
+        pattern_matcher = SmartOutputMatcher()
+        pattern = pattern_matcher.pattern()
+
+        graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'],
+                                       new_nodes_with_attrs=[('index', {'kind': 'data'}),
+                                                             ('value', {'kind': 'data'}),
+                                                             ('ta_size', {'kind': 'data'}),
+                                                             ],
+                                       new_edges_with_attrs=[('index', 'TensorArrayWrite', {'in': 1}),
+                                                             ('value', 'TensorArrayWrite', {'in': 2}),
+                                                             ('ta_size', 'TensorArray')
+                                                             ],
+                                       update_nodes_attributes=[('WriteEnter_data', {'value': np.array([1, 1])}),
+                                                                ('start_data', {'value': np.array([0])}),
+                                                                ('delta_data', {'value': np.array([1])}),
+                                                                ])
+
+        pattern_matcher.find_and_replace_pattern(graph)
+
+        graph_ref = build_graph_with_attrs(
+            nodes_with_attrs=[
+                              ('TensorIteratorOutput', {'kind': 'op', 'op': 'TensorIteratorOutput'}),
+                              ('TensorArrayGather_data', {'kind': 'data'}),
+                              ('index', {'kind': 'data'}),
+                              ('value', {'kind': 'data'}),
+                              ('ta_size', {'kind': 'data'}), ],
+            edges_with_attrs=[('ta_size', 'TensorIteratorOutput', {'in': 0}),
+                              ('index', 'TensorIteratorOutput', {'in': 2}),
+                              ('value', 'TensorIteratorOutput', {'in': 1}),
+                              ('TensorIteratorOutput', 'TensorArrayGather_data')],
+            update_edge_attrs=None,
+            new_nodes_with_attrs=[],
+            new_edges_with_attrs=[],
+            )
+        (flag, resp) = compare_graphs(graph, graph_ref, 'TensorArrayGather_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
\ No newline at end of file
diff --git a/model-optimizer/extensions/middle/UselessSridedSlice_test.py b/model-optimizer/extensions/middle/UselessSridedSlice_test.py
new file mode 100644 (file)
index 0000000..8fbf240
--- /dev/null
@@ -0,0 +1,99 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+import numpy as np
+
+from extensions.middle.UselessStridedSlice import UselessStridedSliceEraser
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    # input data
+    'placeholder': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_data': {'value': None, 'shape': np.array([4, 5, 6]), 'kind': 'data', 'data_type': None},
+    #
+    'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice', 'shrink_axis_mask': None,
+                      'slices': [slice(0, 4, 1), slice(0, 5, 1), slice(0, 6, 1)]},
+    'strided_slice_data': {'value': None, 'shape': np.array([4, 5, 6]), 'kind': 'data'},
+    'strided_slice_input_1_data': {'value': None, 'shape': np.array([3]), 'kind': 'data'},
+    'strided_slice_input_2_data': {'value': None, 'shape': np.array([3]), 'kind': 'data'},
+    'strided_slice_input_3_data': {'value': None, 'shape': np.array([3]), 'kind': 'data'},
+    #
+    'strided_slice_2': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice', 'shrink_axis_mask': None,
+                        'slices': [slice(0, 4, 1), slice(0, 5, 1), slice(0, 6, 1)]},
+    'strided_slice_2_data': {'value': None, 'shape': np.array([4, 5, 6]), 'kind': 'data'},
+    # Output operation
+    'output_op': {'type': 'OpOutput', 'kind': 'op', 'op': 'OpOutput', 'output_op': {'is_output': True}},
+}
+
+
+class UselessStridedSliceTests(unittest.TestCase):
+    def test_single_stride_slice_removal(self):
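+        # the slices span the full [4, 5, 6] input, so the StridedSlice is a no-op and should be erased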
+        graph = build_graph(nodes_attributes,
+                            [('placeholder', 'placeholder_data'),
+                             ('placeholder_data', 'strided_slice'),
+                             ('strided_slice_input_1_data', 'strided_slice'),
+                             ('strided_slice_input_2_data', 'strided_slice'),
+                             ('strided_slice_input_3_data', 'strided_slice'),
+                             ('strided_slice', 'strided_slice_data'),
+                             ('strided_slice_data', 'output_op'),
+                             ],
+                            {},
+                            nodes_with_edges_only=True
+                            )
+
+        pattern = UselessStridedSliceEraser()
+        pattern.find_and_replace_pattern(graph)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder', 'placeholder_data'),
+                                 ('placeholder_data', 'output_op'),
+                                 ],
+                                {'placeholder_data': {'shape': np.array([4, 5, 6])}}
+                                )
+        (flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_consecutive_stride_slices_removal(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder', 'placeholder_data'),
+                             ('placeholder_data', 'strided_slice'),
+                             ('strided_slice_input_1_data', 'strided_slice'),
+                             ('strided_slice_input_2_data', 'strided_slice'),
+                             ('strided_slice_input_3_data', 'strided_slice'),
+                             ('strided_slice', 'strided_slice_data'),
+                             ('strided_slice_data', 'strided_slice_2'),
+                             ('strided_slice_input_1_data', 'strided_slice_2'),
+                             ('strided_slice_input_2_data', 'strided_slice_2'),
+                             ('strided_slice_input_3_data', 'strided_slice_2'),
+                             ('strided_slice_2', 'strided_slice_2_data'),
+                             ('strided_slice_2_data', 'output_op'),
+                             ],
+                            {},
+                            nodes_with_edges_only=True
+                            )
+
+        pattern = UselessStridedSliceEraser()
+        pattern.find_and_replace_pattern(graph)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder', 'placeholder_data'),
+                                 ('placeholder_data', 'output_op'),
+                                 ],
+                                {'placeholder_data': {'shape': np.array([4, 5, 6])}}
+                                )
+        (flag, resp) = compare_graphs(graph, graph_ref, 'output_op', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/middle/lstm_sequence_normalize_test.py b/model-optimizer/extensions/middle/lstm_sequence_normalize_test.py
new file mode 100644 (file)
index 0000000..d15e680
--- /dev/null
@@ -0,0 +1,55 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+import numpy as np
+
+from extensions.middle.lstm_sequence_normalize import LSTMSequenceNormalize
+from mo.utils.unittest.graph import compare_graphs, build_graph_with_attrs
+from mo.graph.graph import Node
+
+
+class LSTMSequenceNormalizeTest(unittest.TestCase):
+
+    def test_squeeze_num_directions(self):
+        tested_obj = LSTMSequenceNormalize()
+        pattern = tested_obj.pattern()
+        orig_shape = np.array([10, 1, 20, 128], dtype=np.int64)  # seq_length, num_dims, batch_size, data_size
+        new_shape = np.array([10, 20, 128], dtype=np.int64)
+        graph = build_graph_with_attrs(
+            nodes_with_attrs=pattern['nodes'],
+            edges_with_attrs=pattern['edges'],
+            update_edge_attrs={
+                ('W', 'lstm', 0): {'in': 1},
+                ('R', 'lstm', 0): {'in': 2},
+            },
+            new_nodes_with_attrs=[
+                ('output', {'shape': orig_shape}),
+            ],
+            new_edges_with_attrs=[
+                ('lstm', 'output', {'out': 0}),
+            ],
+        )
+
+        lstm = Node(graph, 'lstm')
+        match = {'lstm': lstm}
+        tested_obj.squeeze_num_directions(graph, match)
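+        # the num_directions dimension (size 1) is squeezed from the LSTM output; a Reshape restores the original 4D shape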
+        self.assertTrue(np.array_equal(lstm.out_node(0).shape, new_shape))
+        reshape_node = lstm.out_node(0).out_node(0)
+        self.assertTrue(reshape_node.op == 'Reshape')
+        self.assertTrue(np.array_equal(reshape_node.dim, orig_shape))
+        self.assertTrue(reshape_node.out_node(0).id == 'output')
diff --git a/model-optimizer/extensions/ops/accum_test.py b/model-optimizer/extensions/ops/accum_test.py
new file mode 100644 (file)
index 0000000..b2762f3
--- /dev/null
@@ -0,0 +1,120 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.accum import AccumOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+wrong_attrs_graph = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                     'accum': {'type': 'Accum', 'kind': 'op'},
+                     'node_3': {'type': 'Identity', 'kind': 'op'}}
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'node_2': {'type': 'Identity', 'kind': 'op'},
+                    'accum': {'type': 'Accum', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}}
+
+
+class TestAccumOp(unittest.TestCase):
+    def test_accum_infer_assertion(self):
+        graph = build_graph(wrong_attrs_graph,
+                            [('node_1', 'accum'),
+                             ('accum', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'accum': {
+                                 'top_height': 0,
+                                 'top_width': 0,
+                                 'size_divisible_by': 0,
+                                 'have_reference': 1
+                             }
+                             })
+
+        accum_node = Node(graph, 'accum')
+        self.assertRaises(AssertionError, AccumOp.accum_infer, accum_node)
+
+    def test_accum_infer_have_reference(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'accum'),
+                             ('node_2', 'accum'),
+                             ('accum', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_2': {'shape': np.array([1, 3, 227, 227])},
+                             'accum': {
+                                 'top_height': 0,
+                                 'top_width': 0,
+                                 'size_divisible_by': 0,
+                                 'have_reference': 1
+                             }
+                             })
+
+        accum_node = Node(graph, 'accum')
+        AccumOp.accum_infer(accum_node)
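+        # expected: spatial dims are kept and the input channels are summed, 3 + 3 = 6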
+        exp_shape = np.array([1, 6, 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_accum_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'accum'),
+                             ('node_2', 'accum'),
+                             ('accum', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_2': {'shape': np.array([1, 3, 227, 227])},
+                             'accum': {
+                                 'top_height': 0,
+                                 'top_width': 0,
+                                 'size_divisible_by': 0,
+                                 'have_reference': 0
+                             }
+                             })
+
+        accum_node = Node(graph, 'accum')
+        AccumOp.accum_infer(accum_node)
+        exp_shape = np.array([1, 6, 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_accum_infer_top_height_top_width(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'accum'),
+                             ('node_2', 'accum'),
+                             ('accum', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_2': {'shape': np.array([1, 3, 227, 227])},
+                             'accum': {
+                                 'top_height': 229,
+                                 'top_width': 229,
+                                 'size_divisible_by': 0,
+                                 'have_reference': 0
+                             }
+                             })
+
+        accum_node = Node(graph, 'accum')
+        AccumOp.accum_infer(accum_node)
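+        # expected: non-zero top_height/top_width set the output spatial dims to 229 x 229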
+        exp_shape = np.array([1, 6, 229, 229])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/argmax_test.py b/model-optimizer/extensions/ops/argmax_test.py
new file mode 100644 (file)
index 0000000..14edf5e
--- /dev/null
@@ -0,0 +1,145 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.argmax import ArgMaxOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'argmax': {'type': 'ArgMax', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+class TestArgMaxOp(unittest.TestCase):
+    def test_caffe_argmax_axis(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'argmax'),
+                             ('argmax', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 1025, 2049])},
+                             'argmax': {
+                                 'out_max_val': True,
+                                 'top_k': 100,
+                                 'axis': 2
+                             }
+                             })
+
+        argmax_node = Node(graph, 'argmax')
+        ArgMaxOp.argmax_infer(argmax_node)
+        exp_shape = np.array([1, 3, 100, 2049])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_argmax_axis_negative(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'argmax'),
+                             ('argmax', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 1025, 2049])},
+                             'argmax': {
+                                 'out_max_val': True,
+                                 'top_k': 100,
+                                 'axis': -1
+                             }
+                             })
+
+        argmax_node = Node(graph, 'argmax')
+        ArgMaxOp.argmax_infer(argmax_node)
+        exp_shape = np.array([1, 3, 1025, 100])
+        res_shape = graph.node['node_3']['shape']
+        self.assertEqual(argmax_node.axis, 3)
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_argmax_no_axis(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'argmax'),
+                             ('argmax', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 1025, 2049])},
+                             'argmax': {
+                                 'out_max_val': True,
+                                 'top_k': 100
+                             }
+                             })
+
+        argmax_node = Node(graph, 'argmax')
+        ArgMaxOp.argmax_infer(argmax_node)
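+        # expected [N, 2, top_k, 1]: with out_max_val=True and no axis, both index and value of the top_k maxima are kept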
+        exp_shape = np.array([1, 2, 100, 1])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_argmax_extend_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'argmax'),
+                             ('argmax', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3])},
+                             'argmax': {
+                                 'out_max_val': True,
+                                 'top_k': 100
+                             }
+                             })
+
+        argmax_node = Node(graph, 'argmax')
+        ArgMaxOp.argmax_infer(argmax_node)
+        exp_shape = np.array([1, 2, 100])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_argmax_out_max_val_false(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'argmax'),
+                             ('argmax', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3])},
+                             'argmax': {
+                                 'out_max_val': False,
+                                 'top_k': 100
+                             }
+                             })
+
+        argmax_node = Node(graph, 'argmax')
+        ArgMaxOp.argmax_infer(argmax_node)
+        exp_shape = np.array([1, 1, 100])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_argmax_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'argmax'),
+                             ('argmax', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': None},
+                             'argmax': {
+                                 'out_max_val': False,
+                                 'top_k': 100
+                             }
+                             })
+
+        argmax_node = Node(graph, 'argmax')
+        ArgMaxOp.argmax_infer(argmax_node)
+        res_shape = graph.node['node_3']['shape']
+        self.assertIsNone(res_shape)
diff --git a/model-optimizer/extensions/ops/assert_test.py b/model-optimizer/extensions/ops/assert_test.py
new file mode 100644 (file)
index 0000000..37417d5
--- /dev/null
@@ -0,0 +1,53 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+from unittest.mock import Mock
+
+from extensions.ops.assert_op import Assert
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph_with_edge_attrs
+
+
+class TestAssert(unittest.TestCase):
+    def test_assert_cf_true(self):
+        me_mock = Mock()
+        nodes = {
+            'input_data': {'kind': 'data', 'executable': True},
+            'assert': {'type': 'Assert', 'value': None, 'kind': 'op', 'op': 'Assert'},
+            'assert_data': {'value': True, 'kind': 'data', 'executable': True}}
+        edges = [
+            ('input_data', 'assert', {'in': 0}),
+            ('assert', 'assert_data', {'out': 0, 'control_flow_edge': False})]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        tested_class = Assert(graph=graph, attrs={})
+        node = Node(graph, 'assert')
+        tested_class.assert_control_flow_infer(node=node, is_executable=True, mark_executability=me_mock)
+        me_mock.assert_called_once_with('assert_data', True)
+
+    def test_assert_cf_false(self):
+        me_mock = Mock()
+        nodes = {
+            'input_data': {'name': 'input', 'kind': 'data', 'executable': True},
+            'assert': {'name': 'assert', 'type': 'Assert', 'value': None, 'kind': 'op', 'op': 'Assert'},
+            'assert_data': {'name': 'output', 'value': False, 'kind': 'data', 'executable': True}}
+        edges = [
+            ('input_data', 'assert', {'in': 0}),
+            ('assert', 'assert_data', {'out': 0, 'control_flow_edge': False})]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        tested_class = Assert(graph=graph, attrs={})
+        node = Node(graph, 'assert')
+        tested_class.assert_control_flow_infer(node=node, is_executable=True, mark_executability=me_mock)
+        me_mock.assert_called_once_with('assert_data', False)
diff --git a/model-optimizer/extensions/ops/correlation_test.py b/model-optimizer/extensions/ops/correlation_test.py
new file mode 100644 (file)
index 0000000..a47aec2
--- /dev/null
@@ -0,0 +1,58 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.correlation import CorrelationOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'node_2': {'type': 'Identity', 'kind': 'op'},
+                    'corr': {'type': 'Correlation', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+class TestCorrelationPartialInfer(unittest.TestCase):
+    def test_correlation_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'corr'),
+                                ('node_2', 'corr'),
+                                ('corr', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 3, 227, 227])},
+                                'node_2': {'shape': np.array([1, 3, 227, 227])},
+                                'corr': {'pad': 20,
+                                         'kernel_size': 1,
+                                         'max_displacement': 20,
+                                         'stride_1': 1,
+                                         'stride_2': 2,
+                                         'single_direction': 0,
+                                         'do_abs': False,
+                                         'correlation_type': 0}
+                            })
+
+        corr_node = Node(graph, 'corr')
+        CorrelationOp.corr_infer(corr_node)
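+        # expected output channels: (2 * max_displacement / stride_2 + 1) ** 2 = 21 ** 2 = 441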
+        exp_shape = np.array([1, 441, 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/ctc_greedy_decoder_test.py b/model-optimizer/extensions/ops/ctc_greedy_decoder_test.py
new file mode 100644 (file)
index 0000000..b5a9217
--- /dev/null
@@ -0,0 +1,51 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.ctc_greedy_decoder import CTCGreedyDecoderOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'node_2': {'type': 'Identity', 'kind': 'op'},
+                    'ctc': {'type': 'CTCGreedyDecoder', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+class TestCTCGreedyDecoderPartialInfer(unittest.TestCase):
+    def test_ctc_greedy_decoder_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'ctc'),
+                                ('node_2', 'ctc'),
+                                ('ctc', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([88, 2, 71])},
+                                'node_2': {'shape': np.array([88, 2])},
+                                'ctc': {'ctc_merge_repeated': 1}
+                            })
+
+        ctc_node = Node(graph, 'ctc')
+        CTCGreedyDecoderOp.ctc_greedy_decoder_infer(ctc_node)
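+        # input is [T=88, N=2, C=71]; expected decoder output shape is [N, T, 1, 1]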
+        exp_shape = np.array([2, 88, 1, 1])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/data_augmentation_test.py b/model-optimizer/extensions/ops/data_augmentation_test.py
new file mode 100644 (file)
index 0000000..d8b30e3
--- /dev/null
@@ -0,0 +1,62 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.data_augmentation import DataAugmentationOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {
+    'node_1': {'type': 'Identity', 'kind': 'op'},
+    'da': {'type': 'DataAugmentation', 'kind': 'op'},
+    'node_3': {'type': 'Identity', 'kind': 'op'}
+}
+
+
+class TestDataAugmentationPartialInfer(unittest.TestCase):
+    def test_data_augmentation_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'da'),
+                                ('da', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 3, 227, 227])},
+                                'da': {'crop_width': 225,
+                                       'crop_height': 225,
+                                       'write_augmented': "",
+                                       'max_multiplier': 255.0,
+                                       'augment_during_test': True,
+                                       'recompute_mean': 0,
+                                       'write_mean': "",
+                                       'mean_per_pixel': False,
+                                       'mean': 0,
+                                       'mode': "add",
+                                       'bottomwidth': 0,
+                                       'bottomheight': 0,
+                                       'num': 0,
+                                       'chromatic_eigvec': [0.0]}
+                            })
+
+        da_node = Node(graph, 'da')
+        DataAugmentationOp.data_augmentation_infer(da_node)
+        exp_shape = np.array([1, 3, 225, 225])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/depth_to_space_test.py b/model-optimizer/extensions/ops/depth_to_space_test.py
new file mode 100644 (file)
index 0000000..26b3c4e
--- /dev/null
@@ -0,0 +1,57 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.depth_to_space import DepthToSpaceOp
+from mo.graph.graph import Node
+from mo.utils.error import Error
+from mo.utils.unittest.graph import build_graph
+
+nodes = {
+    'in_data_node': {'value': None, 'kind': 'data', 'shape': np.array([1, 1024, 576, 256])},
+    'DtS': {'op': 'DepthToSpace', 'kind': 'op', 'block_size': 2},
+    'out_data_node': {'value': None, 'kind': 'data', 'shape': None}
+}
+
+edges = [
+    ('in_data_node', 'DtS'),
+    ('DtS', 'out_data_node')
+]
+
+
+class TestDepthToSpacePartialInfer(unittest.TestCase):
+    def test_tf_depth_to_space_infer(self):
+        graph = build_graph(nodes, edges)
+        dts_node = Node(graph, 'DtS')
+        DepthToSpaceOp.depth_to_space_infer(dts_node)
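+        # NHWC input [1, 1024, 576, 256] with block_size=2: H and W double, channels shrink by 2 * 2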
+        exp_shape = np.array([1, 2048, 1152, 64])
+        res_shape = graph.node['out_data_node']['shape']
+        self.assertTrue(np.array_equal(exp_shape, res_shape))
+
+    def test_tf_depth_to_space_infer_error(self):
+        graph = build_graph(nodes, edges)
+        graph.node['in_data_node']['shape'] = np.array([1024, 576, 256])
+        dts_node = Node(graph, 'DtS')
+        self.assertRaises(Error, DepthToSpaceOp.depth_to_space_infer, dts_node)
+
+    def test_tf_depth_to_space_infer_error_1(self):
+        graph = build_graph(nodes, edges)
+        graph.node['in_data_node']['shape'] = np.array([1, 1024, 576, 255])
+        dts_node = Node(graph, 'DtS')
+        self.assertRaises(Error, DepthToSpaceOp.depth_to_space_infer, dts_node)
diff --git a/model-optimizer/extensions/ops/gather_test.py b/model-optimizer/extensions/ops/gather_test.py
new file mode 100644 (file)
index 0000000..4f749f7
--- /dev/null
@@ -0,0 +1,59 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.gather import Gather
+from mo.front.common.partial_infer.utils import int64_array
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+
+class TestGatherPartialInfer(unittest.TestCase):
+    @staticmethod
+    def _create_graph():
+        nodes_attributes = {'gather_input': {'shape': None, 'value': None, 'kind': 'data'},
+                            'gather_input2': {'shape': None, 'value': None, 'kind': 'data'},
+                            'gather_node': {'op': 'Gather', 'kind': 'op'},
+                            'gather_output': {'shape': None, 'value': None, 'kind': 'data'}
+                            }
+        return build_graph(nodes_attributes,
+                           [
+                               ('gather_input', 'gather_node'), ('gather_node', 'gather_output'), ('gather_input2', 'gather_node')
+                           ],
+                           {
+                               'gather_input': {'shape': int64_array([10, 15]), 'value': np.ones((3, 15))},
+                               'gather_input2': {'shape': int64_array([2]), 'value': np.array([0, 2])},
+                               'gather_node': {'axis': 0},
+                           })
+
+    def test_gather_infer(self):
+        graph = self._create_graph()
+
+        gather_node = Node(graph, 'gather_node')
+        Gather.infer(gather_node)
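+        # gathering indices [0, 2] along axis 0 of a [10, 15] input gives shape [2, 15]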
+
+        exp_shape = int64_array([2, 15])
+        res_shape = graph.node['gather_output']['shape']
+        res_value = graph.node['gather_output']['value']
+
+        self.assertTrue(np.array_equal(exp_shape, res_shape),
+                        'shapes do not match expected: {} and given: {}'.format(exp_shape, res_shape))
+
+        self.assertTrue(np.array_equal(res_value, np.ones(exp_shape)),
+                        'values do not match expected: {} and given: {}'.format(np.ones(exp_shape), res_value))
diff --git a/model-optimizer/extensions/ops/grn_test.py b/model-optimizer/extensions/ops/grn_test.py
new file mode 100644 (file)
index 0000000..351023f
--- /dev/null
@@ -0,0 +1,45 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'grn': {'type': 'GRN', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}}
+
+
+class TestGRNOp(unittest.TestCase):
+    def test_grn_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'grn'),
+                             ('grn', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'grn': {'bias': 1}
+                             })
+
+        grn_node = Node(graph, 'grn')
+        copy_shape_infer(grn_node)
+        exp_shape = np.array([1, 3, 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/instance_normalization_test.py b/model-optimizer/extensions/ops/instance_normalization_test.py
new file mode 100644 (file)
index 0000000..e106f47
--- /dev/null
@@ -0,0 +1,28 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import networkx as nx
+
+from extensions.ops.instance_normalization import InstanceNormalization
+
+
+class InstanceNormalizationOp(unittest.TestCase):
+    def test_constructor_supported_attrs(self):
+        graph = nx.MultiDiGraph()
+        op = InstanceNormalization(graph, attrs={'epsilon': 0.1})
+        self.assertEqual(op.supported_attrs(), ['epsilon'])
diff --git a/model-optimizer/extensions/ops/interp_test.py b/model-optimizer/extensions/ops/interp_test.py
new file mode 100644 (file)
index 0000000..cf2bbc9
--- /dev/null
@@ -0,0 +1,216 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.interp import InterpOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'node_2': {'type': 'Identity', 'value': None, 'kind': 'data'},
+                    'interp': {'type': 'Interp', 'kind': 'op', 'factor': None, 'parse_2nd_input': 'value'},
+                    'node_3': {'type': 'Identity', 'shape': None, 'value': None, 'kind': 'data'}
+                    }
+
+
+class TestInterpOp(unittest.TestCase):
+    def test_caffe_interp_infer_shrink(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'interp'),
+                             ('interp', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 1025, 2049])},
+                             'interp': {'shrink_factor': 2,
+                                        'height': 0,
+                                        'width': 0,
+                                        'zoom_factor': 1,
+                                        'pad_beg': 0,
+                                        'pad_end': 0}
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        interp_node = Node(graph, 'interp')
+        InterpOp.interp_infer(interp_node)
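+        # shrink_factor=2 maps each spatial dim to (dim - 1) / 2 + 1: 1025 -> 513, 2049 -> 1025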
+        exp_shape = np.array([1, 3, 513, 1025])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_interp_infer_wh(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'interp'),
+                             ('interp', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 1024, 1, 1])},
+                             'interp': {'width': 65,
+                                        'height': 33,
+                                        'zoom_factor': 1,
+                                        'shrink_factor': 1,
+                                        'pad_beg': 0,
+                                        'pad_end': 0}
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        interp_node = Node(graph, 'interp')
+        InterpOp.interp_infer(interp_node)
+        exp_shape = np.array([1, 1024, 33, 65])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_interp_infer_zoom(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'interp'),
+                             ('interp', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 256, 33, 65])},
+                             'interp': {'zoom_factor': 2,
+                                        'height': 0,
+                                        'width': 0,
+                                        'shrink_factor': 1,
+                                        'pad_beg': 0,
+                                        'pad_end': 0}
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        interp_node = Node(graph, 'interp')
+        InterpOp.interp_infer(interp_node)
+        exp_shape = np.array([1, 256, 66, 130])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_interp_infer_zoom_shrink(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'interp'),
+                             ('interp', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 256, 33, 65])},
+                             'interp': {'zoom_factor': 2,
+                                        'height': 0,
+                                        'width': 0,
+                                        'shrink_factor': 2,
+                                        'pad_beg': 0,
+                                        'pad_end': 0}
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        interp_node = Node(graph, 'interp')
+        InterpOp.interp_infer(interp_node)
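+        # Equal shrink and zoom factors (2) are expected to cancel out, leaving the spatial dims at 33x65.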
+        exp_shape = np.array([1, 256, 33, 65])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_interp_infer_zoom_shrink_error(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'interp'),
+                             ('interp', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 256, 33, 65])},
+                             'interp': {'zoom_factor': 0,
+                                        'height': 0,
+                                        'width': 0,
+                                        'shrink_factor': 0,
+                                        'pad_beg': 0,
+                                        'pad_end': 0}
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        interp_node = Node(graph, 'interp')
+        InterpOp.interp_infer(interp_node)
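+        # Zero shrink/zoom factors are invalid, so no output shape should be inferred.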
+        self.assertIsNone(graph.node['node_3']['shape'])
+
+    def test_caffe_interp_infer_zoom_default(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'interp'),
+                             ('interp', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 256, 33, 65])},
+                             'interp': {'zoom_factor': 1,
+                                        'height': 0,
+                                        'width': 0,
+                                        'shrink_factor': 1,
+                                        'pad_beg': 0,
+                                        'pad_end': 0
+                                        }
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        interp_node = Node(graph, 'interp')
+        InterpOp.interp_infer(interp_node)
+        exp_shape = np.array([1, 256, 33, 65])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_interp_2_blobs(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'interp'),
+                             ('node_2', 'interp'),
+                             ('interp', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 256, 33, 66])},
+                             'node_2': {'shape': np.array([1, 1, 3, 6])},
+                             'interp': {'zoom_factor': 1,
+                                        'shrink_factor': 1,
+                                        'pad_beg': 0,
+                                        'pad_end': 0,
+                                        'parse_2nd_input': 'shape',
+                                        }
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        interp_node = Node(graph, 'interp')
+        InterpOp.interp_infer(interp_node)
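+        # With parse_2nd_input='shape', the output H and W are expected to come from the second input's shape (3x6).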
+        exp_shape = np.array([1, 256, 3, 6])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_tf_interp_infer_two_inputs(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'interp'),
+                             ('node_2', 'interp'),
+                             ('interp', 'node_3')],
+                            {'node_1': {'shape': np.array([1, 20, 30, 100])},
+                             'node_2': {'shape': np.array([2]), 'value': np.array([2, 3])}})
+        graph.graph['layout'] = 'NHWC'
+        interp_node = Node(graph, 'interp')
+        InterpOp.interp_infer(interp_node)
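+        # For the TF case the second input carries the target (height, width), so the NHWC output becomes [1, 2, 3, 100].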
+        exp_shape = np.array([1, 2, 3, 100])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_tf_interp_infer_one_input_hw(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'interp'),
+                             ('interp', 'node_3')],
+                            {'node_1': {'shape': np.array([1, 20, 30, 100])},
+                             'interp': {'height': 4, 'width': 6, 'pad_beg': 0, 'pad_end': 0, 'zoom_factor': None,
+                                        'shrink_factor': None}})
+        graph.graph['layout'] = 'NHWC'
+        interp_node = Node(graph, 'interp')
+        InterpOp.interp_infer(interp_node)
+        exp_shape = np.array([1, 4, 6, 100])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/merge_test.py b/model-optimizer/extensions/ops/merge_test.py
new file mode 100644 (file)
index 0000000..755da1a
--- /dev/null
@@ -0,0 +1,99 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+import numpy as np
+
+from extensions.ops.merge import Merge
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph_with_attrs, compare_graphs
+
+
+class TestMerge(unittest.TestCase):
+    nodes = [
+        ('first', {'value': np.ones((2, 2)), 'kind': 'data', 'executable': True, 'shape': np.array([2, 2]),
+                   'is_partial_inferred': True}),
+        ('second', {'value': np.zeros((2, 2)), 'kind': 'data', 'executable': False, 'shape': np.array([2, 2]),
+                    'is_partial_inferred': True}),
+        ('merge', {'type': 'Merge', 'kind': 'op', 'op': 'Merge'}),
+        ('merge_output', {'value': None, 'kind': 'data', 'executable': True, 'shape': None}),
+    ]
+    edges = [
+        ('first', 'merge', {'in': 0}),
+        ('second', 'merge', {'in': 1}),
+        ('merge', 'merge_output', {'out': 0}),
+    ]
+
+    def test_merge_infer_simple_case_one_executable(self):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges)
+
+        # We should propagate the value of the first input, since only this input is executable
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes,
+                                           edges_with_attrs=self.edges,
+                                           update_nodes_attributes=[('merge_output', {'shape': np.array([2, 2]),
+                                                                                      'value': np.ones((2,2))}),
+                                                                    ('merge', {'is_not_fully_inferred': False})])
+
+        tested_class = Merge(graph=graph, attrs={})
+        node = Node(graph, 'merge')
+        tested_class.merge_infer(node)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'merge_output', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_merge_infer_complex_case(self):
+        """
+        Case as in cycles: on the first visit only one input is inferred, and on the second visit both are.
+        """
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
+                                       update_nodes_attributes=[('first', {'is_partial_inferred': False,
+                                                                           'value': None}),
+                                                                ('second', {'executable': True})])
+
+        # On the first visit we should propagate only shapes
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes,
+                                           edges_with_attrs=self.edges,
+                                           update_nodes_attributes=[('second', {'executable': True}),
+                                                                    ('first', {'is_partial_inferred': False,
+                                                                                'value': None}),
+                                                                    ('merge_output', {'shape': np.array([2, 2]),
+                                                                                      'value': None}),
+                                                                    ('merge', {'is_not_fully_inferred': True})])
+        tested_class = Merge(graph=graph, attrs={})
+        node = Node(graph, 'merge')
+        tested_class.merge_infer(node)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'merge_output', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+        # Imitate that the input node is now inferred
+        graph.node['first']['is_partial_inferred'] = True
+
+        # Run infer a second time
+        tested_class = Merge(graph=graph, attrs={})
+        node = Node(graph, 'merge')
+        tested_class.merge_infer(node)
+
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes,
+                                           edges_with_attrs=self.edges,
+                                           update_nodes_attributes=[('second', {'executable': True}),
+                                                                    ('first', {'is_partial_inferred': True,
+                                                                               'value': None}),
+                                                                    ('merge_output', {'shape': np.array([2, 2]),
+                                                                                      'value': None}),
+                                                                    ('merge', {'is_not_fully_inferred': False})])
+        (flag, resp) = compare_graphs(graph, graph_ref, 'merge_output', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/extensions/ops/normalize_test.py b/model-optimizer/extensions/ops/normalize_test.py
new file mode 100644 (file)
index 0000000..8a15fd6
--- /dev/null
@@ -0,0 +1,46 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'norm': {'type': 'Normalize', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+class TestNormalize(unittest.TestCase):
+    def test_normalize_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'norm'),
+                             ('norm', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'norm': {}
+                             })
+
+        norm_node = Node(graph, 'norm')
+        copy_shape_infer(norm_node)
+        exp_shape = np.array([1, 3, 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/priorbox_clustered_test.py b/model-optimizer/extensions/ops/priorbox_clustered_test.py
new file mode 100644 (file)
index 0000000..849ba7e
--- /dev/null
@@ -0,0 +1,77 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.priorbox_clustered import PriorBoxClusteredOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'data'},
+                    'node_2': {'type': 'Identity', 'value': None, 'kind': 'data'},
+                    'pbc': {'type': 'PriorBoxClustered', 'value': None, 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'value': None, 'kind': 'data'}
+                    }
+
+
+class TestPriorBoxClusteredPartialInfer(unittest.TestCase):
+    def test_caffe_priorboxclustered_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'pbc'),
+                                ('node_2', 'pbc'),
+                                ('pbc', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 384, 19, 19])},
+                                'node_2': {'shape': np.array([1, 3, 300, 300])},
+                                'pbc': {'flip': 0, 'clip': 0, 'variance': [0.1, 0.1, 0.2, 0.2],
+                                        'step': 0, 'offset': 0.5, 'width': [1., 1., 1., 1., 1., 1., 1., 1., 1.],
+                                        'height': [2., 2., 2., 2., 2., 2., 2., 2., 2.]}
+                            })
+        graph.graph['layout'] = 'NCHW'
+
+        pbc_node = Node(graph, 'pbc')
+        PriorBoxClusteredOp.priorbox_clustered_infer(pbc_node)
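+        # 19*19 cells with 9 clustered priors (one per width/height pair), 4 coordinates each: 19 * 19 * 9 * 4 = 12996.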
+        exp_shape = np.array([1, 2, 12996])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_tf_priorboxclustered_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'pbc'),
+                                ('node_2', 'pbc'),
+                                ('pbc', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 19, 19, 384])},
+                                'node_2': {'shape': np.array([1, 300, 300, 3])},
+                                'pbc': {'flip': 0, 'clip': 0, 'variance': [0.1, 0.1, 0.2, 0.2],
+                                        'step': 0, 'offset': 0.5, 'width': [1., 1., 1., 1., 1., 1., 1., 1., 1.],
+                                        'height': [2., 2., 2., 2., 2., 2., 2., 2., 2.]}
+                            })
+        graph.graph['layout'] = 'NHWC'
+
+        pbc_node = Node(graph, 'pbc')
+        PriorBoxClusteredOp.priorbox_clustered_infer(pbc_node)
+        exp_shape = np.array([1, 2, 12996])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/priorbox_test.py b/model-optimizer/extensions/ops/priorbox_test.py
new file mode 100644 (file)
index 0000000..fbb42a4
--- /dev/null
@@ -0,0 +1,122 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.priorbox import PriorBoxOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'data'},
+                    'pb': {'type': 'PriorBox', 'value': None, 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'value': None, 'kind': 'data'}
+                   }
+
+
+class TestPriorBoxPartialInfer(unittest.TestCase):
+    def test_caffe_priorbox_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'pb'),
+                                ('pb', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 384, 19, 19])},
+                                'pb': {
+                                    'aspect_ratio': np.array([1]),
+                                    'flip': 0,
+                                    'min_size': np.array([1]),
+                                    'max_size': np.array([1])
+                                }
+                            })
+        graph.graph['layout'] = 'NCHW'
+        pb_node = Node(graph, 'pb')
+        PriorBoxOp.priorbox_infer(pb_node)
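+        # 19*19 cells, 2 priors per cell (one for min_size and one for max_size with a single aspect ratio), 4 coordinates each.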
+        exp_shape = np.array([1, 2, 4*19*19*2])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_priorbox_flip_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'pb'),
+                                ('pb', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 384, 19, 19])},
+                                'pb': {
+                                    'aspect_ratio': np.array([1, 2, 0.5]),
+                                    'flip': 1,
+                                    'min_size': np.array([1]),
+                                    'max_size': np.array([1])
+                                }
+                            })
+        graph.graph['layout'] = 'NCHW'
+        pb_node = Node(graph, 'pb')
+        PriorBoxOp.priorbox_infer(pb_node)
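+        # Aspect ratios {1, 2, 0.5} give 3 min_size priors plus one max_size prior, i.e. 4 priors per cell (assumed prior-count rule).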
+        exp_shape = np.array([1, 2, 4*19*19*4])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_tf_priorbox_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'pb'),
+                                ('pb', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 19, 19, 384])},
+                                'pb': {
+                                    'aspect_ratio': np.array([1]),
+                                    'flip': 0,
+                                    'min_size': np.array([1]),
+                                    'max_size': np.array([1])
+                                }
+                            })
+        graph.graph['layout'] = 'NHWC'
+        pb_node = Node(graph, 'pb')
+        PriorBoxOp.priorbox_infer(pb_node)
+        exp_shape = np.array([1, 2, 4*19*19*2])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_tf_priorbox_flip_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'pb'),
+                                ('pb', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 19, 19, 384])},
+                                'pb': {
+                                    'aspect_ratio': np.array([1, 2, 0.5]),
+                                    'flip': 1,
+                                    'min_size': np.array([1]),
+                                    'max_size': np.array([1])
+                                }
+                            })
+        graph.graph['layout'] = 'NHWC'
+        pb_node = Node(graph, 'pb')
+        PriorBoxOp.priorbox_infer(pb_node)
+        exp_shape = np.array([1, 2, 4*19*19*4])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/proposal_test.py b/model-optimizer/extensions/ops/proposal_test.py
new file mode 100644 (file)
index 0000000..0298468
--- /dev/null
@@ -0,0 +1,47 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.proposal import ProposalOp
+from mo.front.common.extractors.utils import layout_attrs
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'proposal': {'type': 'proposal', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+class TestProposal(unittest.TestCase):
+    def test_proposal_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'proposal'),
+                             ('proposal', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'proposal': {'post_nms_topn': 2, **layout_attrs()}
+                             })
+
+        proposal_node = Node(graph, 'proposal')
+        ProposalOp.proposal_infer(proposal_node)
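+        # Output is expected to be [batch * post_nms_topn, 5]; each row is assumed to hold a batch index plus 4 box coordinates.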
+        exp_shape = np.array([1 * 2, 5])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/psroipooling_test.py b/model-optimizer/extensions/ops/psroipooling_test.py
new file mode 100644 (file)
index 0000000..10cdee1
--- /dev/null
@@ -0,0 +1,84 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.psroipooling import PSROIPoolingOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'node_2': {'type': 'Identity', 'kind': 'op'},
+                    'psroipool': {'type': 'PSROIPooling', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+class TestPSROIPooling(unittest.TestCase):
+    def test_psroipool_infer_nchw(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'psroipool'),
+                             ('node_2', 'psroipool'),
+                             ('psroipool', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_2': {'shape': np.array([100, 5])},
+                             'psroipool': {'output_dim': 4, 'group_size': 15}
+                             })
+        graph.graph['layout'] = 'NCHW'
+        psroipool_node = Node(graph, 'psroipool')
+        PSROIPoolingOp.psroipooling_infer(psroipool_node)
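+        # Expected layout is [num_rois, output_dim, group_size, group_size] for NCHW.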
+        exp_shape = np.array([100, 4, 15, 15])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_psroipool_infer_nhwc(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'psroipool'),
+                             ('node_2', 'psroipool'),
+                             ('psroipool', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 227, 227, 3])},
+                             'node_2': {'shape': np.array([100, 5])},
+                             'psroipool': {'output_dim': 4, 'group_size': 15}
+                             })
+        graph.graph['layout'] = 'NHWC'
+        psroipool_node = Node(graph, 'psroipool')
+        PSROIPoolingOp.psroipooling_infer(psroipool_node)
+        exp_shape = np.array([100, 15, 15, 4])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_psroipool_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'psroipool'),
+                             ('node_2', 'psroipool'),
+                             ('psroipool', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': None},
+                             'node_2': {'shape': np.array([100, 5])},
+                             'psroipool': {'output_dim': 4, 'group_size': 224}
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        psroipool_node = Node(graph, 'psroipool')
+        PSROIPoolingOp.psroipooling_infer(psroipool_node)
+        res_shape = graph.node['node_3']['shape']
+        self.assertIsNone(res_shape)
diff --git a/model-optimizer/extensions/ops/regionyolo_test.py b/model-optimizer/extensions/ops/regionyolo_test.py
new file mode 100644 (file)
index 0000000..715163a
--- /dev/null
@@ -0,0 +1,133 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.regionyolo import RegionYoloOp
+from mo.front.common.extractors.utils import layout_attrs
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'region': {'type': 'RegionYolo', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+class TestRegionYOLOCaffe(unittest.TestCase):
+    def test_region_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'region'),
+                             ('region', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'region': {'axis': 1, 'end_axis': -1, 'do_softmax': 1, **layout_attrs()}
+                             })
+        graph.graph['layout'] = 'NCHW'
+        reorg_node = Node(graph, 'region')
+        RegionYoloOp.regionyolo_infer(reorg_node)
+        exp_shape = np.array([1, 3 * 227 * 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_region_infer_flatten(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'region'),
+                             ('region', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'region': {'end_axis': 1, 'axis': 0, 'do_softmax': 1, **layout_attrs()}
+                             })
+        graph.graph['layout'] = 'NCHW'
+        reorg_node = Node(graph, 'region')
+        RegionYoloOp.regionyolo_infer(reorg_node)
+        exp_shape = np.array([1 * 3, 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_region_infer_flatten_again(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'region'),
+                             ('region', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'region': {'end_axis': 2, 'axis': 0, 'do_softmax': 1, **layout_attrs()}
+                             })
+        graph.graph['layout'] = 'NCHW'
+        reorg_node = Node(graph, 'region')
+        RegionYoloOp.regionyolo_infer(reorg_node)
+        exp_shape = np.array([1 * 3 * 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_region_infer_do_softmax(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'region'),
+                             ('region', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'region': {'do_softmax': 0, 'end_axis': -1, 'axis': 1, 'classes': 80, 'coords': 4,
+                                        'mask': np.array([6, 7, 8]), **layout_attrs()}
+                             })
+
+        graph.graph['layout'] = 'NCHW'
+        reorg_node = Node(graph, 'region')
+        RegionYoloOp.regionyolo_infer(reorg_node)
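+        # Without softmax the channel count is expected to be (classes + coords + 1) * len(mask).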
+        exp_shape = np.array([1, (80 + 4 + 1) * 3, 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+
+class TestRegionYOLOTF(unittest.TestCase):
+    def test_region_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'region'),
+                             ('region', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 227, 227, 3])},
+                             'region': {'axis': 1, 'end_axis': -1, 'do_softmax': 1, **layout_attrs()}
+                             })
+        graph.graph['layout'] = 'NHWC'
+        reorg_node = Node(graph, 'region')
+        RegionYoloOp.regionyolo_infer(reorg_node)
+        exp_shape = np.array([1, 3 * 227 * 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_region_infer_do_softmax(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'region'),
+                             ('region', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 227, 227, 3])},
+                             'region': {'do_softmax': 0, 'end_axis': -1, 'axis': 1, 'classes': 80, 'coords': 4,
+                                        'mask': np.array([6, 7, 8]), **layout_attrs()}
+                             })
+
+        graph.graph['layout'] = 'NHWC'
+        reorg_node = Node(graph, 'region')
+        RegionYoloOp.regionyolo_infer(reorg_node)
+        exp_shape = np.array([1, 227, 227, (80 + 4 + 1) * 3])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/reorgyolo_test.py b/model-optimizer/extensions/ops/reorgyolo_test.py
new file mode 100644 (file)
index 0000000..7021fd5
--- /dev/null
@@ -0,0 +1,57 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.reorgyolo import ReorgYoloOp
+from mo.front.common.extractors.utils import layout_attrs
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'reorg': {'type': 'ReorgYolo', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+def calculate_reorgyolo_output(input, stride):
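+    # Reorg keeps the batch dim, multiplies channels by stride^2 and divides the spatial dims by stride.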
+    output = np.full_like(input, -1, dtype=np.int64)
+    output[0] = input[0]
+    output[1] = input[1] * stride ** 2
+    output[2] = np.round(input[2] / stride)
+    output[3] = np.round(input[3] / stride)
+    return output
+
+
+class TestReorgYOLO(unittest.TestCase):
+    def test_reorgyolo_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'reorg'),
+                             ('reorg', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'reorg': {'stride': 2,
+                                       **layout_attrs()}
+                             })
+
+        reorg_node = Node(graph, 'reorg')
+        ReorgYoloOp.reorgyolo_infer(reorg_node)
+        exp_shape = calculate_reorgyolo_output(np.array([1, 3, 227, 227]), 2)
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/resample_test.py b/model-optimizer/extensions/ops/resample_test.py
new file mode 100644 (file)
index 0000000..bf4c4f0
--- /dev/null
@@ -0,0 +1,94 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.resample import ResampleOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'resample': {'type': 'Resample', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+class TestResampleOp(unittest.TestCase):
+    def test_tf_resample_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'resample'),
+                             ('resample', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'resample': {'antialias': 1,
+                                          'height': 384,
+                                          'width': 512,
+                                          'resample_type': 'LINEAR',
+                                          'factor': 1.0}
+                             })
+
+        graph.graph['layout'] = 'NCHW'
+        resample_node = Node(graph, 'resample')
+        ResampleOp.resample_infer(resample_node)
+        exp_shape = np.array([1, 3, 384, 512])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_factor_infer(self):
+        factor = 3.0
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'resample'),
+                             ('resample', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 224, 227])},
+                             'resample': {'antialias': 1,
+                                          'resample_type': 'LINEAR',
+                                          'factor': factor}
+                             })
+        graph.graph['layout'] = 'NCHW'
+        resample_node = Node(graph, 'resample')
+        ResampleOp.resample_infer(resample_node)
+        exp_shape = np.array([1, 3, 224 * factor, 227 * factor])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_tf_infer(self):
+        new_width = 100
+        new_height = 125
+        new_attrs = nodes_attributes.copy()
+        new_attrs.update({'new_shape': {'value': np.array([new_height, new_width]), 'type': 'Const', 'kind': 'op'}})
+        graph = build_graph(new_attrs,
+                            [('node_1', 'resample'),
+                             ('new_shape', 'resample'),
+                             ('resample', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 224, 227, 3])},
+                             'resample': {'antialias': 1,
+                                          'resample_type': 'LINEAR',
+                                          'factor': 1.0,
+                                          'fw': 'tf'}
+                             })
+        graph.graph['layout'] = 'NHWC'
+        resample_node = Node(graph, 'resample')
+        ResampleOp.resample_infer(resample_node)
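+        # With fw='tf' the target spatial size is expected to come from the 'new_shape' constant input.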
+        exp_shape = np.array([1, new_height, new_width, 3])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/select_test.py b/model-optimizer/extensions/ops/select_test.py
new file mode 100644 (file)
index 0000000..15578d3
--- /dev/null
@@ -0,0 +1,114 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+import numpy as np
+
+from extensions.ops.select import Select
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph_with_attrs, compare_graphs
+
+
+class TestSelect(unittest.TestCase):
+    nodes = [
+        ('then', {'value': np.ones((2, 2)), 'kind': 'data', 'executable': True, 'shape': np.array([2, 2])}),
+        ('else', {'value': np.zeros((2, 2)), 'kind': 'data', 'executable': True, 'shape': np.array([2, 2])}),
+        ('condition', {'value': None, 'kind': 'data', 'executable': True}),
+        ('select', {'type': 'Select', 'kind': 'op', 'op': 'Select'}),
+        ('select_output', {'value': None, 'kind': 'data', 'executable': True, 'shape': None}),
+    ]
+    edges = [
+        ('condition', 'select', {'in': 0}),
+        ('then', 'select', {'in': 1}),
+        ('else', 'select', {'in': 2}),
+        ('select', 'select_output', {'out': 0}),
+    ]
+
+    def test_select_infer_no_condition(self):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges)
+
+        # We should propagate only shapes
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes,
+                                           edges_with_attrs=self.edges,
+                                           update_nodes_attributes=[('select_output', {'shape': np.array([2, 2])})])
+
+        tested_class = Select(graph=graph, attrs={})
+
+        node = Node(graph, 'select')
+        tested_class.infer(node)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'select_output', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_select_infer_condition_true(self):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
+                                       update_nodes_attributes=[('condition', {'value': np.array([True])})])
+
+        # We should propagate shapes and values
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes,
+                                           edges_with_attrs=self.edges,
+                                           update_nodes_attributes=[('select_output', {'shape': np.array([2, 2]),
+                                                                                       'value': np.ones((2,2))})])
+
+        tested_class = Select(graph=graph, attrs={})
+
+        node = Node(graph, 'select')
+        tested_class.infer(node)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'select_output', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_select_infer_condition_false(self):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
+                                       update_nodes_attributes=[('condition', {'value': np.array([False])})])
+
+        # We should propagate shapes and values
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes,
+                                           edges_with_attrs=self.edges,
+                                           update_nodes_attributes=[('select_output', {'shape': np.array([2, 2]),
+                                                                                       'value': np.zeros((2, 2))})])
+
+        tested_class = Select(graph=graph, attrs={})
+
+        node = Node(graph, 'select')
+        tested_class.infer(node)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'select_output', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_select_infer_assert_shapes(self):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
+                                       update_nodes_attributes=[('else', {'shape': np.array([3,3]), 'value':np.zeros((3,3))})])
+
+        tested_class = Select(graph=graph, attrs={})
+
+        node = Node(graph, 'select')
+        with self.assertRaisesRegex(AssertionError, "TensorFlow \'Select\' operation has 3 inputs: \'condition\',"
+                                                    " \'then\' and \'else\' tensors.\'then\' and \'else\' tensors"
+                                                    " must have the same shape by TensorFlow reference"):
+            tested_class.infer(node)
+
+    def test_select_infer_assert_condition_bool(self):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
+                                       update_nodes_attributes=[('condition', {'value': np.array([3])})])
+
+        tested_class = Select(graph=graph, attrs={})
+
+        node = Node(graph, 'select')
+        with self.assertRaisesRegex(AssertionError, "TensorFlow \'Select\' operation has 3 inputs: \'condition\',"
+                                                    " \'then\' and \'else\' tensors. Value of \'condition\' tensor"
+                                                    " must be boolen by TensorFlow reference"):
+            tested_class.infer(node)
\ No newline at end of file
diff --git a/model-optimizer/extensions/ops/simplernms_test.py b/model-optimizer/extensions/ops/simplernms_test.py
new file mode 100644 (file)
index 0000000..08cbf53
--- /dev/null
@@ -0,0 +1,57 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.simplernms import SimplerNMSOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'SimplerNMS_1': {'type': 'SimplerNMS', 'kind': 'op'},
+                    'node_1': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+class TestSimplerNMSInfer(unittest.TestCase):
+    def test_simplernms_infer_ideal(self):
+        graph = build_graph(nodes_attributes,
+                            [('SimplerNMS_1', 'node_1')],
+                            {'node_1': {'is_output': True, 'shape': None},
+                             'SimplerNMS_1': {'feat_stride': 16, 'post_nms_topn': 150, 'scale': [1, 2, 3]}
+                             })
+
+        simplernms_node = Node(graph, 'SimplerNMS_1')
+
+        SimplerNMSOp.simplernms_infer(simplernms_node)
+        exp_shape = np.array([150, 5])
+        res_shape = graph.node['node_1']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
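+        # The infer is also expected to stringify the 'scale' attribute values.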
+        self.assertEqual(simplernms_node.scale, ['1', '2', '3'])
+
+    def test_simplernms_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('SimplerNMS_1', 'node_1')],
+                            {'node_1': {'is_output': True, 'shape': None},
+                             'SimplerNMS_1': {'feat_stride': 12, 'post_nms_topn': 150, 'scale': [1, 2, 3]}
+                             })
+
+        simplernms_node = Node(graph, 'SimplerNMS_1')
+
+        SimplerNMSOp.simplernms_infer(simplernms_node)
+        self.assertIsNone(graph.node['node_1']['shape'])
diff --git a/model-optimizer/extensions/ops/spatial_transformer_test.py b/model-optimizer/extensions/ops/spatial_transformer_test.py
new file mode 100644 (file)
index 0000000..86b7ec2
--- /dev/null
@@ -0,0 +1,71 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from extensions.ops.spatial_transformer import SpatialTransformOp
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'node_2': {'type': 'Identity', 'kind': 'op'},
+                    'st': {'type': 'SpatialTransform', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'kind': 'op'}
+                    }
+
+
+class TestSpatialTransformInfer(unittest.TestCase):
+    def test_sp_transform_concat_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'st'),
+                                ('node_2', 'st'),
+                                ('st', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 3, 227, 227])},
+                                'node_2': {'shape': np.array([1, 3, 227, 227])},
+                                'st': {}
+                            })
+
+        st_node = Node(graph, 'st')
+        SpatialTransformOp.sp_infer(st_node)
+        exp_shape = np.array([1, 3, 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_sp_transform_with_output_params_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('node_1', 'st'),
+                                ('node_2', 'st'),
+                                ('st', 'node_3')],
+                            {
+                                'node_3': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 3, 227, 227])},
+                                'node_2': {'shape': np.array([1, 3, 227, 227])},
+                                'st': {'output_H': 200, 'output_W': 15}
+                            })
+
+        st_node = Node(graph, 'st')
+        SpatialTransformOp.sp_infer(st_node)
+        exp_shape = np.array([1, 3, 200, 15])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/extensions/ops/switch_test.py b/model-optimizer/extensions/ops/switch_test.py
new file mode 100644 (file)
index 0000000..c5bb759
--- /dev/null
@@ -0,0 +1,234 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import Mock, call
+
+import numpy as np
+
+from extensions.ops.switch import Switch
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph_with_edge_attrs, compare_graphs, build_graph_with_attrs
+
+
+class TestSwitch(unittest.TestCase):
+    def test_switch_infer_with_condition(self):
+        nodes = [
+            ('tensor', {'value': np.zeros((3, 3)), 'kind': 'data', 'executable': True, 'shape': np.array([3, 3])}),
+            ('pred_id', {'value': True, 'kind': 'data', 'executable': True}),
+            ('switch', {'type': 'Switch', 'kind': 'op', 'op': 'Switch'}),
+            ('switch_data_0', {'value': None, 'kind': 'data', 'executable': True}),
+            ('switch_data_1', {'value': None, 'kind': 'data', 'executable': True})
+        ]
+        edges = [
+            ('tensor', 'switch', {'in': 0}),
+            ('pred_id', 'switch', {'in': 1}),
+            ('switch', 'switch_data_0', {'out': 0}),
+            ('switch', 'switch_data_1', {'out': 1})
+        ]
+        graph = build_graph_with_attrs(nodes_with_attrs=nodes, edges_with_attrs=edges)
+
+        # We should propagate shapes and values
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=nodes,
+                                           edges_with_attrs=edges,
+                                           update_nodes_attributes=[('switch_data_0', {'shape': np.array([3, 3]),
+                                                                                       'value': np.zeros((3,3))}),
+                                                                    ('switch_data_1', {'shape': np.array([3, 3]),
+                                                                                       'value': np.zeros((3,3))})])
+
+        tested_class = Switch(graph=graph, attrs={})
+
+        node = Node(graph, 'switch')
+        tested_class.infer(node)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'switch_data_0', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_switch_infer_no_condition(self):
+        nodes = [
+            ('tensor', {'value': None, 'kind': 'data', 'executable': True, 'shape': np.array([1, 2, 1])}),
+            ('pred_id', {'value': None, 'kind': 'data', 'executable': True}),
+            ('switch', {'type': 'Switch', 'kind': 'op', 'op': 'Switch'}),
+            ('switch_data_0', {'value': None, 'kind': 'data', 'executable': True}),
+            ('switch_data_1', {'value': None, 'kind': 'data', 'executable': True})
+        ]
+        edges = [
+            ('tensor', 'switch', {'in': 0}),
+            ('pred_id', 'switch', {'in': 1}),
+            ('switch', 'switch_data_0', {'out': 0}),
+            ('switch', 'switch_data_1', {'out': 1})
+        ]
+        graph = build_graph_with_attrs(nodes_with_attrs=nodes, edges_with_attrs=edges)
+
+        # We should propagate only shapes
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=nodes,
+                                           edges_with_attrs=edges,
+                                           update_nodes_attributes=[('switch_data_0', {'shape': np.array([1, 2, 1])}),
+                                                                    ('switch_data_1', {'shape': np.array([1, 2, 1])})])
+
+        tested_class = Switch(graph=graph, attrs={})
+
+        node = Node(graph, 'switch')
+        tested_class.infer(node)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'switch_data_0', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_switch_cf_infer_no_condition(self):
+        me_mock = Mock()
+        nodes = {
+            'tensor': {'value': True, 'kind': 'data', 'executable': True},
+            'pred_id': {'value': None, 'kind': 'data', 'executable': True},
+            'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch'},
+            'switch_data_0': {'value': None, 'kind': 'data', 'executable': True},
+            'switch_data_1': {'value': None, 'kind': 'data', 'executable': True}
+        }
+        edges = [
+            ('tensor', 'switch', {'in': 0}),
+            ('pred_id', 'switch', {'in': 1}),
+            ('switch', 'switch_data_0', {'out': 0}),
+            ('switch', 'switch_data_1', {'out': 1})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+
+        tested_class = Switch(graph=graph, attrs={})
+        node = Node(graph, 'switch')
+        tested_class.control_flow_infer(node, True, me_mock)
+        # In this case we should mark all ports as executable
+        me_mock.assert_has_calls([call('switch_data_0', True), call('switch_data_1', True)], any_order=True)
+
+    def test_switch_cf_true_both_ports(self):
+        me_mock = Mock()
+        nodes = {
+            'tensor': {'value': True, 'kind': 'data', 'executable': True},
+            'pred_id': {'value': np.array(True), 'kind': 'data', 'executable': True},
+            'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch'},
+            'switch_data_0': {'value': None, 'kind': 'data', 'executable': True},
+            'switch_data_1': {'value': None, 'kind': 'data', 'executable': True}
+        }
+        edges = [
+            ('tensor', 'switch', {'in': 0}),
+            ('pred_id', 'switch', {'in': 1}),
+            ('switch', 'switch_data_0', {'out': 0}),
+            ('switch', 'switch_data_1', {'out': 1})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        tested_class = Switch(graph=graph, attrs={})
+        node = Node(graph, 'switch')
+        tested_class.control_flow_infer(node, True, me_mock)
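+        # with a True predicate only port 1 (the "true" branch) should stay executable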
+        me_mock.assert_has_calls([call('switch_data_0', False), call('switch_data_1', True)], any_order=True)
+
+    def test_switch_cf_false_both_ports(self):
+        me_mock = Mock()
+
+        nodes = {
+            'tensor': {'value': True, 'kind': 'data', 'executable': True},
+            'pred_id': {'value': np.array(False), 'kind': 'data', 'executable': True},
+            'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch'},
+            'switch_data_0': {'value': None, 'kind': 'data', 'executable': True},
+            'switch_data_1': {'value': None, 'kind': 'data', 'executable': True}
+        }
+        edges = [
+            ('tensor', 'switch', {'in': 0}),
+            ('pred_id', 'switch', {'in': 1}),
+            ('switch', 'switch_data_0', {'out': 0}),
+            ('switch', 'switch_data_1', {'out': 1})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        tested_class = Switch(graph=graph, attrs={})
+        node = Node(graph, 'switch')
+        tested_class.control_flow_infer(node, True, me_mock)
+        me_mock.assert_has_calls([call('switch_data_0', True), call('switch_data_1', False)], any_order=True)
+
+    def test_switch_cf_true_one_exec_port(self):
+        me_mock = Mock()
+
+        nodes = {
+            'tensor': {'value': True, 'kind': 'data', 'executable': True},
+            'pred_id': {'value': np.array(True), 'kind': 'data', 'executable': True},
+            'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch'},
+            'switch_data_1': {'value': None, 'kind': 'data', 'executable': True}
+        }
+        edges = [
+            ('tensor', 'switch', {'in': 0}),
+            ('pred_id', 'switch', {'in': 1}),
+            ('switch', 'switch_data_1', {'out': 1})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        tested_class = Switch(graph=graph, attrs={})
+        node = Node(graph, 'switch')
+        tested_class.control_flow_infer(node, True, me_mock)
+        me_mock.assert_has_calls([call('switch_data_1', True)], any_order=True)
+
+    def test_switch_cf_false_one_exec_port(self):
+        me_mock = Mock()
+
+        nodes = {
+            'tensor': {'value': True, 'kind': 'data', 'executable': True},
+            'pred_id': {'value': np.array(False), 'kind': 'data', 'executable': True},
+            'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch'},
+            'switch_data_0': {'value': None, 'kind': 'data', 'executable': True},
+        }
+        edges = [
+            ('tensor', 'switch', {'in': 0}),
+            ('pred_id', 'switch', {'in': 1}),
+            ('switch', 'switch_data_0', {'out': 0}),
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        tested_class = Switch(graph=graph, attrs={})
+        node = Node(graph, 'switch')
+        tested_class.control_flow_infer(node, True, me_mock)
+        me_mock.assert_has_calls([call('switch_data_0', True)], any_order=True)
+
+    def test_switch_cf_true_no_exec(self):
+        me_mock = Mock()
+
+        nodes = {
+            'tensor': {'value': True, 'kind': 'data', 'executable': True},
+            'pred_id': {'value':  np.array(True), 'kind': 'data', 'executable': True},
+            'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch'},
+            'switch_data_0': {'value': None, 'kind': 'data', 'executable': True}
+        }
+        edges = [
+            ('tensor', 'switch', {'in': 0}),
+            ('pred_id', 'switch', {'in': 1}),
+            ('switch', 'switch_data_0', {'out': 0}),
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        tested_class = Switch(graph=graph, attrs={})
+        node = Node(graph, 'switch')
+        tested_class.control_flow_infer(node, True, me_mock)
+        me_mock.assert_has_calls([call('switch_data_0', False)], any_order=True)
+
+    def test_switch_cf_false_no_exec(self):
+        me_mock = Mock()
+
+        nodes = {
+            'tensor': {'value': True, 'kind': 'data', 'executable': True},
+            'pred_id': {'value': np.array(False), 'kind': 'data', 'executable': True},
+            'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch'},
+            'switch_data_1': {'value': None, 'kind': 'data', 'executable': True}
+        }
+        edges = [
+            ('tensor', 'switch', {'in': 0}),
+            ('pred_id', 'switch', {'in': 1}),
+            ('switch', 'switch_data_1', {'out': 1})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        tested_class = Switch(graph=graph, attrs={})
+        node = Node(graph, 'switch')
+        tested_class.control_flow_infer(node, True, me_mock)
+        me_mock.assert_has_calls([call('switch_data_1', False)], any_order=True)
diff --git a/model-optimizer/mo/back/ie_ir_ver_2/emitter_test.py b/model-optimizer/mo/back/ie_ir_ver_2/emitter_test.py
new file mode 100644 (file)
index 0000000..44830dd
--- /dev/null
@@ -0,0 +1,74 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import MagicMock
+from xml.etree.ElementTree import Element, tostring
+
+import numpy as np
+
+from mo.back.ie_ir_ver_2.emitter import soft_get, xml_shape
+from mo.utils.error import Error
+
+expected_result = b'<net><dim>2</dim><dim>10</dim><dim>50</dim><dim>50</dim></net>'
+
+
+class TestEmitter(unittest.TestCase):
+    def test_xml_shape(self):
+        net = Element('net')
+        xml_shape(np.array([2, 10, 50, 50], dtype=np.int64), net)
+        self.assertEqual(tostring(net), expected_result)
+
+    def test_xml_shape_float_values(self):
+        net = Element('net')
+        xml_shape(np.array([2.0, 10.0, 50.0, 50.0], dtype=np.float32), net)
+        self.assertEqual(tostring(net), expected_result)
+
+    def test_xml_shape_non_integer_values(self):
+        net = Element('net')
+        with self.assertRaises(Error):
+            xml_shape(np.array([2.0, 10.0, 50.0, 50.5], dtype=np.float32), net)
+
+    def test_xml_shape_negative_values(self):
+        net = Element('net')
+        with self.assertRaises(Error):
+            xml_shape(np.array([2, 10, 50, -50], dtype=np.int64), net)
+
+    def test_xml_shape_zero_values(self):
+        net = Element('net')
+        with self.assertRaises(Error):
+            xml_shape(np.array([2, 0, 50, 50], dtype=np.int64), net)
+
+
+class TestSoftGet(unittest.TestCase):
+
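+    # soft_get is expected to fall back to the '<SUB-ELEMENT>' placeholder whenever the object is
+    # not a Node exposing a callable soft_get method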
+    def test_node(self):
+        node = MagicMock()
+        node.soft_get = lambda attr: attr
+        self.assertEqual(soft_get(node, 'string'), 'string')
+
+    def test_not_callable(self):
+        node = MagicMock()
+        node.soft_get = 'foo'
+        self.assertEqual(soft_get(node, 'string'), '<SUB-ELEMENT>')
+
+    def test_not_node_1(self):
+        node = {'soft_get': lambda attr: attr}
+        self.assertEqual(soft_get(node, 'string'), '<SUB-ELEMENT>')
+
+    def test_not_node_2(self):
+        node = 'something-else'
+        self.assertEqual(soft_get(node, 'string'), '<SUB-ELEMENT>')
diff --git a/model-optimizer/mo/front/caffe/custom_layers_mapping_test.py b/model-optimizer/mo/front/caffe/custom_layers_mapping_test.py
new file mode 100644 (file)
index 0000000..84ce9b5
--- /dev/null
@@ -0,0 +1,63 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+from google.protobuf import text_format
+
+from mo.front.caffe.custom_layers_mapping import proto_extractor
+from mo.front.caffe.proto import caffe_pb2
+
+
+class TestCustomLayerMapping(unittest.TestCase):
+    def test_extractor_custom_layer(self):
+        expected_conv_params = {
+            'num_output': 64,
+            'pad': 1,
+            'kernel_size': 3,
+            'stride': 1,
+            'bias_term': True,
+            'axis': 1,
+            'engine': 'caffe.ConvolutionParameter.DEFAULT',
+            'group': 1,
+            'force_nd_im2col': False,
+            'pad_h': 0,
+            'pad_w': 0
+        }
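+        # prototxt for a custom layer plus the mapping entry that tells proto_extractor which
+        # proto parameter group ('convolution_param') to read the attributes from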
+        layer = """
+                name: "conv"
+                type: "Convolution"
+                bottom: "input"
+                top: "conv"
+                convolution_param {
+                    num_output: 64
+                    pad: 1
+                    kernel_size: 3
+                    stride: 1
+                }
+                """
+        mapping = {
+            'NativeType': 'Convolution',
+            'hasParam': 'true',
+            'protoParamName': 'convolution_param'
+        }
+        proto = caffe_pb2.LayerParameter()
+        text_format.Merge(layer, proto)
+        attrs = proto_extractor(proto, None, mapping, False, False)
+        for key, val in expected_conv_params.items():
+            if key in ('bias_term', 'force_nd_im2col'):
+                self.assertEqual(str(int(val)), attrs[key])
+            else:
+                self.assertEqual(str(val), attrs[key])
diff --git a/model-optimizer/mo/front/caffe/extractor_test.py b/model-optimizer/mo/front/caffe/extractor_test.py
new file mode 100644 (file)
index 0000000..b5b2925
--- /dev/null
@@ -0,0 +1,124 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from mo.front.caffe.extractor import check_phase, register_caffe_python_extractor
+from mo.front.extractor import CaffePythonFrontExtractorOp
+from mo.graph.graph import Node
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'},
+                    'node_2': {'type': 'Identity', 'kind': 'op'}}
+
+
+class TestExtractor(unittest.TestCase):
+    def test_check_phase_train_phase(self):
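+        # phase 0 is Caffe's TRAIN phase; phase 1 is TEST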
+        phase_param = {
+            'phase': 0
+        }
+
+        include_param = {
+            'include': [FakeMultiParam(phase_param)]
+        }
+
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {
+                                'node_1': {'pb': FakeMultiParam(include_param)}
+                            })
+
+        node = Node(graph, 'node_1')
+        res = check_phase(node)
+        exp_res = {'phase': 0}
+        self.assertEqual(res, exp_res)
+
+    def test_check_phase_test_phase(self):
+        phase_param = {
+            'phase': 1
+        }
+
+        include_param = {
+            'include': [FakeMultiParam(phase_param)]
+        }
+
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {
+                                'node_1': {'pb': FakeMultiParam(include_param)}
+                            })
+
+        node = Node(graph, 'node_1')
+        res = check_phase(node)
+        exp_res = {'phase': 1}
+        self.assertEqual(res, exp_res)
+
+    def test_check_phase_no_phase(self):
+        phase_param = {}
+
+        include_param = {
+            'include': [FakeMultiParam(phase_param)]
+        }
+
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {
+                                'node_1': {'pb': FakeMultiParam(include_param)}
+                            })
+
+        node = Node(graph, 'node_1')
+        res = check_phase(node)
+        exp_res = {}
+        self.assertEqual(res, exp_res)
+
+    def test_check_phase_no_include(self):
+        include_param = {}
+
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {
+                                'node_1': {'pb': FakeMultiParam(include_param)}
+                            })
+
+        node = Node(graph, 'node_1')
+        res = check_phase(node)
+        exp_res = {}
+        self.assertEqual(res, exp_res)
+
+    def test_check_phase_no_pb(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {})
+
+        node = Node(graph, 'node_1')
+        res = check_phase(node)
+        exp_res = {}
+        self.assertEqual(res, exp_res)
+
+    @patch('mo.ops.activation.Activation')
+    def test_register_caffe_python_extractor_by_name(self, op_mock):
+        op_mock.op = 'TestLayer'
+        name = 'myTestLayer'
+        register_caffe_python_extractor(op_mock, name)
+        self.assertIn(name, CaffePythonFrontExtractorOp.registered_ops)
+
+    @patch('mo.ops.activation.Activation')
+    def test_register_caffe_python_extractor_by_op(self, op_mock):
+        op_mock.op = 'TestLayer'
+        register_caffe_python_extractor(op_mock)
+        self.assertIn(op_mock.op, CaffePythonFrontExtractorOp.registered_ops)
diff --git a/model-optimizer/mo/front/caffe/extractors/batchnorm_test.py b/model-optimizer/mo/front/caffe/extractors/batchnorm_test.py
new file mode 100644 (file)
index 0000000..eeb441d
--- /dev/null
@@ -0,0 +1,129 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.caffe.extractors.batchnorm import batch_norm_ext
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.utils.unittest.extractors import FakeParam, FakeModelLayer
+
+
+class FakeBNProtoLayer:
+    def __init__(self, eps):
+        self.batch_norm_param = FakeParam('eps', eps)
+
+
+class TestShapesParsing(unittest.TestCase):
+    def test_bn_ext_no_ml_no_pb(self):
+        self.assertRaises(AssertionError, batch_norm_ext, None, None)
+
+    def test_bn_ext_no_ml(self):
+        res = batch_norm_ext(FakeBNProtoLayer(10), None)
+        exp_res = {
+            'op': 'BatchNormalization',
+            'type': 'BatchNormalization',
+            'epsilon': 10,
+            'infer': copy_shape_infer
+        }
+        self.assertEqual(res, exp_res)
+
+    def test_bn_ext_ml_one_blob(self):
+        self.assertRaises(AssertionError, batch_norm_ext, FakeBNProtoLayer(10), FakeModelLayer([np.array([1, 2])]))
+
+    def test_bn_ext_ml_two_blobs(self):
+        mean_blob = np.array([1., 2.])
+        variance_blob = np.array([3., 4.])
+        blobs = [mean_blob, variance_blob]
+        res = batch_norm_ext(FakeBNProtoLayer(10),
+                             FakeModelLayer(blobs))
+        exp_res = {
+            'type': 'BatchNormalization',
+            'epsilon': 10,
+            'infer': copy_shape_infer,
+            'mean': mean_blob,
+            'variance': variance_blob,
+            'embedded_inputs': [
+                (1, 'mean', {
+                    'bin': 'biases'
+                }),
+                (2, 'variance', {
+                    'bin': 'weights'
+                })
+            ]
+        }
+        for i in exp_res:
+            if i in ('mean', 'variance'):
+                np.testing.assert_array_equal(res[i], exp_res[i])
+            else:
+                self.assertEqual(res[i], exp_res[i])
+
+    def test_bn_ext_ml_three_blobs(self):
+        mean_blob = np.array([1., 2.])
+        variance_blob = np.array([3., 4.])
+        scale_blob = np.array([5., ])
+        blobs = [mean_blob, variance_blob, scale_blob]
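+        # the third blob holds Caffe's moving-average scale factor; mean and variance are expected
+        # to be scaled by 1 / scale (here 1 / 5 = 0.2)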
+        res = batch_norm_ext(FakeBNProtoLayer(10),
+                             FakeModelLayer(blobs))
+        exp_res = {
+            'type': 'BatchNormalization',
+            'epsilon': 10,
+            'infer': copy_shape_infer,
+            'mean': mean_blob * 0.2,
+            'variance': variance_blob * 0.2,
+            'embedded_inputs': [
+                (1, 'mean', {
+                    'bin': 'biases'
+                }),
+                (2, 'variance', {
+                    'bin': 'weights'
+                })
+            ]
+        }
+        for i in exp_res:
+            if i in ('mean', 'variance'):
+                np.testing.assert_array_equal(res[i], exp_res[i])
+            else:
+                self.assertEqual(res[i], exp_res[i])
+
+    def test_bn_ext_ml_three_blobs_zero_scale(self):
+        mean_blob = np.array([1., 2.])
+        variance_blob = np.array([3., 4.])
+        scale_blob = np.array([0., ])
+        blobs = [mean_blob, variance_blob, scale_blob]
+        res = batch_norm_ext(FakeBNProtoLayer(10),
+                             FakeModelLayer(blobs))
+        exp_res = {
+            'type': 'BatchNormalization',
+            'epsilon': 10,
+            'infer': copy_shape_infer,
+            'mean': mean_blob * 0.,
+            'variance': variance_blob * 0.,
+            'embedded_inputs': [
+                (1, 'mean', {
+                    'bin': 'biases'
+                }),
+                (2, 'variance', {
+                    'bin': 'weights'
+                })
+            ]
+        }
+        for i in exp_res:
+            if i in ('mean', 'variance'):
+                np.testing.assert_array_equal(res[i], exp_res[i])
+            else:
+                self.assertEqual(res[i], exp_res[i])
diff --git a/model-optimizer/mo/front/caffe/extractors/concat_test.py b/model-optimizer/mo/front/caffe/extractors/concat_test.py
new file mode 100644 (file)
index 0000000..117ce04
--- /dev/null
@@ -0,0 +1,37 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.caffe.extractors.concat import concat_ext
+from mo.front.common.partial_infer.concat import concat_infer
+from mo.utils.unittest.extractors import FakeParam
+
+
+class FakeProtoLayer:
+    def __init__(self, axis):
+        self.concat_param = FakeParam('axis', axis)
+
+
+class TestConcat(unittest.TestCase):
+    def test_concat(self):
+        res = concat_ext(FakeProtoLayer(10), None)
+        exp_res = {
+            'axis': 10,
+            'infer': concat_infer,
+            'type': 'Concat'
+        }
+        self.assertEqual(res, exp_res)
diff --git a/model-optimizer/mo/front/caffe/extractors/crop_test.py b/model-optimizer/mo/front/caffe/extractors/crop_test.py
new file mode 100644 (file)
index 0000000..9405e70
--- /dev/null
@@ -0,0 +1,66 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from mo.front.caffe.extractors.crop import CropFrontExtractor
+from mo.front.common.partial_infer.crop import crop_infer
+from mo.ops.crop import Crop
+from mo.ops.op import Op
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+
+
+class FakeCropProtoLayer:
+    def __init__(self, val):
+        self.crop_param = val
+
+
+class TestCropExt(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        Op.registered_ops['Crop'] = Crop
+
+    def test_crop_no_pb_no_ml(self):
+        self.assertRaises(AttributeError, CropFrontExtractor.extract, None)
+
+    @patch('mo.front.caffe.collect_attributes')
+    def test_crop_ext(self, collect_attributes_mock):
+        params = {
+            'axis': 0,
+            'offset': 0,
+        }
+        collect_attributes_mock.return_value = {
+            **params,
+            'test': 54,
+            'test2': 'test3'
+        }
+        fake_pl = FakeCropProtoLayer(FakeMultiParam(params))
+        fake_node = FakeNode(fake_pl, None)
+
+        CropFrontExtractor.extract(fake_node)
+
+        exp_res = {
+            'type': 'Crop',
+            'axis': 0,
+            'offset': 0,
+            'dim': None,  # set in infer
+            'infer': crop_infer
+        }
+
+        for key in exp_res.keys():
+            self.assertEqual(exp_res[key], fake_node[key])
diff --git a/model-optimizer/mo/front/caffe/extractors/eltwise_test.py b/model-optimizer/mo/front/caffe/extractors/eltwise_test.py
new file mode 100644 (file)
index 0000000..e077c42
--- /dev/null
@@ -0,0 +1,93 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from mo.front.caffe.extractors.eltwise import eltwise_ext
+from mo.utils.unittest.extractors import FakeMultiParam
+
+
+class FakeProtoLayer:
+    def __init__(self, operation, coeff=None):
+        self.eltwise_param = FakeMultiParam({'operation': operation,
+                                             'coeff': coeff if coeff is not None else [1]})
+
+
+class TestEltwise(unittest.TestCase):
+    @patch('mo.front.caffe.extractors.eltwise.eltwise_infer')
+    def test_eltwise_op_mul(self, eltwise_infer_mock):
+        eltwise_infer_mock.return_value = {}
+        res = eltwise_ext(FakeProtoLayer(0), None)
+        exp_res = {
+            'op': 'Mul',
+            'operation': 'mul',
+            'infer': None
+        }
+
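+        # eltwise_infer is patched, so calling res['infer'] lets us capture the lambda it receives
+        # and check that it actually multiplies its operands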
+        for i in exp_res.keys():
+            if i == 'infer':
+                res['infer'](None)
+                args = eltwise_infer_mock.call_args
+                actual_lambda = args[0][1]
+                self.assertTrue(eltwise_infer_mock.called)
+                self.assertEqual(actual_lambda(3, 5), 3 * 5)
+            else:
+                self.assertEqual(res[i], exp_res[i])
+
+    @patch('mo.front.caffe.extractors.eltwise.eltwise_infer')
+    def test_eltwise_op_add(self, eltwise_infer_mock):
+        eltwise_infer_mock.return_value = {}
+        res = eltwise_ext(FakeProtoLayer(1, coeff=[0.39]), None)
+        exp_res = {
+            'op': 'Add',
+            'operation': 'sum',
+            'coeff': '0.39',
+            'infer': None
+        }
+
+        for i in exp_res.keys():
+            if i == 'infer':
+                res['infer'](None)
+                args = eltwise_infer_mock.call_args
+                actual_lambda = args[0][1]
+                self.assertTrue(eltwise_infer_mock.called)
+                self.assertEqual(actual_lambda(3, 5), 3 + 5)
+            else:
+                self.assertEqual(res[i], exp_res[i])
+
+    @patch('mo.front.caffe.extractors.eltwise.eltwise_infer')
+    def test_eltwise_op_max(self, eltwise_infer_mock):
+        eltwise_infer_mock.return_value = {}
+        res = eltwise_ext(FakeProtoLayer(2), None)
+        exp_res = {
+            'op': 'Max',
+            'operation': 'max',
+            'infer': None
+        }
+
+        for i in exp_res.keys():
+            if i == 'infer':
+                res['infer'](None)
+                args = eltwise_infer_mock.call_args
+                actual_lambda = args[0][1]
+                self.assertTrue(eltwise_infer_mock.called)
+                self.assertEqual(actual_lambda(3, 5), 5)
+            else:
+                self.assertEqual(res[i], exp_res[i])
+
+    def test_eltwise_op_exception(self):
+        self.assertRaises(Exception, eltwise_ext, FakeProtoLayer(4), None)
diff --git a/model-optimizer/mo/front/caffe/extractors/elu_test.py b/model-optimizer/mo/front/caffe/extractors/elu_test.py
new file mode 100644 (file)
index 0000000..c482888
--- /dev/null
@@ -0,0 +1,53 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from unittest.mock import patch
+
+from mo.front.caffe.extractors.elu import ELUFrontExtractor
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+
+
+class FakeProtoLayer:
+    def __init__(self, val):
+        self.elu_param = val
+
+
+class TestElu(unittest.TestCase):
+    @patch('mo.front.caffe.extractors.elu.collect_attributes')
+    def test_elu_ext(self, collect_attrs_mock):
+        params = {
+            'alpha': 4
+        }
+        collect_attrs_mock.return_value = {
+            **params,
+            'test': 54,
+            'test2': 'test3'
+        }
+
+        fn = FakeNode(FakeProtoLayer(FakeMultiParam(params)), None)
+        ELUFrontExtractor.extract(fn)
+
+        exp_res = {
+            'type': 'Activation',
+            'operation': 'elu',
+            'alpha': 4
+        }
+
+        for i in exp_res:
+            self.assertEqual(fn[i], exp_res[i])
diff --git a/model-optimizer/mo/front/caffe/extractors/inner_product_test.py b/model-optimizer/mo/front/caffe/extractors/inner_product_test.py
new file mode 100644 (file)
index 0000000..44501c3
--- /dev/null
@@ -0,0 +1,61 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.caffe.extractors.inner_product import inner_product_ext
+from mo.front.common.partial_infer.inner_product import caffe_inner_product
+from mo.utils.unittest.extractors import FakeMultiParam, FakeModelLayer
+
+
+class FakeProtoLayer:
+    def __init__(self, val):
+        self.inner_product_param = val
+
+
+class TestInnerProduct(unittest.TestCase):
+    def test_inner_product_ext(self):
+        params = {
+            'num_output': 10,
+            'bias_term': True
+        }
+        weights_blob = np.array([1., 2.])
+        biases_blob = np.array([3., 4.])
+        blobs = [weights_blob, biases_blob]
+        res = inner_product_ext(FakeProtoLayer(FakeMultiParam(params)),
+                                FakeModelLayer(blobs))
+        exp_res = {
+            'type': 'FullyConnected',
+            'out-size': 10,
+            'infer': caffe_inner_product,
+            'weights': weights_blob,
+            'biases': biases_blob,
+            'embedded_inputs': [
+                (1, 'weights', {
+                    'bin': 'weights'
+                }),
+                (2, 'biases', {
+                    'bin': 'biases'
+                })
+            ]
+        }
+        for i in exp_res:
+            if i in ('weights', 'biases'):
+                np.testing.assert_array_equal(res[i], exp_res[i])
+            else:
+                self.assertEqual(res[i], exp_res[i])
diff --git a/model-optimizer/mo/front/caffe/extractors/input_test.py b/model-optimizer/mo/front/caffe/extractors/input_test.py
new file mode 100644 (file)
index 0000000..37d1fc1
--- /dev/null
@@ -0,0 +1,57 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from mo.front.caffe.extractors.input import input_ext, global_input_ext
+from mo.utils.unittest.extractors import FakeParam
+
+
+class FakeProtoLayer:
+    def __init__(self, shape):
+        self.input_param = FakeParam('shape', shape)
+
+
+class TestInput(unittest.TestCase):
+    @patch('mo.front.caffe.extractors.input.single_output_infer')
+    def test_input_ext(self, single_output_infer_mock):
+        single_output_infer_mock.return_value = {}
+        shape = [FakeParam('dim', 1)]
+        res = input_ext(FakeProtoLayer(shape), None)
+        exp_res = {
+            'op': 'Placeholder',
+            'shape': [1],
+            'infer': None
+        }
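+        # only the 'infer' callback is exercised here: calling it must delegate to single_output_infer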
+        for i in exp_res.keys():
+            if i == 'infer':
+                res['infer'](None)
+                self.assertTrue(single_output_infer_mock.called)
+
+    @patch('mo.front.caffe.extractors.input.single_output_infer')
+    def test_global_input_ext(self, single_output_infer_mock):
+        single_output_infer_mock.return_value = {}
+        res = global_input_ext(None, None)
+        exp_res = {
+            'op': 'Placeholder',
+            'type': 'input',
+            'infer': None
+        }
+        for i in exp_res.keys():
+            if i == 'infer':
+                res['infer'](None)
+                self.assertTrue(single_output_infer_mock.called)
diff --git a/model-optimizer/mo/front/caffe/extractors/lrn_test.py b/model-optimizer/mo/front/caffe/extractors/lrn_test.py
new file mode 100644 (file)
index 0000000..e5c7f8b
--- /dev/null
@@ -0,0 +1,66 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.caffe.extractors.lrn import lrn_ext
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.utils.unittest.extractors import FakeMultiParam
+
+
+class FakeProtoLayer:
+    def __init__(self, val):
+        self.lrn_param = val
+
+
+class TestLRN(unittest.TestCase):
+    def test_lrn_ext(self):
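+        # norm_region 0 is Caffe's ACROSS_CHANNELS mode, expected to map to region 'across' in the IR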
+        params = {
+            'alpha': 10,
+            'beta': 15,
+            'local_size': 20,
+            'norm_region': 0
+        }
+        res = lrn_ext(FakeProtoLayer(FakeMultiParam(params)), None)
+        exp_res = {
+            'op': 'LRN',
+            'type': 'Norm',
+            'alpha': 10,
+            'beta': 15,
+            'local_size': 20,
+            'region': 'across',
+            'infer': copy_shape_infer
+        }
+        self.assertEqual(res, exp_res)
+
+    def test_lrn_ext_norm_reg(self):
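+        # norm_region 1 is Caffe's WITHIN_CHANNEL mode, expected to map to region 'same'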
+        params = {
+            'alpha': 10,
+            'beta': 15,
+            'local_size': 20,
+            'norm_region': 1
+        }
+        res = lrn_ext(FakeProtoLayer(FakeMultiParam(params)), None)
+        exp_res = {
+            'op': 'LRN',
+            'type': 'Norm',
+            'alpha': 10,
+            'beta': 15,
+            'local_size': 20,
+            'region': 'same',
+            'infer': copy_shape_infer
+        }
+        self.assertEqual(res, exp_res)
diff --git a/model-optimizer/mo/front/caffe/extractors/permute_test.py b/model-optimizer/mo/front/caffe/extractors/permute_test.py
new file mode 100644 (file)
index 0000000..232e520
--- /dev/null
@@ -0,0 +1,48 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.caffe.extractors.permute import permute_ext
+from mo.front.common.partial_infer.transpose import transpose_infer
+from mo.utils.unittest.extractors import FakeMultiParam
+
+
+class FakePermuteProtoLayer:
+    def __init__(self, val):
+        self.permute_param = val
+
+
+class TestPermuteParsing(unittest.TestCase):
+    def test_permute_check_attrs(self):
+        attrs = {
+            'order': np.array([0, 1, 3, 2])
+        }
+
+        res = permute_ext(FakePermuteProtoLayer(FakeMultiParam(attrs)), None)
+        exp_attrs = {
+            'type': 'Permute',
+            'op': 'Permute',
+            'order': np.array([0, 1, 3, 2]),
+            'infer': transpose_infer
+        }
+        for key in exp_attrs.keys():
+            if key == 'order':
+                np.testing.assert_equal(res[key], exp_attrs[key])
+            else:
+                self.assertEqual(res[key], exp_attrs[key])
diff --git a/model-optimizer/mo/front/caffe/extractors/power_test.py b/model-optimizer/mo/front/caffe/extractors/power_test.py
new file mode 100644 (file)
index 0000000..5281bbb
--- /dev/null
@@ -0,0 +1,46 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.caffe.extractors.power import power_ext
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.utils.unittest.extractors import FakeMultiParam
+
+
+class FakeProtoLayer:
+    def __init__(self, val):
+        self.power_param = val
+
+
+class TestPowerExt(unittest.TestCase):
+    def test_power_ext(self):
+        params = {
+            'power': 1,
+            'scale': 2,
+            'shift': 3
+        }
+        res = power_ext(FakeProtoLayer(FakeMultiParam(params)), None)
+        exp_res = {
+            'power': 1,
+            'scale': 2,
+            'shift': 3,
+            'infer': copy_shape_infer,
+            'op': "Power",
+            'type': 'Power',
+            'output_spatial_shape': None,
+        }
+        self.assertEqual(res, exp_res)
diff --git a/model-optimizer/mo/front/caffe/extractors/relu_test.py b/model-optimizer/mo/front/caffe/extractors/relu_test.py
new file mode 100644 (file)
index 0000000..b807166
--- /dev/null
@@ -0,0 +1,43 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.caffe.extractors.relu import relu_ext
+from mo.front.common.extractors.utils import layout_attrs
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.utils.unittest.extractors import FakeParam, FakeMultiParam
+
+
+class TestReLU(unittest.TestCase):
+    def test_relu_ext(self):
+        params = {
+            'negative_slope': 0.1,
+        }
+
+        res = relu_ext(FakeParam('relu_param', FakeMultiParam(params)), None)
+        exp_res = {
+            'negative_slope': 0.1,
+            'infer': copy_shape_infer,
+        }
+        exp_res.update(layout_attrs())
+        for i in exp_res.keys():
+            if i == 'negative_slope':
+                self.assertEqual(res[i], exp_res[i])
+            else:
+                np.testing.assert_array_equal(res[i], exp_res[i])
diff --git a/model-optimizer/mo/front/caffe/extractors/reshape_test.py b/model-optimizer/mo/front/caffe/extractors/reshape_test.py
new file mode 100644 (file)
index 0000000..4551eb7
--- /dev/null
@@ -0,0 +1,56 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.caffe.extractors.reshape import reshape_ext
+from mo.utils.unittest.extractors import FakeMultiParam
+
+
+class FakeReshapeProtoLayer:
+    def __init__(self, val):
+        self.reshape_param = val
+
+
+class Shape:
+    def __init__(self, val):
+        self.dim = val
+
+
+class TestReshapeParsing(unittest.TestCase):
+    def test_reshape_check_attrs(self):
+        attrs = {
+            'axis': 0,
+            'num_axes': -1,
+            'shape': Shape(np.array([0, -1])),
+        }
+
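+        # the proto shape.dim values are expected to surface as the 'dim' attribute of the Reshape op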
+        res = reshape_ext(FakeReshapeProtoLayer(FakeMultiParam(attrs)), None)
+        exp_attrs = {
+            'op': 'Reshape',
+            'type': 'Reshape',
+            'axis': 0,
+            'num_axes': -1,
+            'dim': [0, -1]
+        }
+
+        for key in exp_attrs.keys():
+            if key == 'dim':
+                np.testing.assert_equal(res[key], exp_attrs[key])
+            else:
+                self.assertEqual(res[key], exp_attrs[key])
diff --git a/model-optimizer/mo/front/caffe/extractors/scale_test.py b/model-optimizer/mo/front/caffe/extractors/scale_test.py
new file mode 100644 (file)
index 0000000..9258295
--- /dev/null
@@ -0,0 +1,144 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.caffe.extractors.scale import scale_ext
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.utils.unittest.extractors import FakeMultiParam, FakeModelLayer
+
+
+class FakeProtoLayer:
+    def __init__(self, val, bottom2=False):
+        self.scale_param = val
+        if bottom2:
+            self.bottom = {"bottom1", "bottom2"}
+        else:
+            self.bottom = {"bottom1"}
+
+
+class TestScale(unittest.TestCase):
+    def test_scale_ext(self):
+        mean_blob = np.array([1., 2.])
+        variance_blob = np.array([3., 4.])
+        blobs = [mean_blob, variance_blob]
+        params = {
+            'type': 'Scale',
+            'axis': 0,
+            'bias_term': True
+        }
+
+        res = scale_ext(FakeProtoLayer(FakeMultiParam(params)), FakeModelLayer(blobs))
+        exp_res = {
+            'op': 'ScaleShift',
+            'type': 'ScaleShift',
+            'axis': 0,
+            'infer': copy_shape_infer,
+            'weights': mean_blob,
+            'biases': variance_blob,
+            'embedded_inputs': [
+                (1, 'weights', {
+                    'bin': 'weights'
+                }),
+                (2, 'biases', {
+                    'bin': 'biases'
+                })
+            ]
+        }
+        for i in exp_res:
+            if i in ('weights', 'biases'):
+                np.testing.assert_array_equal(res[i], exp_res[i])
+            else:
+                self.assertEqual(res[i], exp_res[i])
+
+    def test_scale_2inputs_ext(self):
+        params = {
+            'type': 'Scale',
+            'axis': 0,
+            'bias_term': False
+        }
+
+        res = scale_ext(FakeProtoLayer(FakeMultiParam(params), True), None)
+        exp_res = {
+            'op': 'ScaleShift',
+            'type': 'ScaleShift',
+            'axis': 0,
+            'infer': copy_shape_infer,
+        }
+        for i in exp_res:
+            self.assertEqual(res[i], exp_res[i])
+
+    def test_scale_2inputs_bias_ext(self):
+        variance_blob = np.array([3., 4.])
+        blobs = [variance_blob]
+
+        params = {
+            'type': 'Scale',
+            'axis': 0,
+            'bias_term': True
+        }
+
+        res = scale_ext(FakeProtoLayer(FakeMultiParam(params), True), FakeModelLayer(blobs))
+        exp_res = {
+            'op': 'ScaleShift',
+            'type': 'ScaleShift',
+            'axis': 0,
+            'infer': copy_shape_infer,
+            'biases': variance_blob,
+            'embedded_inputs': [
+                (1, 'biases', {
+                    'bin': 'biases'
+                })]
+        }
+        for i in exp_res:
+            if i == 'biases':
+                np.testing.assert_array_equal(res[i], exp_res[i])
+            else:
+                self.assertEqual(res[i], exp_res[i])
+
+    def test_create_default_weights(self):
+        """
+        There are situations when the Scale layer has neither weights nor biases. This test checks that,
+        if they are not available in the caffemodel file, the default values [1] and [0] are generated.
+        """
+        scale_blob = np.array([1])
+        bias_blob = np.array([0])
+        params = {
+            'type': 'Scale',
+            'axis': 0,
+            'bias_term': True
+        }
+
+        res = scale_ext(FakeProtoLayer(FakeMultiParam(params)), None)
+        exp_res = {
+            'op': 'ScaleShift',
+            'type': 'ScaleShift',
+            'axis': 0,
+            'infer': copy_shape_infer,
+            'weights': scale_blob,
+            'biases': bias_blob,
+            'embedded_inputs': [
+                (1, 'weights', {
+                    'bin': 'weights'
+                }),
+                (2, 'biases', {
+                    'bin': 'biases'
+                })
+            ]
+        }
+        self.assertDictEqual(exp_res, res)
diff --git a/model-optimizer/mo/front/caffe/extractors/slice_test.py b/model-optimizer/mo/front/caffe/extractors/slice_test.py
new file mode 100644 (file)
index 0000000..22b43b8
--- /dev/null
@@ -0,0 +1,102 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+import numpy as np
+
+from mo.front.caffe.extractors.slice import slice_ext
+from mo.front.common.partial_infer.slice import caffe_slice_infer
+from mo.utils.unittest.extractors import FakeMultiParam
+
+
+class FakeProtoLayer:
+    def __init__(self, val):
+        self.slice_param = val
+
+
+class TestSlice(unittest.TestCase):
+    @patch('mo.front.caffe.extractors.slice.merge_attrs')
+    def test_slice_ext(self, merge_attrs_mock):
+        params = {
+            'type': 'Slice',
+            'axis': 2,
+            'slice_point': np.array([256]),
+            'slice_dim': 3,
+            'infer': caffe_slice_infer
+        }
+        merge_attrs_mock.return_value = {
+            **params,
+            'test': 54,
+            'test2': 'test3'
+        }
+        res = slice_ext(FakeProtoLayer(FakeMultiParam(params)), None)
+        exp_res = {
+            'type': 'Slice',
+            'axis': 2,
+            'slice_point': np.array([256]),
+            'infer': caffe_slice_infer
+        }
+        for i in exp_res:
+            self.assertEqual(res[i], exp_res[i])
+
+    @patch('mo.front.caffe.extractors.slice.merge_attrs')
+    def test_slice_ext_slice_dim(self, merge_attrs_mock):
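+        # the legacy slice_dim field is expected to take precedence over axis here: the merged
+        # attributes report axis 3, not the axis 1 given in the proto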
+        params = {
+            'type': 'Slice',
+            'axis': 1,
+            'slice_point': np.array([256]),
+            'slice_dim': 3,
+            'infer': caffe_slice_infer
+        }
+        merge_attrs_mock.return_value = {
+            **params,
+            'axis': 3
+        }
+        res = slice_ext(FakeProtoLayer(FakeMultiParam(params)), None)
+        exp_res = {
+            'type': 'Slice',
+            'axis': 3,
+            'slice_point': np.array([256]),
+            'infer': caffe_slice_infer
+        }
+        for i in exp_res:
+            self.assertEqual(res[i], exp_res[i])
+
+    @patch('mo.front.caffe.extractors.slice.merge_attrs')
+    def test_slice_ext_no_params(self, merge_attrs_mock):
+        params = {
+            'type': 'Slice',
+            'axis': 1,
+            'slice_dim': 1,
+            'slice_point': [],
+            'infer': caffe_slice_infer
+        }
+        merge_attrs_mock.return_value = {
+            'type': 'Slice',
+            'axis': 1,
+            'infer': caffe_slice_infer
+        }
+        res = slice_ext(FakeProtoLayer(FakeMultiParam(params)), None)
+        exp_res = {
+            'type': 'Slice',
+            'axis': 1,
+            'slice_point': [],
+            'infer': caffe_slice_infer
+        }
+        for i in exp_res:
+            self.assertEqual(res[i], exp_res[i])
diff --git a/model-optimizer/mo/front/caffe/extractors/utils_test.py b/model-optimizer/mo/front/caffe/extractors/utils_test.py
new file mode 100644 (file)
index 0000000..6983a0f
--- /dev/null
@@ -0,0 +1,93 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch, call
+
+import numpy as np
+
+from mo.front.caffe.extractors.utils import weights_biases, embed_input, get_canonical_axis_index
+from mo.utils.unittest.extractors import FakeModelLayer
+
+
+class TestWeightsBiases(unittest.TestCase):
+    def test_weights_biases_no_layer_no_bias(self):
+        res = weights_biases(False, None)
+        self.assertEqual(res, {})
+
+    @patch('mo.front.caffe.extractors.utils.embed_input')
+    def test_weights_biases_layer_no_bias(self, embed_input_mock):
+        weights_biases(False, FakeModelLayer([[1, 2], ]))
+        calls = [call({}, 1, 'weights', [1, 2])]
+        embed_input_mock.assert_has_calls(calls)
+
+    @patch('mo.front.caffe.extractors.utils.embed_input')
+    def test_weights_biases_layer_bias(self, embed_input_mock):
+        weights_biases(True, FakeModelLayer([[1, 2], [3, 4]]))
+        calls = [call({}, 1, 'weights', [1, 2]), call({}, 2, 'biases', [3, 4])]
+        embed_input_mock.assert_has_calls(calls)
+
+
+class TestEmbedInput(unittest.TestCase):
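+    # embed_input should store the blob under attrs[name] and record it in 'embedded_inputs'
+    # together with the 'bin' section name it will be serialized under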
+    def test_embed_input_no_bin_name_no_bias(self):
+        attrs = {}
+        blob = np.array([1, 2])
+        name = 'weights'
+        embed_input(attrs, 1, name, blob, None)
+        exp_res = {
+            'weights': blob,
+            'embedded_inputs': [
+                (1, name, {'bin': name})
+            ]
+        }
+        for key in exp_res.keys():
+            if key == name:
+                np.testing.assert_equal(attrs[key], exp_res[key])
+            else:
+                self.assertEqual(attrs[key], exp_res[key])
+
+    def test_embed_input_w_bin_name(self):
+        attrs = {}
+        blob = np.array([1, 2])
+        name = 'weights'
+        embed_input(attrs, 1, name, blob, 'special_name')
+        exp_res = {
+            'weights': blob,
+            'embedded_inputs': [
+                (1, name, {'bin': 'special_name'})
+            ]
+        }
+        for key in exp_res.keys():
+            if key == name:
+                np.testing.assert_equal(attrs[key], exp_res[key])
+            else:
+                self.assertEqual(attrs[key], exp_res[key])
+
+
+class TestCanonicalAxisIndex(unittest.TestCase):
+    def test_negative_index(self):
+        shape = [1, 2, 3, 4]
+        inds = [-4, -3, -2, -1]
+        expected_inds = [0, 1, 2, 3]
+        for ind, expected in zip(inds, expected_inds):
+            self.assertEqual(get_canonical_axis_index(shape, ind), expected)
+
+    def test_positive_index(self):
+        shape = [1, 2, 3, 4]
+        inds = [0, 1, 2, 3]
+        expected_inds = [0, 1, 2, 3]
+        for ind, expected in zip(inds, expected_inds):
+            self.assertEqual(get_canonical_axis_index(shape, ind), expected)
diff --git a/model-optimizer/mo/front/caffe/loader_test.py b/model-optimizer/mo/front/caffe/loader_test.py
new file mode 100644 (file)
index 0000000..b61f6d3
--- /dev/null
@@ -0,0 +1,160 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+from google.protobuf import text_format
+
+from mo.front.caffe.loader import caffe_pb_to_nx
+from mo.front.caffe.proto import caffe_pb2
+from mo.utils.error import Error
+
+proto_str_one_input = 'name: "network" ' \
+                      'layer { ' \
+                      'name: "Input0" ' \
+                      'type: "Input" ' \
+                      'top: "Input0" ' \
+                      'input_param { ' \
+                      'shape: { ' \
+                      'dim: 1 ' \
+                      'dim: 3 ' \
+                      'dim: 224 ' \
+                      'dim: 224 ' \
+                      '} ' \
+                      '} ' \
+                      '}'
+
+proto_str_old_styled_multi_input = 'name: "network" ' \
+                                   'input: "Input0" ' \
+                                   'input_dim: 1 ' \
+                                   'input_dim: 3 ' \
+                                   'input_dim: 224 ' \
+                                   'input_dim: 224 ' \
+                                   'input: "data"' \
+                                   'input_dim: 1 ' \
+                                   'input_dim: 3 '
+
+proto_str_input = 'name: "network" ' \
+                  'input: "data" ' \
+                  'input_shape ' \
+                  '{ ' \
+                  'dim: 1 ' \
+                  'dim: 3 ' \
+                  'dim: 224 ' \
+                  'dim: 224 ' \
+                  '}'
+
+proto_str_multi_input = 'name: "network" ' \
+                        'input: "data" ' \
+                        'input_shape ' \
+                        '{ ' \
+                        'dim: 1 ' \
+                        'dim: 3 ' \
+                        'dim: 224 ' \
+                        'dim: 224 ' \
+                        '} ' \
+                        'input: "data1"' \
+                        'input_shape ' \
+                        '{ ' \
+                        'dim: 1 ' \
+                        'dim: 3 ' \
+                        '}'
+
+proto_str_old_styled_input = 'name: "network" ' \
+                             'input: "data" ' \
+                             'input_dim: 1 ' \
+                             'input_dim: 3 ' \
+                             'input_dim: 224 ' \
+                             'input_dim: 224 '
+
+layer_proto_str = 'layer { ' \
+                  'name: "conv1" ' \
+                  'type: "Convolution" ' \
+                  'bottom: "data" ' \
+                  'top: "conv1" ' \
+                  '}'
+
+proto_same_name_layers = 'layer { ' \
+                         'name: "conv1" ' \
+                         'type: "Convolution" ' \
+                         'bottom: "data" ' \
+                         'top: "conv1" ' \
+                         '}' \
+                         'layer { ' \
+                         'name: "conv1" ' \
+                         'type: "Convolution" ' \
+                         'bottom: "data1" ' \
+                         'top: "conv1_2" ' \
+                         '}'
+
+class TestLoader(unittest.TestCase):
+    def test_caffe_pb_to_nx_one_input(self):
+        proto = caffe_pb2.NetParameter()
+        text_format.Merge(proto_str_one_input, proto)
+        graph, input_shapes = caffe_pb_to_nx(proto, None)
+        expected_input_shapes = {
+            'Input0': np.array([1, 3, 224, 224])
+        }
+
+        for i in expected_input_shapes:
+            np.testing.assert_array_equal(input_shapes[i], expected_input_shapes[i])
+
+    def test_caffe_pb_to_nx_old_styled_multi_input(self):
+        proto = caffe_pb2.NetParameter()
+        text_format.Merge(proto_str_old_styled_multi_input + layer_proto_str, proto)
+        self.assertRaises(Error, caffe_pb_to_nx, proto, None)
+
+    def test_caffe_pb_to_nx_old_styled_input(self):
+        proto = caffe_pb2.NetParameter()
+        text_format.Merge(proto_str_old_styled_input + layer_proto_str, proto)
+        graph, input_shapes = caffe_pb_to_nx(proto, None)
+        expected_input_shapes = {
+            'data': np.array([1, 3, 224, 224])
+        }
+
+        for i in expected_input_shapes:
+            np.testing.assert_array_equal(input_shapes[i], expected_input_shapes[i])
+
+    def test_caffe_pb_to_standard_input(self):
+        proto = caffe_pb2.NetParameter()
+        text_format.Merge(proto_str_input + layer_proto_str, proto)
+        graph, input_shapes = caffe_pb_to_nx(proto, None)
+        expected_input_shapes = {
+            'data': np.array([1, 3, 224, 224])
+        }
+
+        for i in expected_input_shapes:
+            np.testing.assert_array_equal(input_shapes[i], expected_input_shapes[i])
+
+    def test_caffe_pb_to_multi_input(self):
+        proto = caffe_pb2.NetParameter()
+        text_format.Merge(proto_str_multi_input + layer_proto_str, proto)
+        graph, input_shapes = caffe_pb_to_nx(proto, None)
+        expected_input_shapes = {
+            'data': np.array([1, 3, 224, 224]),
+            'data1': np.array([1, 3])
+        }
+
+        for i in expected_input_shapes:
+            np.testing.assert_array_equal(input_shapes[i], expected_input_shapes[i])
+
+    def test_caffe_same_name_layer(self):
+        proto = caffe_pb2.NetParameter()
+        text_format.Merge(proto_str_multi_input + proto_same_name_layers, proto)
+        graph, input_shapes = caffe_pb_to_nx(proto, None)
+        # 6 nodes because: 2 inputs + 2 convolutions + 2 output nodes
+        np.testing.assert_equal(len(graph.nodes()), 6)
diff --git a/model-optimizer/mo/front/caffe/python_layer_extractor_test.py b/model-optimizer/mo/front/caffe/python_layer_extractor_test.py
new file mode 100644 (file)
index 0000000..35f6760
--- /dev/null
@@ -0,0 +1,61 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.caffe.python_layer_extractor import PythonFrontExtractorOp
+from mo.front.extractor import CaffePythonFrontExtractorOp
+from mo.graph.graph import Node
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import FakeNode
+
+
+class FakePythonProtoLayer:
+    def __init__(self, params: FakeMultiParam):
+        self.type = 'Python'
+        self.python_param = params
+
+
+class FakePythonExtractor:
+    @staticmethod
+    def extract(node: Node):
+        return True
+
+
+class TestPythonLayerExtractor(unittest.TestCase):
+    def test_python_extractor_for_op(self):
+        module = 'test_module'
+        layer = 'test_layer'
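+        # Register a parser for 'test_module.test_layer' that converts the layer's param_str into an attribute dict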
+        CaffePythonFrontExtractorOp.registered_ops['{}.{}'.format(module, layer)] = \
+            lambda node: CaffePythonFrontExtractorOp.parse_param_str(node.pb.python_param.param_str)
+        params = FakeMultiParam({
+            'module': module,
+            'layer': layer,
+            'param_str': "'feat_stride': 16"
+        })
+        ext = PythonFrontExtractorOp.extract(FakeNode(FakePythonProtoLayer(params), None))
+        self.assertEqual({'feat_stride': 16}, ext)
+
+    def test_python_extractor_for_extractors(self):
+        module = 'test_module'
+        layer = 'test_layer'
+        CaffePythonFrontExtractorOp.registered_ops['{}.{}'.format(module, layer)] = FakePythonExtractor
+        params = FakeMultiParam({
+            'module': module,
+            'layer': layer,
+            'param_str': "'feat_stride': 16"
+        })
+        self.assertTrue(PythonFrontExtractorOp.extract(FakeNode(FakePythonProtoLayer(params), None)))
diff --git a/model-optimizer/mo/front/common/layout_test.py b/model-optimizer/mo/front/common/layout_test.py
new file mode 100644 (file)
index 0000000..e3865e4
--- /dev/null
@@ -0,0 +1,115 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.common.layout import get_batch_dim, get_width_dim, get_height_dim, get_features_dim, get_depth_dim, \
+    shape_for_layout
+from mo.utils.error import Error
+
+
+class TestLayoutFunctions(unittest.TestCase):
+    def test_get_batch_dim_NCHW(self):
+        self.assertEqual(get_batch_dim('NCHW', 4), 0)
+
+    def test_get_batch_dim_NHWC(self):
+        self.assertEqual(get_batch_dim('NHWC', 4), 0)
+
+    def test_get_batch_dim_NCDHW(self):
+        self.assertEqual(get_batch_dim('NCHW', 5), 0)
+
+    def test_get_batch_dim_NDHWC(self):
+        self.assertEqual(get_batch_dim('NHWC', 5), 0)
+
+    def test_get_features_dim_NCHW(self):
+        self.assertEqual(get_features_dim('NCHW', 4), 1)
+
+    def test_get_features_dim_NHWC(self):
+        self.assertEqual(get_features_dim('NHWC', 4), 3)
+
+    def test_get_features_dim_NCDHW(self):
+        self.assertEqual(get_features_dim('NCHW', 5), 1)
+
+    def test_get_features_dim_NDHWC(self):
+        self.assertEqual(get_features_dim('NHWC', 5), 4)
+
+    def test_get_width_dim_NCHW(self):
+        self.assertEqual(get_width_dim('NCHW', 4), 3)
+
+    def test_get_width_dim_NHWC(self):
+        self.assertEqual(get_width_dim('NHWC', 4), 2)
+
+    def test_get_width_dim_NCDHW(self):
+        self.assertEqual(get_width_dim('NCHW', 5), 4)
+
+    def test_get_width_dim_NDHWC(self):
+        self.assertEqual(get_width_dim('NHWC', 5), 3)
+
+    def test_get_height_dim_NCHW(self):
+        self.assertEqual(get_height_dim('NCHW', 4), 2)
+
+    def test_get_height_dim_NHWC(self):
+        self.assertEqual(get_height_dim('NHWC', 4), 1)
+
+    def test_get_height_dim_NCDHW(self):
+        self.assertEqual(get_height_dim('NCHW', 5), 3)
+
+    def test_get_height_dim_NDHWC(self):
+        self.assertEqual(get_height_dim('NHWC', 5), 2)
+
+    def test_get_depth_dim_NCDHW(self):
+        self.assertEqual(get_depth_dim('NCHW', 5), 2)
+
+    def test_get_depth_dim_NDHWC(self):
+        self.assertEqual(get_depth_dim('NHWC', 5), 1)
+
+    def test_get_batch_dim_wrong_layout(self):
+        self.assertRaises(AssertionError, get_batch_dim, 'NCDHW', 5)
+
+    def test_get_width_dim_wrong_layout(self):
+        self.assertRaises(AssertionError, get_width_dim, 'NCDHW', 5)
+
+    def test_get_height_dim_wrong_layout(self):
+        self.assertRaises(AssertionError, get_height_dim, 'NCDHW', 5)
+
+    def test_get_features_dim_wrong_layout(self):
+        self.assertRaises(AssertionError, get_features_dim, 'NCDHW', 5)
+
+    def test_shape_for_layout_NCHW(self):
+        self.assertListEqual([2, 3, 4, 5], list(shape_for_layout('NCHW', batch=2, features=3, height=4, width=5)))
+
+    def test_shape_for_layout_NHWC(self):
+        self.assertListEqual([2, 4, 5, 3], list(shape_for_layout('NHWC', batch=2, features=3, height=4, width=5)))
+
+    def test_shape_for_layout_missing_batch(self):
+        with self.assertRaises(Error):
+            shape_for_layout('NCHW', features=3, height=4, width=5)
+
+    def test_shape_for_layout_missing_features(self):
+        with self.assertRaises(Error):
+            shape_for_layout('NCHW', batch=2, height=4, width=5)
+
+    def test_shape_for_layout_missing_height(self):
+        with self.assertRaises(Error):
+            shape_for_layout('NHWC', batch=2, features=3, width=5)
+
+    def test_shape_for_layout_missing_width(self):
+        with self.assertRaises(Error):
+            shape_for_layout('NHWC', batch=2, features=3, height=4)
+
+    def test_shape_for_layout_unknown_parameter(self):
+        with self.assertRaises(Error):
+            shape_for_layout('NHWC', batch=2, features=3, height=4, width=5, unknown_parameter=123)
diff --git a/model-optimizer/mo/front/common/partial_infer/caffe_fallback_test.py b/model-optimizer/mo/front/common/partial_infer/caffe_fallback_test.py
new file mode 100644 (file)
index 0000000..1d03857
--- /dev/null
@@ -0,0 +1,109 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import MagicMock
+
+import numpy as np
+
+from mo.front.common.partial_infer.caffe_fallback import build_net
+from mo.utils.unittest.extractors import FakeMultiParam, FakeValue
+from mo.utils.unittest.graph import build_graph
+
+
+class Net:
+    def __init__(self, blobs):
+        self.blobs = blobs
+        self.reshape_blob = MagicMock(return_value=np.array([1, 1, 1, 1]))
+        self.reshape = MagicMock(return_value=np.array([1, 1, 1, 1]))
+        self.forward = MagicMock(return_value={'top_node': FakeValue(np.array([1, 3, 112, 112]))})
+
+
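+# Mock network instance returned by the stubbed caffe.Net(); each test assigns it before calling build_net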
+my_mock_net = None
+
+
+class Caffe:
+    def __init__(self):
+        self.TEST = 'TEST'
+
+    def Net(self, *args):
+        return my_mock_net
+
+
+class TestCaffeNativePartialInfer(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        import sys
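+        # Register the Caffe stub as the 'caffe' module so the fallback's 'import caffe' resolves to it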
+        sys.modules['caffe'] = Caffe()
+        cls.nodes_attributes = {
+            'node_1': {'type': 'Input', 'kind': 'op'},
+            'node_2': {'type': 'Input', 'kind': 'op'},
+            'node_3': {'type': 'Identity', 'kind': 'op'},
+            'node_4': {'type': 'Identity', 'kind': 'op'}
+        }
+
+    def test_build_net_equal_inputs(self):
+        global my_mock_net
+        my_blobs = {
+            'node_1': FakeValue(np.array([1, 3, 227, 227])),
+            'node_2': FakeValue(np.array([1, 3, 224, 224]))
+        }
+        my_mock_net = Net(my_blobs)
+        graph = build_graph(self.nodes_attributes,
+                            [
+                                ('node_1', 'node_3'),
+                                ('node_2', 'node_3'),
+                                ('node_3', 'node_4')
+                            ],
+                            {
+                                'node_4': {'is_output': True, 'shape': None},
+                                'node_1': {'shape': np.array([1, 3, 227, 227])},
+                                'node_2': {'shape': np.array([1, 3, 224, 224])},
+                                'node_3': {'top': 'top_node'}
+                            })
+        graph.proto_path = 'path_to_proto'
+        graph.caffemodel_path = 'path_to_proto'
+        build_net(graph)
+        my_mock_net.reshape.assert_not_called()
+        my_mock_net.forward.assert_called_once_with()
+        self.assertIsNotNone(graph.caffe_net)
+
+    def test_build_net_not_equal_inputs(self):
+        global my_mock_net
+        input_node_param = {
+            'shape': np.array([1, 3, 112, 112]),
+            'reshape': MagicMock(return_value=134)
+        }
+        my_blobs = {
+            'node_1': FakeMultiParam(input_node_param),
+        }
+        my_mock_net = Net(my_blobs)
+        graph = build_graph(self.nodes_attributes,
+                            [
+                                ('node_1', 'node_3'),
+                                ('node_3', 'node_4')
+                            ],
+                            {'node_4': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_3': {'top': 'top_node'}
+                             },
+                            nodes_with_edges_only=True)
+        graph.proto_path = 'path_to_proto'
+        graph.caffemodel_path = 'path_to_proto'
+        build_net(graph)
+        my_mock_net.reshape.assert_called_once_with()
+        my_mock_net.forward.assert_called_once_with()
+        self.assertIsNotNone(graph.caffe_net)
diff --git a/model-optimizer/mo/front/common/partial_infer/concat_test.py b/model-optimizer/mo/front/common/partial_infer/concat_test.py
new file mode 100644 (file)
index 0000000..07b53a1
--- /dev/null
@@ -0,0 +1,99 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.concat import concat_infer
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'kind': 'data', 'value': None},
+                    'node_2': {'kind': 'data', 'value': None},
+                    'concat': {'type': 'Concat', 'kind': 'op'},
+                    'node_3': {'kind': 'data'}
+                    }
+
+
+class TestConcatPartialInfer(unittest.TestCase):
+    def test_tf_concat_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'concat'),
+                             ('node_2', 'concat'),
+                             ('concat', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_2': {'shape': np.array([1, 3, 227, 227])},
+                             'concat': {'axis': 2}
+                             })
+
+        concat_node = Node(graph, 'concat')
+        concat_infer(concat_node)
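+        # Concatenation along axis 2: 227 + 227 = 454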
+        exp_shape = np.array([1, 3, 454, 227])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_tf_concat_infer_negative_axis(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'concat'),
+                             ('node_2', 'concat'),
+                             ('concat', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_2': {'shape': np.array([1, 3, 227, 227])},
+                             'concat': {'axis': -1}
+                             })
+
+        concat_node = Node(graph, 'concat')
+        concat_infer(concat_node)
+        exp_shape = np.array([1, 3, 227, 454])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_tf_concat_infer_not_match(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'concat'),
+                             ('node_2', 'concat'),
+                             ('concat', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_2': {'shape': np.array([1, 2, 227, 227])},
+                             'concat': {'axis': 2}
+                             })
+
+        concat_node = Node(graph, 'concat')
+        concat_infer(concat_node)
+        res_shape = graph.node['node_3']['shape']
+        self.assertIsNone(res_shape)
+
+    def test_tf_concat_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'concat'),
+                             ('node_2', 'concat'),
+                             ('concat', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227])},
+                             'node_2': {'shape': None},
+                             'concat': {'axis': 2}
+                             })
+
+        concat_node = Node(graph, 'concat')
+        concat_infer(concat_node)
+        res_shape = graph.node['node_3']['shape']
+        self.assertIsNone(res_shape)
diff --git a/model-optimizer/mo/front/common/partial_infer/crop_test.py b/model-optimizer/mo/front/common/partial_infer/crop_test.py
new file mode 100644 (file)
index 0000000..d1eb97b
--- /dev/null
@@ -0,0 +1,123 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.crop import crop_infer
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'value': None, 'kind': 'data'},
+                    'node_2': {'value': None, 'kind': 'data'},
+                    'crop_1': {'type': 'Crop', 'kind': 'op'},
+                    'node_3': {'value': None, 'kind': 'data'}
+                    }
+
+
+class TestCropInfer(unittest.TestCase):
+    def test_crop_infer_ideal(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'crop_1'),
+                             ('node_2', 'crop_1'),
+                             ('crop_1', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 2, 500, 500])},
+                             'node_2': {'shape': np.array([1, 2, 256, 256])},
+                             'crop_1': {'axis': 2, 'offset': [0, 0], 'dim': None}
+                             })
+
+        crop_node = Node(graph, 'crop_1')
+
+        crop_infer(crop_node)
+        exp_shape = np.array([1, 2, 256, 256])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        self.assertEqual(crop_node.axis, [2, 3])
+        self.assertEqual(crop_node.offset, [0, 0])
+        self.assertEqual(crop_node.dim, [256, 256])
+
+    def test_crop_infer_negative_axis(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'crop_1'),
+                             ('node_2', 'crop_1'),
+                             ('crop_1', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 2, 500, 500])},
+                             'node_2': {'shape': np.array([1, 2, 256, 256])},
+                             'crop_1': {'axis': -1, 'offset': [0, 0], 'dim': None}
+                             })
+
+        crop_node = Node(graph, 'crop_1')
+
+        crop_infer(crop_node)
+        exp_shape = np.array([1, 2, 500, 256])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        self.assertEqual(crop_node.axis, [3])
+        self.assertEqual(crop_node.offset, [0])
+        self.assertEqual(crop_node.dim, [256])
+
+    def test_crop_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'crop_1'),
+                             ('node_2', 'crop_1'),
+                             ('crop_1', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 2, 500, 500])},
+                             'node_2': {'shape': None},
+                             'crop_1': {'axis': 2, 'offset': [0, 0], 'dim': None}
+                             })
+
+        crop_node = Node(graph, 'crop_1')
+
+        crop_infer(crop_node)
+        self.assertIsNone(graph.node['node_3']['shape'])
+
+    def test_crop_infer_one_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'crop_1'),
+                             ('crop_1', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 2, 500, 500])},
+                             'crop_1': {'axis': 2, 'offset': [0], 'dim': None}
+                             })
+
+        crop_node = Node(graph, 'crop_1')
+
+        crop_infer(crop_node)
+        self.assertIsNone(graph.node['node_3']['shape'])
+
+    def test_crop_infer_out_offset(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'crop_1'),
+                             ('node_2', 'crop_1'),
+                             ('crop_1', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 2, 500, 500])},
+                             'node_2': {'shape': np.array([1, 2, 256, 256])},
+                             'crop_1': {'axis': 2, 'offset': [300], 'dim': None}
+                             })
+
+        crop_node = Node(graph, 'crop_1')
+
+        crop_infer(crop_node)
+        self.assertIsNone(graph.node['node_3']['shape'])
diff --git a/model-optimizer/mo/front/common/partial_infer/elemental_test.py b/model-optimizer/mo/front/common/partial_infer/elemental_test.py
new file mode 100644 (file)
index 0000000..78d1dae
--- /dev/null
@@ -0,0 +1,39 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+import numpy as np
+
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+
+
+class FakeNode:
+    def __init__(self, blob):
+        self.blob = blob
+
+    def in_shape(self):
+        return self.blob
+
+
+class TestElementalInference(unittest.TestCase):
+    @patch('mo.front.common.partial_infer.elemental.single_output_infer')
+    def test_copy_shape_infer(self, single_output_infer_mock):
+        single_output_infer_mock.return_value = 0
+        node = FakeNode(np.array([1, 2]))
+        copy_shape_infer(node)
+        self.assertTrue(single_output_infer_mock.called)
diff --git a/model-optimizer/mo/front/common/partial_infer/eltwise_test.py b/model-optimizer/mo/front/common/partial_infer/eltwise_test.py
new file mode 100644 (file)
index 0000000..5b57bf6
--- /dev/null
@@ -0,0 +1,139 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.eltwise import eltwise_infer
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'value': 2, 'kind': 'data'},
+                    'node_2': {'value': 3, 'kind': 'data'},
+                    'eltw_1': {'type': 'Eltwise', 'kind': 'op'},
+                    'node_3': {'value': None, 'kind': 'data'}
+                    }
+
+
+class TestEltwiseInfer(unittest.TestCase):
+    def test_eltwise_infer_max(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'eltw_1'),
+                             ('node_2', 'eltw_1'),
+                             ('eltw_1', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 256, 256])},
+                             'node_2': {'shape': np.array([1, 3, 256, 256])},
+                             'eltw_1': {}
+                             })
+
+        graph.graph['layout'] = 'NCHW'
+
+        eltwise_node = Node(graph, 'eltw_1')
+
+        eltwise_infer(eltwise_node, lambda a, b: np.maximum(a, b))
+        exp_shape = np.array([1, 3, 256, 256])
+        exp_value = 3
+        res_shape = graph.node['node_3']['shape']
+        res_value = eltwise_node.out_node().value
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        self.assertEqual(exp_value, res_value)
+
+    def test_eltwise_infer_sum(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'eltw_1'),
+                             ('node_2', 'eltw_1'),
+                             ('eltw_1', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 256, 256])},
+                             'node_2': {'shape': np.array([1, 3, 256, 256])}
+                             })
+        graph.graph['layout'] = 'NCHW'
+        eltwise_node = Node(graph, 'eltw_1')
+
+        eltwise_infer(eltwise_node, lambda a, b: a + b)
+        exp_shape = np.array([1, 3, 256, 256])
+        exp_value = 5
+        res_shape = graph.node['node_3']['shape']
+        res_value = eltwise_node.out_node().value
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        self.assertEqual(exp_value, res_value)
+
+    def test_eltwise_infer_mul(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'eltw_1'),
+                             ('node_2', 'eltw_1'),
+                             ('eltw_1', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 256, 256])},
+                             'node_2': {'shape': np.array([1, 3, 256, 256])}
+                             })
+        graph.graph['layout'] = 'NCHW'
+        eltwise_node = Node(graph, 'eltw_1')
+
+        eltwise_infer(eltwise_node, lambda a, b: a * b)
+        exp_shape = np.array([1, 3, 256, 256])
+        exp_value = 6
+        res_shape = graph.node['node_3']['shape']
+        res_value = eltwise_node.out_node().value
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        self.assertEqual(exp_value, res_value)
+
+    def test_eltwise_infer_none_val(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'eltw_1'),
+                             ('node_2', 'eltw_1'),
+                             ('eltw_1', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 256, 256]), 'value': None},
+                             'node_2': {'shape': np.array([1, 3, 256, 256])}
+                             })
+        graph.graph['layout'] = 'NCHW'
+        eltwise_node = Node(graph, 'eltw_1')
+
+        eltwise_infer(eltwise_node, lambda a, b: a * b)
+        exp_shape = np.array([1, 3, 256, 256])
+        res_shape = graph.node['node_3']['shape']
+        res_value = eltwise_node.out_node().value
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        self.assertIsNone(res_value)
+
+    def test_eltwise_infer_none_min_max(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'eltw_1'),
+                             ('node_2', 'eltw_1'),
+                             ('eltw_1', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 257, 256])},
+                             'node_2': {'shape': np.array([1, 3, 256, 257])}
+                             })
+        graph.graph['layout'] = 'NCHW'
+        eltwise_node = Node(graph, 'eltw_1')
+
+        eltwise_infer(eltwise_node)
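+        # Mismatched spatial dimensions (257 vs 256) cannot be inferred and are reported as -1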
+        exp_shape = np.array([1, 3, -1, -1])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/mo/front/common/partial_infer/expand_dims_test.py b/model-optimizer/mo/front/common/partial_infer/expand_dims_test.py
new file mode 100644 (file)
index 0000000..69dbc44
--- /dev/null
@@ -0,0 +1,160 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.expand_dims import tf_expand_dims_infer
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'input_1': {'kind': 'data', 'value': None},
+                    'input_2': {'kind': 'data', 'value': None},
+                    'expand_dims': {'kind': 'op'},
+                    'out': {'value': None, 'shape': None, 'kind': 'data'}
+                    }
+
+
+class TestExpandDimsInfer(unittest.TestCase):
+    def test_expand_dims_infer_two_inputs(self):
+        graph = build_graph(nodes_attributes,
+                            [('input_1', 'expand_dims'),
+                             ('input_2', 'expand_dims'),
+                             ('expand_dims', 'out')],
+                            {'input_1': {'shape': np.array([3, 256, 256])},
+                             'input_2': {'shape': np.array([1]), 'value': np.array([1], dtype=np.int32)},
+                             })
+
+        expand_dims_node = Node(graph, 'expand_dims')
+
+        tf_expand_dims_infer(expand_dims_node)
+        exp_shape = np.array([3, 1, 256, 256])
+        res_shape = expand_dims_node.out_node().shape
+        self.assertEqual(len(exp_shape), len(res_shape))
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_expand_dims_infer_two_inputs_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('input_1', 'expand_dims'),
+                             ('input_2', 'expand_dims'),
+                             ('expand_dims', 'out')],
+                            {'input_1': {'shape': np.array([3, 256, 256])},
+                             'input_2': {'shape': np.array([1]), 'value': np.array([2], dtype=np.int32)},
+                             })
+
+        expand_dims_node = Node(graph, 'expand_dims')
+
+        tf_expand_dims_infer(expand_dims_node)
+        exp_shape = np.array([3, 256, 1, 256])
+        res_shape = expand_dims_node.out_node().shape
+        self.assertEqual(len(exp_shape), len(res_shape))
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_expand_dims_infer_two_inputs_3(self):
+        graph = build_graph(nodes_attributes,
+                            [('input_1', 'expand_dims'),
+                             ('input_2', 'expand_dims'),
+                             ('expand_dims', 'out')],
+                            {'input_1': {'shape': np.array([3, 256, 256])},
+                             'input_2': {'shape': np.array([]), 'value': np.array(3, dtype=np.int32)},
+                             })
+
+        expand_dims_node = Node(graph, 'expand_dims')
+
+        tf_expand_dims_infer(expand_dims_node)
+        exp_shape = np.array([3, 256, 256, 1])
+        res_shape = expand_dims_node.out_node().shape
+        self.assertEqual(len(exp_shape), len(res_shape))
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_expand_dims_infer_two_inputs_negative(self):
+        graph = build_graph(nodes_attributes,
+                            [('input_1', 'expand_dims'),
+                             ('input_2', 'expand_dims'),
+                             ('expand_dims', 'out')],
+                            {'input_1': {'shape': np.array([3, 256, 256])},
+                             'input_2': {'shape': np.array([1]), 'value': np.array([2, 3], dtype=np.int32)},
+                             })
+
+        expand_dims_node = Node(graph, 'expand_dims')
+
+        tf_expand_dims_infer(expand_dims_node)
+        self.assertIsNone(expand_dims_node.out_node().shape)
+
+    def test_expand_dims_infer_two_inputs_negative_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('input_1', 'expand_dims'),
+                             ('input_2', 'expand_dims'),
+                             ('expand_dims', 'out')],
+                            {'input_1': {'shape': None},
+                             'input_2': {'shape': np.array([1]), 'value': np.array([2, 3], dtype=np.int32)},
+                             })
+
+        expand_dims_node = Node(graph, 'expand_dims')
+
+        tf_expand_dims_infer(expand_dims_node)
+        self.assertIsNone(expand_dims_node.out_node().shape)
+
+    def test_expand_dims_infer_one_input(self):
+        graph = build_graph(nodes_attributes,
+                            [('input_1', 'expand_dims'),
+                             ('expand_dims', 'out')],
+                            {'input_1': {'shape': np.array([3, 256, 256])},
+                             'expand_dims': {'expand_axis': 1}
+                             })
+
+        expand_dims_node = Node(graph, 'expand_dims')
+
+        tf_expand_dims_infer(expand_dims_node)
+        exp_shape = np.array([3, 1, 256, 256])
+        res_shape = expand_dims_node.out_node().shape
+        self.assertEqual(len(exp_shape), len(res_shape))
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_expand_dims_infer_one_input_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('input_1', 'expand_dims'),
+                             ('expand_dims', 'out')],
+                            {'input_1': {'shape': np.array([3, 256, 256])},
+                             'expand_dims': {'expand_axis': 2}
+                             })
+
+        expand_dims_node = Node(graph, 'expand_dims')
+
+        tf_expand_dims_infer(expand_dims_node)
+        exp_shape = np.array([3, 256, 1, 256])
+        res_shape = expand_dims_node.out_node().shape
+        self.assertEqual(len(exp_shape), len(res_shape))
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_expand_dims_infer_one_input_negative(self):
+        graph = build_graph(nodes_attributes,
+                            [('input_1', 'expand_dims'),
+                             ('expand_dims', 'out')],
+                            {'input_1': {'shape': np.array([3, 256, 256])},
+                             'expand_dims': {'expand_axis': None}
+                             })
+
+        expand_dims_node = Node(graph, 'expand_dims')
+
+        tf_expand_dims_infer(expand_dims_node)
+        self.assertIsNone(expand_dims_node.out_node().shape)
diff --git a/model-optimizer/mo/front/common/partial_infer/inner_product_test.py b/model-optimizer/mo/front/common/partial_infer/inner_product_test.py
new file mode 100644 (file)
index 0000000..8b39312
--- /dev/null
@@ -0,0 +1,74 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.inner_product import caffe_inner_product
+from mo.graph.graph import Node
+from mo.utils.unittest.extractors import FakeValue
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'value': None, 'kind': 'data'},
+                    'inner': {'type': 'FullyConnected', 'value': None, 'kind': 'op'},
+                    'node_2': {'value': FakeValue(None), 'kind': 'data'},
+                    'node_3': {'value': None, 'kind': 'data'}
+                    }
+
+
+class TestInnerPartialInfer(unittest.TestCase):
+    def test_inner_product_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'inner'),
+                             ('node_2', 'inner'),
+                             ('inner', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 256, 256])},
+                             'node_2': {'shape': np.array([1, 3, 256, 256]),
+                                        'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
+                             'inner': {'out-size': 4}
+                             })
+
+        inner_node = Node(graph, 'inner')
+
+        caffe_inner_product(inner_node)
+        exp_shape = np.array([1, 4])
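+        # The weights input (node_2) is expected to be reshaped to [out-size, flattened input volume]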
+        exp_shape_in_node = np.array([4, 3 * 256 * 256])
+        res_shape = graph.node['node_3']['shape']
+        res_shape_in_node = inner_node.in_node(1).shape
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        for i in range(0, len(exp_shape_in_node)):
+            self.assertEqual(exp_shape_in_node[i], res_shape_in_node[i])
+
+    def test_inner_product_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'inner'),
+                             ('node_2', 'inner'),
+                             ('inner', 'node_3')],
+                            {'node_3': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': None},
+                             'node_2': {'shape': np.array([1, 3, 256, 256])},
+                             'inner': {'out-size': 4}
+                             })
+
+        inner_node = Node(graph, 'inner')
+
+        caffe_inner_product(inner_node)
+        res_shape = graph.node['node_3']['shape']
+        self.assertIsNone(res_shape)
diff --git a/model-optimizer/mo/front/common/partial_infer/multi_box_detection_test.py b/model-optimizer/mo/front/common/partial_infer/multi_box_detection_test.py
new file mode 100644 (file)
index 0000000..ad9859f
--- /dev/null
@@ -0,0 +1,129 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.multi_box_detection import multi_box_detection_infer
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'value': None, 'kind': 'data'},
+                    'node_2': {'value': None, 'kind': 'data'},
+                    'node_3': {'value': None, 'kind': 'data'},
+                    'detection_output_1': {'type': 'DetectionOutput', 'value': None, 'kind': 'op'},
+                    'node_4': {'value': None, 'kind': 'data'}
+                    }
+
+
+class TestMultiBoxDetectionInfer(unittest.TestCase):
+    def test_prior_box_infer_ideal(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'detection_output_1'),
+                             ('node_2', 'detection_output_1'),
+                             ('node_3', 'detection_output_1'),
+                             ('detection_output_1', 'node_4')],
+                            {'node_1': {'shape': np.array([1, 34928])},
+                             'node_2': {'shape': np.array([1, 183372])},
+                             'node_3': {'shape': np.array([1, 2, 34928])},
+                             'detection_output_1': {"background_label_id": "0", "clip": "1",
+                                                    "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
+                                                    "confidence_threshold": "0.01", "keep_top_k": "200",
+                                                    "nms_threshold": "0.5", "num_classes": "21",
+                                                    "share_location": "1", "top_k": "200",
+                                                    "variance_encoded_in_target": "0"},
+                             'node_4': {'shape': np.array([1, 1, 200, 7])},
+                             })
+
+        multi_box_detection_node = Node(graph, 'detection_output_1')
+
+        multi_box_detection_infer(multi_box_detection_node)
+        exp_shape = np.array([1, 1, 200, 7])
+        res_shape = graph.node['node_4']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        self.assertEqual(multi_box_detection_node.background_label_id, '0')
+        self.assertEqual(multi_box_detection_node.clip, '1')
+        self.assertEqual(multi_box_detection_node.code_type, 'caffe.PriorBoxParameter.CENTER_SIZE')
+        self.assertEqual(multi_box_detection_node.confidence_threshold, '0.01')
+        self.assertEqual(multi_box_detection_node.keep_top_k, '200')
+        self.assertEqual(multi_box_detection_node.nms_threshold, '0.5')
+        self.assertEqual(multi_box_detection_node.num_classes, 21)
+        self.assertEqual(multi_box_detection_node.share_location, '1')
+        self.assertEqual(multi_box_detection_node.top_k, '200')
+        self.assertEqual(multi_box_detection_node.variance_encoded_in_target, '0')
+
+    def test_prior_box_infer_without_top_k(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'detection_output_1'),
+                             ('node_2', 'detection_output_1'),
+                             ('node_3', 'detection_output_1'),
+                             ('detection_output_1', 'node_4')],
+                            {'node_1': {'shape': np.array([1, 34928])},
+                             'node_2': {'shape': np.array([1, 183372])},
+                             'node_3': {'shape': np.array([1, 2, 34928])},
+                             'detection_output_1': {"background_label_id": "0", "clip": "1",
+                                                    "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
+                                                    "confidence_threshold": "0.01", "keep_top_k": -1,
+                                                    "nms_threshold": "0.5", "num_classes": "21",
+                                                    "share_location": "1", "top_k": -1,
+                                                    "variance_encoded_in_target": "0"},
+                             'node_4': {'shape': np.array([1, 1, 69856, 7])},
+                             })
+
+        multi_box_detection_node = Node(graph, 'detection_output_1')
+
+        multi_box_detection_infer(multi_box_detection_node)
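+        # With top_k == -1 the detection count falls back to the total number of prior boxes: 34928 / 4 = 8732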
+        exp_shape = np.array([1, 1, 8732, 7])
+        res_shape = graph.node['node_4']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        self.assertEqual(multi_box_detection_node.background_label_id, '0')
+        self.assertEqual(multi_box_detection_node.clip, '1')
+        self.assertEqual(multi_box_detection_node.code_type, 'caffe.PriorBoxParameter.CENTER_SIZE')
+        self.assertEqual(multi_box_detection_node.confidence_threshold, '0.01')
+        self.assertEqual(multi_box_detection_node.keep_top_k, 8732)
+        self.assertEqual(multi_box_detection_node.nms_threshold, '0.5')
+        self.assertEqual(multi_box_detection_node.num_classes, 21)
+        self.assertEqual(multi_box_detection_node.share_location, '1')
+        self.assertEqual(multi_box_detection_node.top_k, -1)
+        self.assertEqual(multi_box_detection_node.variance_encoded_in_target, '0')
+
+    def test_prior_box_infer_raise_error(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'detection_output_1'),
+                             ('node_2', 'detection_output_1'),
+                             ('node_3', 'detection_output_1'),
+                             ('detection_output_1', 'node_4')],
+                            {'node_1': {'shape': np.array([1, 34928])},
+                             'node_2': {'shape': np.array([1, 183372])},
+                             'node_3': {'shape': np.array([1, 3, 34928])},
+                             'detection_output_1': {"background_label_id": "0", "clip": "1",
+                                                    "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
+                                                    "confidence_threshold": "0.01", "keep_top_k": -1,
+                                                    "nms_threshold": "0.5", "num_classes": "21",
+                                                    "share_location": "1", "top_k": -1,
+                                                    "variance_encoded_in_target": 0},
+                             'node_4': {'shape': np.array([1, 1, 69856, 7])},
+                             })
+
+        multi_box_detection_node = Node(graph, 'detection_output_1')
+
+        self.assertIsNone(multi_box_detection_infer(multi_box_detection_node))
diff --git a/model-optimizer/mo/front/common/partial_infer/multi_box_prior_test.py b/model-optimizer/mo/front/common/partial_infer/multi_box_prior_test.py
new file mode 100644 (file)
index 0000000..6e1ce7c
--- /dev/null
@@ -0,0 +1,58 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.multi_box_prior import multi_box_prior_infer_mxnet
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'value': None, 'kind': 'data'},
+                    'node_2': {'value': None, 'kind': 'data'},
+                    'prior_box_1': {'type': 'PriorBox', 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'value': None, 'kind': 'data'}
+                    }
+
+
+class TestMultiBoxPriorInfer(unittest.TestCase):
+    def test_prior_box_infer_ideal(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'prior_box_1'),
+                             ('node_2', 'prior_box_1'),
+                             ('prior_box_1', 'node_3')],
+                            {'node_1': {'shape': np.array([1, 1024, 19, 19])},
+                             'node_2': {'shape': np.array([1, 3, 300, 300])},
+                             'prior_box_1': {'aspect_ratio': [1.0, 2.0, 0.5, 3.0, 0.333333333333],
+                                             'min_size': [0.2, 0.272],
+                                             'max_size': '', 'offset': 0.5, 'step': 0.2, 'sizes': [0.2, 0.272]},
+                             'node_3': {'shape': np.array([1, 2, 3])},
+                             })
+
+        multi_box_prior_node = Node(graph, 'prior_box_1')
+
+        multi_box_prior_infer_mxnet(multi_box_prior_node)
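+        # Expected output holds 19 * 19 locations * 6 priors per location * 4 box coordinates = 8664 values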
+        exp_shape = np.array([1, 2, 8664])
+        res_shape = graph.node['node_3']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        self.assertEqual(multi_box_prior_node.min_size, [60.0, 81.600000000000009])
+        self.assertEqual(multi_box_prior_node.max_size, '')
+        self.assertEqual(multi_box_prior_node.aspect_ratio, [1.0, 2.0, 0.5, 3.0, 0.333333333333])
+        self.assertEqual(round(multi_box_prior_node.step, 1), 60.0)
+        self.assertEqual(round(multi_box_prior_node.offset, 1), 0.5)
diff --git a/model-optimizer/mo/front/common/partial_infer/range_test.py b/model-optimizer/mo/front/common/partial_infer/range_test.py
new file mode 100644 (file)
index 0000000..113c49b
--- /dev/null
@@ -0,0 +1,114 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.range import tf_range_infer
+from mo.graph.graph import Node
+from mo.utils.unittest.extractors import FakeParam
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'start': {'kind': 'data'},
+                    'limit': {'kind': 'data'},
+                    'delta': {'kind': 'data'},
+                    'range': {'kind': 'op'},
+                    'output': {'value': None, 'shape': None, 'kind': 'data'},
+                    }
+edges = [('start', 'range'), ('limit', 'range'), ('delta', 'range'), ('range', 'output')]
+
+
+class TestRangePartialInfer(unittest.TestCase):
+    def test_int32_specific_data_type_range_infer(self):
+        # import tensorflow to use TF data types
+        import tensorflow as tf
+        graph = build_graph(nodes_attributes, edges,
+                            {'start': {'value': np.array([1])},
+                             'limit': {'value': np.array([5])},
+                             'delta': {'value': np.array([1])},
+                             'range': {'pb': FakeParam('attr', dict(type=FakeParam('type', tf.int32)))},
+                             })
+
+        range_node = Node(graph, 'range')
+
+        tf_range_infer(range_node)
+        exp_value = np.array([1, 2, 3, 4], dtype=np.int32)
+        out_value = graph.node['output']['value']
+
+        self.assertTrue(exp_value.dtype == out_value.dtype)
+        self.assertTrue(np.array_equal(exp_value.shape, out_value.shape))
+        self.assertTrue(np.array_equal(exp_value, out_value))
+
+    def test_automatic_data_type_range_infer(self):
+        graph = build_graph(nodes_attributes, edges,
+                            {'start': {'value': np.array([2], dtype=np.float32)},
+                             'limit': {'value': np.array([5])},
+                             'delta': {'value': np.array([1])},
+                             'range': {'pb': FakeParam('attr', dict())},
+                             })
+
+        range_node = Node(graph, 'range')
+
+        tf_range_infer(range_node)
+        exp_value = np.array([2.0, 3.0, 4.0], dtype=np.float32)
+        out_value = graph.node['output']['value']
+
+        self.assertTrue(exp_value.dtype == out_value.dtype)
+        self.assertTrue(np.array_equal(exp_value.shape, out_value.shape))
+        self.assertTrue(np.array_equal(exp_value, out_value))
+
+    def test_non_constant_start_range_infer(self):
+        graph = build_graph(nodes_attributes, edges,
+                            {'start': {},
+                             'limit': {'value': np.array([5])},
+                             'delta': {'value': np.array([1])},
+                             'range': {'pb': FakeParam('attr', dict())},
+                             })
+
+        range_node = Node(graph, 'range')
+
+        tf_range_infer(range_node)
+        out_value = graph.node['output']['value']
+        self.assertIsNone(out_value)
+
+    def test_non_constant_limit_range_infer(self):
+        graph = build_graph(nodes_attributes, edges,
+                            {'start': {'value': np.array([1])},
+                             'limit': {},
+                             'delta': {'value': np.array([1])},
+                             'range': {'pb': FakeParam('attr', dict())},
+                             })
+
+        range_node = Node(graph, 'range')
+
+        tf_range_infer(range_node)
+        out_value = graph.node['output']['value']
+        self.assertIsNone(out_value)
+
+    def test_non_constant_delta_range_infer(self):
+        graph = build_graph(nodes_attributes, edges,
+                            {'start': {'value': np.array([1])},
+                             'limit': {'value': np.array([10])},
+                             'delta': {},
+                             'range': {'pb': FakeParam('attr', dict())},
+                             })
+
+        range_node = Node(graph, 'range')
+
+        tf_range_infer(range_node)
+        out_value = graph.node['output']['value']
+        self.assertIsNone(out_value)
diff --git a/model-optimizer/mo/front/common/partial_infer/roipooling_test.py b/model-optimizer/mo/front/common/partial_infer/roipooling_test.py
new file mode 100644 (file)
index 0000000..f6b9eba
--- /dev/null
@@ -0,0 +1,92 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.roipooling import roipooling_infer
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'kind': 'data'},
+                    'node_2': {'kind': 'data'},
+                    'node_3': {'kind': 'data'},
+                    'node_4': {'kind': 'data'},
+                    'roipool': {'type': 'ROIPooling', 'kind': 'op', 'pooled_h': None, 'pooled_w': None},
+                    'output': {'value': None, 'kind': 'data'}
+                    }
+
+
+class TestRoipoolingInfer(unittest.TestCase):
+    def test_roipooling_infer_ideal(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'roipool'),
+                             ('node_2', 'roipool'),
+                             ('roipool', 'output')],
+                            {'output': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 256, 20, 20])},
+                             'node_2': {'shape': np.array([150, 5])},
+                             'roipool': {'pooled_h': 6, 'pooled_w': 6}
+                             })
+        graph.graph['layout'] = 'NCHW'
+        roipooling_node = Node(graph, 'roipool')
+
+        roipooling_infer(roipooling_node)
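+        # expected output shape is [num_rois, channels, pooled_h, pooled_w] for the NCHW layout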
+        exp_shape = np.array([150, 256, 6, 6])
+        res_shape = graph.node['output']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_roipooling_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'roipool'),
+                             ('node_2', 'roipool'),
+                             ('roipool', 'output')],
+                            {'output': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': None},
+                             'node_2': {'shape': np.array([1, 256])},
+                             'roipool': {'pooled_h': 6, 'pooled_w': 6}
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        roipooling_node = Node(graph, 'roipool')
+
+        roipooling_infer(roipooling_node)
+        self.assertIsNone(graph.node['output']['shape'])
+
+    def test_roipooling_infer_tf(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'roipool'),
+                             ('node_2', 'roipool'),
+                             ('node_3', 'roipool'),
+                             ('node_4', 'roipool'),
+                             ('roipool', 'output')],
+                            {'output': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 20, 20, 256])},
+                             'node_2': {'shape': np.array([150, 5])},
+                             'node_3': {'shape': np.array([150])},
+                             'node_4': {'shape': np.array([2], dtype=np.int64), 'value': np.array([7, 6],
+                                                                                                  dtype=np.int64)},
+                             })
+        graph.graph['layout'] = 'NHWC'
+        roipooling_node = Node(graph, 'roipool')
+
+        roipooling_infer(roipooling_node)
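+        # with the NHWC layout the pooled size [7, 6] comes from node_4: [num_rois, pooled_h, pooled_w, channels]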
+        exp_shape = np.array([150, 7, 6, 256])
+        res_shape = graph.node['output']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
diff --git a/model-optimizer/mo/front/common/partial_infer/slice_test.py b/model-optimizer/mo/front/common/partial_infer/slice_test.py
new file mode 100644 (file)
index 0000000..cdd674d
--- /dev/null
@@ -0,0 +1,381 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.slice import caffe_slice_infer, tf_strided_slice_infer, \
+    convert_negative_indices, mxnet_slice_axis_infer
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'value': None, 'kind': 'data'},
+                    'Slice_node': {'type': 'Slice', 'kind': 'op'},
+                    'node_2': {'value': None, 'kind': 'data'},
+                    'node_3': {'value': None, 'kind': 'data'},
+                    'node_4': {'value': None, 'kind': 'data'},
+                    # StridedSlice node with attrs
+                    'sslice_input': {'value': None, 'shape': None, 'kind': 'data'},
+                    'sslice_1': {'type': 'StridedSlice', 'value': None, 'kind': 'op', 'op': 'StridedSlice'},
+                    'sslice_begin_1': {'value': None, 'shape': None, 'kind': 'data'},
+                    'sslice_end_1': {'value': None, 'shape': None, 'kind': 'data'},
+                    'sslice_stride_1': {'value': None, 'shape': None, 'kind': 'data'},
+                    'sslice_data_1': {'value': None, 'shape': None, 'kind': 'data'},
+                    # TF slice
+                    'tf_slice_input': {'value': None, 'shape': None, 'kind': 'data'},
+                    'tf_slice_begin': {'value': None, 'shape': None, 'kind': 'data'},
+                    'tf_slice_size': {'value': None, 'shape': None, 'kind': 'data'},
+                    'tf_slice': {'kind': 'op'},
+                    'tf_slice_output': {'value': None, 'shape': None, 'kind': 'data'},
+                    }
+
+tf_slice_edges = [('tf_slice_input', 'tf_slice'), ('tf_slice_begin', 'tf_slice'), ('tf_slice_size', 'tf_slice'),
+                  ('tf_slice', 'tf_slice_output')]
+
+
+class TestSSliceInfer(unittest.TestCase):
+    def test_slice_infer_ideal(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'Slice_node'),
+                             ('Slice_node', 'node_2'),
+                             ('Slice_node', 'node_3')],
+                            {'node_1': {'shape': np.array([1, 288, 56, 56])},
+                             'node_2': {'is_output': True, 'shape': None},
+                             'node_3': {'is_output': True, 'shape': None},
+                             'Slice_node': {'axis': 1, 'slice_point': np.array([256])}
+                             })
+
+        slice_node = Node(graph, 'Slice_node')
+
+        caffe_slice_infer(slice_node)
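+        # slice_point = [256] splits the 288 channels along axis 1 into 256 and 288 - 256 = 32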
+        exp_shape1 = np.array([1, 256, 56, 56])
+        exp_shape2 = np.array([1, 32, 56, 56])
+        res_shape1 = graph.node['node_2']['shape']
+        res_shape2 = graph.node['node_3']['shape']
+
+        for i in range(0, len(exp_shape1)):
+            self.assertEqual(exp_shape1[i], res_shape1[i])
+
+        for i in range(0, len(exp_shape2)):
+            self.assertEqual(exp_shape2[i], res_shape2[i])
+
+    def test_slice_infer_no_slice_point(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'Slice_node'),
+                             ('Slice_node', 'node_2'),
+                             ('Slice_node', 'node_3')],
+                            {'node_1': {'shape': np.array([1, 288, 56, 56])},
+                             'node_2': {'is_output': True, 'shape': None},
+                             'node_3': {'is_output': True, 'shape': None},
+                             'Slice_node': {'axis': 1, 'slice_point': []}
+                             })
+
+        slice_node = Node(graph, 'Slice_node')
+
+        caffe_slice_infer(slice_node)
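+        # without slice_point the 288 channels are split evenly between the two outputs: 288 / 2 = 144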
+        exp_shape = np.array([1, 144, 56, 56])
+        res_shape1 = graph.node['node_2']['shape']
+        res_shape2 = graph.node['node_3']['shape']
+
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape1[i])
+
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape2[i])
+
+    def test_slice_infer_3_outs_no_slice_point(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'Slice_node'),
+                             ('Slice_node', 'node_2'),
+                             ('Slice_node', 'node_3'),
+                             ('Slice_node', 'node_4')],
+                            {'node_1': {'shape': np.array([1, 288, 56, 56])},
+                             'node_2': {'is_output': True, 'shape': None},
+                             'node_3': {'is_output': True, 'shape': None},
+                             'node_4': {'is_output': True, 'shape': None},
+                             'Slice_node': {'axis': 1, 'slice_point': []}
+                             })
+
+        slice_node = Node(graph, 'Slice_node')
+
+        caffe_slice_infer(slice_node)
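+        # the 288 channels are split evenly between the three outputs: 288 / 3 = 96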
+        exp_shape = np.array([1, 96, 56, 56])
+        res_shape1 = graph.node['node_2']['shape']
+        res_shape2 = graph.node['node_3']['shape']
+        res_shape3 = graph.node['node_4']['shape']
+
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape1[i])
+
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape2[i])
+
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape3[i])
+
+    def test_slice_infer_3_outs(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'Slice_node'),
+                             ('Slice_node', 'node_2'),
+                             ('Slice_node', 'node_3'),
+                             ('Slice_node', 'node_4')],
+                            {'node_1': {'shape': np.array([1, 288, 56, 56])},
+                             'node_2': {'is_output': True, 'shape': None},
+                             'node_3': {'is_output': True, 'shape': None},
+                             'node_4': {'is_output': True, 'shape': None},
+                             'Slice_node': {'axis': 1, 'slice_point': [100, 150]}
+                             })
+
+        slice_node = Node(graph, 'Slice_node')
+
+        caffe_slice_infer(slice_node)
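+        # slice_point = [100, 150] gives slices of 100, 150 - 100 = 50 and 288 - 150 = 138 channels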
+        exp_shape1 = np.array([1, 100, 56, 56])
+        exp_shape2 = np.array([1, 50, 56, 56])
+        exp_shape3 = np.array([1, 138, 56, 56])
+        res_shape1 = graph.node['node_2']['shape']
+        res_shape2 = graph.node['node_3']['shape']
+        res_shape3 = graph.node['node_4']['shape']
+
+        for i in range(0, len(exp_shape1)):
+            self.assertEqual(exp_shape1[i], res_shape1[i])
+
+        for i in range(0, len(exp_shape2)):
+            self.assertEqual(exp_shape2[i], res_shape2[i])
+
+        for i in range(0, len(exp_shape3)):
+            self.assertEqual(exp_shape3[i], res_shape3[i])
+
+
+class TestTFStridedSliceInfer(unittest.TestCase):
+    def build_test_graph2(self):
+        return build_graph(nodes_attributes,
+                           [('sslice_input', 'sslice_1'),
+                            ('sslice_begin_1', 'sslice_1'),
+                            ('sslice_end_1', 'sslice_1'),
+                            ('sslice_stride_1', 'sslice_1'),
+                            ('sslice_1', 'sslice_data_1'),
+                            ],
+                           {'sslice_data_1': {'is_output': True},
+                            'sslice_input': {'value': np.array([1, 34, 34, 62]),
+                                             'shape': np.array([3])},
+                            'sslice_begin_1': {'value': np.array([0]), 'shape': np.array([1])},
+                            'sslice_end_1': {'value': np.array([4]), 'shape': np.array([1])},
+                            'sslice_stride_1': {'value': np.array([1]), 'shape': np.array([1])},
+                            'sslice_1': {'shrink_axis_mask': 0, 'ellipsis_mask': 0, 'new_axis_mask': 0,
+                                         'begin_mask': 0, 'end_mask': 0},
+                            })
+
+    def build_test_graph(self):
+        return build_graph(nodes_attributes,
+                           [('sslice_input', 'sslice_1'),
+                            ('sslice_begin_1', 'sslice_1'),
+                            ('sslice_end_1', 'sslice_1'),
+                            ('sslice_stride_1', 'sslice_1'),
+                            ('sslice_1', 'sslice_data_1'),
+                            ],
+                           {'sslice_data_1': {'is_output': True},
+                            'sslice_input': {'value': None, 'shape': np.array([1, 35, 35, 3])},
+                            'sslice_begin_1': {'value': np.array([0, 0, 0, 0]), 'shape': np.array([4])},
+                            'sslice_end_1': {'value': np.array([1, 34, 30, 2]), 'shape': np.array([4])},
+                            'sslice_stride_1': {'value': np.array([1, 1, 1, 1]),
+                                                'shape': np.array([4])},
+                            'sslice_1': {'shrink_axis_mask': 0, 'ellipsis_mask': 0, 'new_axis_mask': 0,
+                                         'begin_mask': 0, 'end_mask': 0},
+                            })
+
+    def build_test_graph_dim_beg(self):
+        return build_graph(nodes_attributes,
+                           [('sslice_input', 'sslice_1'),
+                            ('sslice_begin_1', 'sslice_1'),
+                            ('sslice_end_1', 'sslice_1'),
+                            ('sslice_stride_1', 'sslice_1'),
+                            ('sslice_1', 'sslice_data_1'),
+                            ],
+                           {'sslice_data_1': {'is_output': True},
+                            'sslice_input': {'value': np.array([[1, 34, 34, 62]]),
+                                             'shape': np.array([1, 4])},
+                            'sslice_begin_1': {'value': np.array([0]), 'shape': np.array([1])},
+                            'sslice_end_1': {'value': np.array([4]), 'shape': np.array([1])},
+                            'sslice_stride_1': {'value': np.array([1]), 'shape': np.array([1])},
+                            'sslice_1': {'shrink_axis_mask': 0, 'ellipsis_mask': 0, 'new_axis_mask': 0,
+                                         'begin_mask': 0, 'end_mask': 0},
+                            })
+
+    def test_slice_infer_1(self):
+        graph = self.build_test_graph()
+        node = Node(graph, 'sslice_1')
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([1, 34, 30, 2])), 'Wrong output shape detected')
+
+    def test_slice_infer_2(self):
+        graph = self.build_test_graph()
+        node = Node(graph, 'sslice_1')
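+        # a set bit in end_mask means the corresponding end value is ignored, so axes 1 and 2 keep their full extent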
+        node.end_mask = 6  # 0110
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([1, 35, 35, 2])), 'Wrong output shape detected')
+
+    def test_slice_infer_3(self):
+        graph = self.build_test_graph()
+        node = Node(graph, 'sslice_1')
+        node.in_node(1).value = np.array([0, 10, 10, 0])
+        node.end_mask = 6  # 0110
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([1, 25, 25, 2])), 'Wrong output shape detected')
+
+    def test_slice_infer_4(self):
+        graph = self.build_test_graph()
+        node = Node(graph, 'sslice_1')
+        node.in_node(1).value = np.array([0, 10, 10, 0])
+        node.begin_mask = 6  # 0110
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([1, 34, 30, 2])), 'Wrong output shape detected')
+
+    def test_slice_infer_5(self):
+        graph = self.build_test_graph()
+        node = Node(graph, 'sslice_1')
+        node.in_node(1).value = np.array([0, 10, 10, 0])
+        node.begin_mask = 15  # 1111
+        node.end_mask = 15  # 1111
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([1, 35, 35, 3])), 'Wrong output shape detected')
+
+    def test_slice_infer_6(self):
+        graph = self.build_test_graph2()
+        node = Node(graph, 'sslice_1')
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([4])), 'Wrong output shape detected')
+        self.assertTrue(np.array_equal(node.out_node().value, np.array([1, 34, 34, 62])), 'Wrong output value detected')
+
+    def test_slice_infer_7(self):
+        graph = self.build_test_graph2()
+        node = Node(graph, 'sslice_1')
+        node.in_node(1).value = np.array([1])
+        node.in_node(2).value = np.array([3])
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([2])), 'Wrong output shape detected')
+        self.assertTrue(np.array_equal(node.out_node().value, np.array([34, 34])), 'Wrong output value detected')
+
+    def test_slice_infer_8(self):
+        graph = self.build_test_graph2()
+        node = Node(graph, 'sslice_1')
+        node.new_axis_mask = 1
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([1, 4])), 'Wrong output shape detected')
+        self.assertTrue(np.array_equal(node.out_node().value, np.array([[1, 34, 34, 62]])),
+                        'Wrong output value detected')
+
+    def test_slice_infer_9(self):
+        graph = self.build_test_graph()
+        node = Node(graph, 'sslice_1')
+        node.begin_mask = 15  # 1111
+        node.end_mask = 15  # 1111
+        node.shrink_axis_mask = 1
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([35, 35, 3])), 'Wrong output shape detected')
+
+    def test_slice_infer_10(self):
+        graph = self.build_test_graph()
+        node = Node(graph, 'sslice_1')
+        node.begin_mask = 15  # 1111
+        node.end_mask = 15  # 1111
+        node.shrink_axis_mask = 1
+        node.new_axis_mask = 8
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([35, 35, 1, 3])), 'Wrong output shape detected')
+
+    def test_slice_infer_11(self):
+        graph = self.build_test_graph()
+        node = Node(graph, 'sslice_1')
+        node.begin_mask = 15  # 1111
+        node.end_mask = 15  # 1111
+        node.shrink_axis_mask = 5  # 0101
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([35, 3])), 'Wrong output shape detected')
+
+    def test_slice_infer_12(self):
+        graph = self.build_test_graph()
+        node = Node(graph, 'sslice_1')
+        node.begin_mask = 15  # 1111
+        node.end_mask = 15  # 1111
+        node.shrink_axis_mask = 7  # 0111
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([3])), 'Wrong output shape detected')
+
+    def test_slice_infer_13(self):
+        graph = self.build_test_graph2()
+        node = Node(graph, 'sslice_1')
+        node.in_node(1).value = np.array([1])
+        node.shrink_axis_mask = 1
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([])), 'Wrong output shape detected')
+        self.assertTrue(np.array_equal(node.out_node().value, np.array(34)), 'Wrong output value detected')
+
+    def test_slice_infer_14(self):
+        graph = self.build_test_graph2()
+        node = Node(graph, 'sslice_1')
+        node.in_node(3).value = np.array([-1])
+        node.end_mask = 1
+        node.begin_mask = 1
+        node.in_node(0).shape = [4]
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([4])), 'Wrong output shape detected')
+        self.assertTrue(np.array_equal(node.out_node().value, np.array([62, 34, 34, 1])), 'Wrong output value detected')
+
+    def test_slice_infer_dim_beg(self):
+        graph = self.build_test_graph_dim_beg()
+        node = Node(graph, 'sslice_1')
+        node.shrink_axis_mask = 1
+        tf_strided_slice_infer(node)
+        self.assertTrue(np.array_equal(node.out_node().shape, np.array([4])), 'Wrong output shape detected')
+        self.assertTrue(np.array_equal(node.out_node().value, np.array([1, 34, 34, 62])), 'Wrong output value detected')
+
+
+class TestConvertNegativeIndices(unittest.TestCase):
+    def test_convert_negative_indices(self):
+        dimensions = np.array([3, 4, 8, 10])
+        indices = np.array([2, 0, -3, -4])
+        convert_negative_indices(indices, dimensions)
+        self.assertTrue(np.array_equal(indices, np.array([2, 0, 5, 6])), 'Wrong dimension indices')
+
+
+class TestMXNetSliceAxisInfer(unittest.TestCase):
+    def test_slice_axis_infer_layer(self):
+        graph = build_graph(
+            {'node_1': {'name': 'data', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'slice_axis_node': {'name': 'slice_axis_node', 'type': 'sigmoid', 'value': None,
+                                 'kind': 'op', 'op': 'slice_axis', },
+             'node_3': {'name': 'node_3', 'type': 'Identity', 'value': None, 'kind': 'op'},
+             },
+            [
+                ('node_1', 'slice_axis_node'),
+                ('slice_axis_node', 'node_3'),
+            ],
+            {
+                'node_1': {'shape': np.array([1, 1024, 19, 19])},
+                'slice_axis_node': {'axis': 1, 'offset': 10, 'dim': 25},
+            })
+
+        slice_axis_node = Node(graph, 'slice_axis_node')
+        mxnet_slice_axis_infer(slice_axis_node)
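+        # axis 1 is cut from offset 10 to dim 25, so the output gets 25 - 10 = 15 channels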
+        res_shape = [1, 15, 19, 19]
+        for i in range(0, len(graph.node['node_3']['shape'])):
+            self.assertEqual(graph.node['node_3']['shape'][i], res_shape[i])
diff --git a/model-optimizer/mo/front/common/partial_infer/split_test.py b/model-optimizer/mo/front/common/partial_infer/split_test.py
new file mode 100644 (file)
index 0000000..a81b57a
--- /dev/null
@@ -0,0 +1,207 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.split import tf_split_infer, tf_unpack_infer, tf_split_v_infer, split
+from mo.front.common.partial_infer.utils import int64_array
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph, build_graph_with_edge_attrs
+
+
+class TestTFSplitInfer(unittest.TestCase):
+    graph = None
+
+    def setUp(self):
+        self.graph = build_graph({'split_dim': {'value': None, 'kind': 'data'},
+                                  'data_to_split': {'value': None, 'shape': None, 'kind': 'data'},
+                                  'split_node': {'kind': 'op', 'op': 'Split', 'num_split': 3, 'axis': None},
+                                  'out_data_1': {'value': None, 'shape': None, 'kind': 'data'},
+                                  'out_data_2': {'value': None, 'shape': None, 'kind': 'data'},
+                                  'out_data_3': {'value': None, 'shape': None, 'kind': 'data'},
+                                  },
+                                 [('split_dim', 'split_node'),
+                                  ('data_to_split', 'split_node'),
+                                  ('split_node', 'out_data_1'),
+                                  ('split_node', 'out_data_2'),
+                                  ('split_node', 'out_data_3'),
+                                  ])
+
+    def test_tf_split_infer(self):
+        split_node = Node(self.graph, 'split_node')
+        self.graph.node['split_dim']['value'] = np.array(1)
+        self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
+
+        tf_split_infer(split_node)
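+        # axis 1 of size 12 is split into num_split = 3 equal parts of size 4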
+        exp_shape = int64_array([2, 4, 25, 30])
+        for out_node in split_node.out_nodes().values():
+            self.assertTrue(np.all(exp_shape == out_node.shape))
+        self.assertEqual(1, split_node.input_port)
+
+    def test_tf_split_infer_negative_index(self):
+        split_node = Node(self.graph, 'split_node')
+        self.graph.node['split_dim']['value'] = np.array(-3)
+        self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
+
+        tf_split_infer(split_node)
+        exp_shape = int64_array([2, 4, 25, 30])
+        for out_node in split_node.out_nodes().values():
+            self.assertTrue(np.all(exp_shape == out_node.shape))
+        self.assertEqual(1, split_node.input_port)
+
+    def test_tf_split_infer_unknown_index(self):
+        split_node = Node(self.graph, 'split_node')
+        self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
+
+        tf_split_infer(split_node)
+        for out_node in split_node.out_nodes().values():
+            self.assertIsNone(out_node.shape)
+
+    def test_tf_split_infer_input_shape_is_None(self):
+        split_node = Node(self.graph, 'split_node')
+        self.graph.node['split_dim']['value'] = np.array(1)
+
+        tf_split_infer(split_node)
+        for out_node in split_node.out_nodes().values():
+            self.assertIsNone(out_node.shape)
+
+    def test_tf_split_infer_wrong_num_split(self):
+        split_node = Node(self.graph, 'split_node')
+        self.graph.node['split_dim']['value'] = np.array(0)
+        self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
+
+        tf_split_infer(split_node)
+        for out_node in split_node.out_nodes().values():
+            self.assertIsNone(out_node.shape)
+
+
+class TestTFSplitVInfer(unittest.TestCase):
+    graph = None
+
+    def setUp(self):
+        self.graph = build_graph({'data_to_split': {'value': None, 'shape': None, 'kind': 'data'},
+                                  'size_splits': {'value': [3, 5, 4], 'kind': 'data'},
+                                  'split_dim': {'value': None, 'kind': 'data'},
+                                  'split_node': {'kind': 'op', 'op': 'Split', 'axis': None},
+                                  'out_data_1': {'value': None, 'shape': None, 'kind': 'data'},
+                                  'out_data_2': {'value': None, 'shape': None, 'kind': 'data'},
+                                  'out_data_3': {'value': None, 'shape': None, 'kind': 'data'},
+                                  },
+                                 [('data_to_split', 'split_node'),
+                                  ('size_splits', 'split_node'),
+                                  ('split_dim', 'split_node'),
+                                  ('split_node', 'out_data_1'),
+                                  ('split_node', 'out_data_2'),
+                                  ('split_node', 'out_data_3'),
+                                  ])
+
+    def test_tf_split_infer_three_inputs(self):
+        split_node = Node(self.graph, 'split_node')
+        self.graph.node['split_dim']['value'] = np.array(1)
+        self.graph.node['data_to_split']['shape'] = int64_array([2, 12, 25, 30])
+
+        tf_split_v_infer(split_node)
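+        # size_splits = [3, 5, 4] defines the sizes of the three outputs along axis 1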
+        exp_shape = [int64_array([2, 3, 25, 30]), int64_array([2, 5, 25, 30]), int64_array([2, 4, 25, 30])]
+        for ind, out_node in split_node.out_nodes().items():
+            self.assertTrue(np.all(exp_shape[ind] == out_node.shape))
+
+
+class TestTFUnpack(unittest.TestCase):
+    graph = None
+
+    def setUp(self):
+        self.graph = build_graph({'data_to_split': {'value': None, 'shape': None, 'kind': 'data'},
+                                  'unpack': {'kind': 'op', 'op': 'Split', 'num_split': 3, 'axis': None},
+                                  'out_data_1': {'value': None, 'shape': None, 'kind': 'data'},
+                                  'out_data_2': {'value': None, 'shape': None, 'kind': 'data'},
+                                  'out_data_3': {'value': None, 'shape': None, 'kind': 'data'},
+                                  'out_data_4': {'value': None, 'shape': None, 'kind': 'data'},
+                                  },
+                                 [('data_to_split', 'unpack'),
+                                  ('unpack', 'out_data_1'),
+                                  ('unpack', 'out_data_2'),
+                                  ('unpack', 'out_data_3'),
+                                  ])
+
+    def test_tf_unpack_infer(self):
+        unpack_node = Node(self.graph, 'unpack')
+        self.graph.node['unpack']['axis'] = np.array(1)
+        self.graph.node['data_to_split']['shape'] = int64_array([2, 3, 25, 30])
+
+        tf_unpack_infer(unpack_node)
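+        # the axis of size 3 is unpacked into 3 pieces of size 1 along that axis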
+        exp_shape = int64_array([2, 1, 25, 30])
+        for out_node in unpack_node.out_nodes().values():
+            self.assertTrue(np.all(exp_shape == out_node.shape))
+
+    def test_tf_unpack_infer_default_number_of_pieces(self):
+        unpack_node = Node(self.graph, 'unpack')
+        self.graph.node['unpack']['axis'] = np.array(1)
+        self.graph.node['unpack']['num_split'] = None
+        self.graph.node['data_to_split']['shape'] = int64_array([2, 3, 25, 30])
+
+        tf_unpack_infer(unpack_node)
+        exp_shape = int64_array([2, 1, 25, 30])
+        for out_node in unpack_node.out_nodes().values():
+            self.assertTrue(np.all(exp_shape == out_node.shape))
+
+    def test_tf_unpack_infer_not_supported(self):
+        # unpacking is not supported when the size of the unpacked dimension differs from the number of pieces
+        unpack_node = Node(self.graph, 'unpack')
+        self.graph.node['unpack']['axis'] = np.array(1)
+        self.graph.node['data_to_split']['shape'] = int64_array([2, 6, 25, 30])
+
+        tf_unpack_infer(unpack_node)
+        for out_node in unpack_node.out_nodes().values():
+            self.assertIsNone(out_node.shape)
+
+
+class TestSplitFunc(unittest.TestCase):
+    graph = None
+
+    def setUp(self):
+        self.graph = build_graph_with_edge_attrs(
+            {'data_to_split': {'value': None, 'shape': int64_array([2, 12, 25, 44]), 'kind': 'data'},
+             'split_node': {'kind': 'op', 'op': 'Split', 'axis': None},
+             'out_data_2': {'value': None, 'shape': None, 'kind': 'data'},
+             'out_data_5': {'value': None, 'shape': None, 'kind': 'data'},
+             'out_data_7': {'value': None, 'shape': None, 'kind': 'data'},
+             },
+            [('data_to_split', 'split_node', {'in': 0}),
+             ('split_node', 'out_data_2', {'out': 2}),
+             ('split_node', 'out_data_5', {'out': 5}),
+             ('split_node', 'out_data_7', {'out': 7}),
+             ])
+
+    def test_split_non_sequential_output_port(self):
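+        # the last argument is the per-output-port size list; only ports 2, 5 and 7 are connected, so they get sizes 7, 4 and 8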
+        split(Node(self.graph, 'data_to_split'), Node(self.graph, 'split_node'), -1, [3, 2, 7, 5, 6, 4, 9, 8])
+        self.assertTrue(np.all(Node(self.graph, 'out_data_2').shape == [2, 12, 25, 7]))
+        self.assertTrue(np.all(Node(self.graph, 'out_data_5').shape == [2, 12, 25, 4]))
+        self.assertTrue(np.all(Node(self.graph, 'out_data_7').shape == [2, 12, 25, 8]))
+
+    def test_split_value_infer_non_sequential_output_port(self):
+        data_node = Node(self.graph, 'data_to_split')
+        value = np.array(range(2 * 12 * 25 * 44)).reshape(data_node.shape)
+        data_node.value = value.copy()
+        split(data_node, Node(self.graph, 'split_node'), -1, [3, 2, 7, 5, 6, 4, 9, 8])
+        self.assertTrue(np.all(Node(self.graph, 'out_data_2').shape == [2, 12, 25, 7]))
+        self.assertTrue(np.all(Node(self.graph, 'out_data_5').shape == [2, 12, 25, 4]))
+        self.assertTrue(np.all(Node(self.graph, 'out_data_7').shape == [2, 12, 25, 8]))
+
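+        # the slices start at the cumulative offsets of the size list: port 2 -> [5:12], port 5 -> [23:27], port 7 -> [36:44]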
+        self.assertTrue(np.all(Node(self.graph, 'out_data_2').value == value[:, :, :, 5:12]))
+        self.assertTrue(np.all(Node(self.graph, 'out_data_5').value == value[:, :, :, 23:27]))
+        self.assertTrue(np.all(Node(self.graph, 'out_data_7').value == value[:, :, :, 36:]))
diff --git a/model-optimizer/mo/front/extractor_test.py b/model-optimizer/mo/front/extractor_test.py
new file mode 100644 (file)
index 0000000..5fcb5eb
--- /dev/null
@@ -0,0 +1,602 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+from generator import generator, generate
+
+from mo.front.extractor import input_user_data_repack, output_user_data_repack, extract_port_from_string, \
+    update_ie_fields, add_input_op
+from mo.front.extractor import spatial_attr_getter, add_input_ops, attr_getter, CaffePythonFrontExtractorOp, \
+    add_output_ops
+from mo.graph.graph import Node
+from mo.middle.passes import eliminate
+from mo.utils.error import Error
+from mo.utils.unittest.extractors import FakeMultiParam
+from mo.utils.unittest.graph import build_graph, build_graph_with_edge_attrs, build_graph_with_attrs, compare_graphs
+
+
+class FakePythonParam:
+    def __init__(self, param: FakeMultiParam):
+        self.__setattr__('python_param', param)
+
+
+nodes_attributes = {'input': {'kind': 'data'},
+                    'pool_1': {'type': 'Pooling', 'kind': 'op'},
+                    'output': {'kind': 'data'}
+                    }
+
+
+class UpdateIEFieldsTest(unittest.TestCase):
+    def test_default_update_ie_fields(self):
+        update_ie_fields({}, ir_version=None)
+
+    def test_not_set_update_ie_fields(self):
+        with self.assertRaisesRegex(Error, 'Unrecognized IR version.*'):
+            update_ie_fields({}, ir_version='abracadabra')
+
+
+class TestExtractor(unittest.TestCase):
+    def test_spatial_attr_getter(self):
+        input_shape = np.array([1, 125, 13, 13])
+        params = {
+            'kernel': np.array([1, 1, 1, 2]),
+            'pad': np.array([1, 1, 3, 4]),
+            'stride': np.array([1, 1, 2, 3]),
+        }
+        graph = build_graph(nodes_attributes,
+                            [('input', 'pool_1'),
+                             ('pool_1', 'output')],
+                            {'input': {'shape': input_shape},
+                             'pool_1': {**params, 'spatial_dims': [2, 3]},
+                             'output': {'is_output': True, 'shape': None}})
+        pool_1_node = Node(graph, 'pool_1')
+        for param in params.keys():
+            if type(params[param]) is np.ndarray:
+                port_lambda = lambda x: x
+                self.assertEqual(params[param][2],
+                                 spatial_attr_getter(pool_1_node, field=param, dim=0, post=port_lambda))
+                self.assertEqual(params[param][3],
+                                 spatial_attr_getter(pool_1_node, field=param, dim=1, post=port_lambda))
+
+    def test_attr_getter(self):
+        nodes = {'input': {'kind': 'data'},
+                 'reshape': {'type': 'Reshape', 'kind': 'op'},
+                 'output': {'kind': 'data'}
+                 }
+        input_shape = np.array([1, 125, 13, 13])
+        params = {
+            'dim': [1, 1, 2, 3],
+            'max_size': np.array([3, 2, 1, 0])
+        }
+        expect_params = {
+            'dim': "1,1,2,3",
+            'max_size': "3,2,1,0",
+        }
+        graph = build_graph(nodes,
+                            [('input', 'reshape'),
+                             ('reshape', 'output')],
+                            {'input': {'shape': input_shape},
+                             'reshape': {**params, 'spatial_dims': [2, 3]},
+                             'output': {'is_output': True, 'shape': None}})
+        pool_1_node = Node(graph, 'reshape')
+        for param in params.keys():
+            if type(params[param]) is list:
+                self.assertEqual(expect_params[param],
+                                 attr_getter(pool_1_node, param))
+
+
+class TestAddInputOp(unittest.TestCase):
+    nodes = [
+        ('op_node', {'kind': 'op'}),
+        ('future_input', {'kind': 'op'}),
+        ('another_node', {'kind': 'op'}),
+    ]
+    edges = [('future_input', 'op_node', {'in': 1, 'out': 0}),
+             ('another_node', 'op_node', {'in': 0, 'out': 0})]
+
+    def test_in_port_no_data(self):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges)
+        new_input_shape = np.array([1, 2, 3, 4])
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges[1:],
+                                           new_nodes_with_attrs=[('input_node', {'kind': 'op', 'op': 'Placeholder',
+                                                                                 'shape': new_input_shape})],
+                                           new_edges_with_attrs=[('input_node', 'op_node', {'in': 1, 'out': 0})])
+        add_input_op(graph, 'op_node', 1, data=False, shape=new_input_shape)
+        graph.remove_edge('future_input', 'op_node')
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='op_node')
+        self.assertTrue(flag, resp)
+
+    def test_in_port_with_data(self):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges)
+        new_input_shape = np.array([1, 2, 3, 4])
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges[1:],
+                                           new_nodes_with_attrs=[('input_node', {'kind': 'op', 'op': 'Placeholder',
+                                                                                 'shape': new_input_shape}),
+                                                                 ('input_data', {'kind': 'data'})],
+                                           new_edges_with_attrs=[('input_node', 'input_data', {'in': 0, 'out': 0}),
+                                                                 ('input_data', 'op_node', {'in': 1, 'out': 0})])
+        add_input_op(graph, 'op_node', 1, data=True, shape=new_input_shape)
+        graph.remove_edge('future_input', 'op_node')
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='op_node')
+        self.assertTrue(flag, resp)
+
+    nodes_out = [
+        ('op_node', {'kind': 'op'}),
+        ('future_input', {'kind': 'op'}),
+        ('another_node', {'kind': 'op'}),
+    ]
+    edges_out = [('op_node', 'future_input', {'in': 0, 'out': 1}),
+                 ('op_node', 'another_node', {'in': 0, 'out': 0})]
+
+    def test_out_port_no_data(self):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes_out, edges_with_attrs=self.edges_out)
+        new_input_shape = np.array([1, 2, 3, 4])
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes_out, edges_with_attrs=self.edges_out[1:],
+                                           new_nodes_with_attrs=[('input_node', {'kind': 'op', 'op': 'Placeholder',
+                                                                                 'shape': new_input_shape})],
+                                           new_edges_with_attrs=[('input_node', 'future_input', {'in': 0, 'out': 0})])
+        add_input_op(graph, 'op_node', 1, data=False, shape=new_input_shape, is_out_port=True)
+        graph.remove_edge('op_node', 'future_input')
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='another_node')
+        self.assertTrue(flag, resp)
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='future_input')
+        self.assertTrue(flag, resp)
+
+    def test_out_port_with_data(self):
+        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes_out, edges_with_attrs=self.edges_out[1:],
+                                       new_nodes_with_attrs=[('input_data', {'kind': 'data', 'shape': None})],
+                                       new_edges_with_attrs=[('op_node', 'input_data', {'out': 1, 'in': 0}),
+                                                             ('input_data', 'future_input', {'in': 0, 'out': 0})])
+        new_input_shape = np.array([1, 2, 3, 4])
+        graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes_out, edges_with_attrs=self.edges_out[1:],
+                                           new_nodes_with_attrs=[('input_node', {'kind': 'op', 'op': 'Placeholder',
+                                                                                 'shape': new_input_shape}),
+                                                                 ('input_data', {'kind': 'data', 'shape': None})],
+                                           new_edges_with_attrs=[('input_node', 'input_data', {'in': 0, 'out': 0}),
+                                                                 ('input_data', 'future_input', {'in': 0, 'out': 0})])
+        add_input_op(graph, 'op_node', 1, data=True, shape=new_input_shape, is_out_port=True)
+        graph.remove_edge('op_node', 'input_data')
+
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='another_node')
+        self.assertTrue(flag, resp)
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='future_input')
+        self.assertTrue(flag, resp)
+
+
+class TestInputAddition(unittest.TestCase):
+    # Tests for adding inputs
+    nodes = {'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+             'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'},
+             'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'},
+             }
+    edges = [
+        ('node_1', 'conv_1'),
+        ('conv_1', 'relu_1'),
+    ]
+
+    def test_none_out_port_raise(self):
+        graph = build_graph(self.nodes, self.edges)
+        shape = np.array([1, 2, 3, 4])
+        inputs = {'conv_1': [{'shape': shape, 'out': None}]}
+        with self.assertRaisesRegex(Error, 'Output port for input node conv_1 should be specified, it cannot be None!'):
+            add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True)
+
+    def test_wrong_output_port_raise(self):
+        graph = build_graph(self.nodes, self.edges)
+        shape = np.array([1, 2, 3, 4])
+        inputs = {'conv_1': [{'shape': shape, 'out': 5}]}
+        with self.assertRaisesRegex(Error, 'Output port index 5 is out of number of available output ports for node'):
+            add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True)
+
+    def test_wrong_input_port_raise(self):
+        graph = build_graph(self.nodes, self.edges)
+        shape = np.array([1, 2, 3, 4])
+        inputs = {'conv_1': [{'shape': shape, 'in': 5}]}
+        with self.assertRaisesRegex(Error, 'Input port index 5 is out of number of available input ports for node'):
+            add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True)
+
+    def test_one_input_one_shape(self):
+        shape = np.array([1, 2, 3, 4])
+        inputs = {'conv_1': [{'shape': shape}]}
+        nodes = {
+            'old_input': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+            'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'output': {'type': 'SoftMax', 'kind': 'op', 'op': 'NotPlaceholder'}
+        }
+        edges = [
+            ('old_input', 'conv_1'),
+            ('conv_1', 'relu_1'),
+            ('relu_1', 'output')
+        ]
+        graph = build_graph(nodes, edges)
+        add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True)
+        new_input = list(graph.in_edges('conv_1'))[0][0]
+        self.assertFalse(graph.node['old_input']['is_input'])
+        self.assertTrue(graph.node[new_input]['is_input'])
+        self.assertTrue((new_input, 'conv_1') in graph.edges())
+        self.assertTrue(('old_input', 'conv_1') not in graph.edges())
+        shapes_are_equal = np.array_equal(graph.node[new_input]['shape'], shape)
+        self.assertTrue(shapes_are_equal)
+
+    def test_one_input_no_shape(self):
+        shape = None
+        inputs = {'conv_1': [{'shape': shape}]}
+        nodes = {
+            'old_input': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+            'old_input_data': {'kind': 'data', 'value': None, 'shape': np.array([-1, 224, 224, 3])},
+            'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'conv_1_data': {'kind': 'data', 'value': True, 'shape': np.array([-1, 224, 224, 3])},
+            'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'relu_1_data': {'kind': 'data', 'value': None, 'shape': np.array([-1, 112, 112, 64])},
+            'output': {'type': 'SoftMax', 'kind': 'op', 'op': 'NotPlaceholder', 'is_output': True},
+            'output_data': {'name': 'output_data', 'kind': 'data', 'shape': np.array([-1, 112, 112, 64])}
+        }
+        edges = [
+            ('old_input', 'old_input_data'),
+            ('old_input_data', 'conv_1'),
+            ('conv_1', 'conv_1_data'),
+            ('conv_1_data', 'relu_1'),
+            ('relu_1', 'relu_1_data'),
+            ('relu_1_data', 'output'),
+            ('output', 'output_data')
+        ]
+        graph = build_graph(nodes, edges)
+        add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=False)
+        new_input = list(graph.in_edges(list(graph.in_edges('conv_1'))[0][0]))[0][0]
+        new_input_data = list(graph.in_edges('conv_1'))[0][0]
+        self.assertFalse(graph.node['old_input']['is_input'])
+        self.assertTrue(graph.node[new_input]['is_input'])
+        self.assertTrue((new_input_data, 'conv_1') in graph.edges())
+        self.assertTrue(('old_input_data', 'conv_1') not in graph.edges())
+        self.assertIsNotNone(graph.node[new_input_data]['shape'])
+
+    def test_two_inputs_two_shapes_positive_1(self):
+        shape_1 = [1, 2, 3, 4]
+        shape_2 = [4, 3, 2, 1]
+        inputs = {'node_1': [{'shape': shape_1}], 'node_4': [{'shape': shape_2}]}
+        nodes = {
+            'input_1': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+            'input_2': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+            'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'node_2': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'node_3': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'node_4': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'output': {'type': 'Identity', 'kind': 'op', 'op': 'OpOutput', 'is_output': True}
+        }
+        edges = [
+            ('input_1', 'node_1'),
+            ('node_1', 'node_2'),
+            ('node_3', 'output'),
+            ('input_2', 'node_4'),
+            ('node_4', 'output')
+        ]
+        graph = build_graph(nodes, edges)
+        add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True)
+        new_input_1 = list(graph.in_edges('node_1'))[0][0]
+        new_input_2 = list(graph.in_edges('node_4'))[0][0]
+        self.assertFalse(graph.node['input_1']['is_input'])
+        self.assertTrue(graph.node[new_input_1]['is_input'])
+        self.assertTrue(graph.node[new_input_2]['is_input'])
+        self.assertTrue((new_input_1, 'node_1') in graph.edges())
+        self.assertTrue((new_input_2, 'node_4') in graph.edges())
+        self.assertListEqual(shape_1, graph.node[new_input_1]['shape'])
+        self.assertListEqual(shape_2, graph.node[new_input_2]['shape'])
+
+    def test_two_inputs_two_shapes_not_all_inputs(self):
+        shape_1 = [1, 2, 3, 4]
+        shape_2 = [4, 3, 2, 1]
+        inputs = {'node_1': [{'shape': shape_1}], 'node_4': [{'shape': shape_2}]}
+        nodes = {
+            'input_1': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+            'input_2': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+            'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'node_2': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'node_3': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'node_4': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'output': {'type': 'Identity', 'kind': 'op', 'op': 'OpOutput', 'is_output': True},
+            'input_3': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'}
+        }
+        edges = [
+            ('input_1', 'node_1'),
+            ('node_1', 'node_2'),
+            ('node_3', 'output'),
+            ('input_2', 'node_4'),
+            ('node_4', 'output'),
+            ('input_3', 'output')
+        ]
+        graph = build_graph(nodes, edges)
+        self.assertRaises(Error, add_input_ops, graph, inputs, True)
+
+    # Tests for cases where the graph is cut at input/output ports
+    def test_add_input_with_input_port_before_infer(self):
+        shape = np.array([1, 2, 3, 4])
+        inputs = {'conv_1': [{'shape': shape, 'in': 0}]}
+        nodes = {
+            'old_input': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+            'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'output': {'type': 'SoftMax', 'kind': 'op', 'op': 'NotPlaceholder'}
+        }
+        edges = [
+            ('old_input', 'conv_1'),
+            ('conv_1', 'relu_1'),
+            ('relu_1', 'output')
+        ]
+        graph = build_graph(nodes, edges)
+        add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True)
+
+        # Check that the resulting graph matches the reference
+        graph_ref = build_graph(nodes, edges, update_attributes={'old_input': {'shape': shape}})
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='output')
+        self.assertTrue(flag, resp)
+
+        # Also check that the old input has been replaced by the new input node
+        new_input = list(graph.in_edges('conv_1'))[0][0]
+        self.assertFalse(graph.node['old_input']['is_input'])
+        self.assertTrue(graph.node[new_input]['is_input'])
+        self.assertTrue((new_input, 'conv_1') in graph.edges())
+        self.assertTrue(('old_input', 'conv_1') not in graph.edges())
+
+    def test_add_input_with_output_port_before_infer(self):
+        shape = np.array([1, 2, 3, 4])
+        inputs = {'conv_1': [{'shape': shape, 'out': 0}]}
+        nodes = {
+            'old_input': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+            'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'output': {'type': 'SoftMax', 'kind': 'op', 'op': 'NotPlaceholder'}
+        }
+        edges = [
+            ('old_input', 'conv_1'),
+            ('conv_1', 'relu_1'),
+            ('conv_2', 'relu_1'),
+            ('relu_1', 'output')
+        ]
+        graph = build_graph(nodes, edges)
+        add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True)
+
+        graph_ref = build_graph(nodes_attrs={'new_input': {'kind': 'op', 'op': 'Placeholder', 'shape': shape},
+                                             **nodes},
+                                edges=[('new_input', 'relu_1'),
+                                       ('relu_1', 'output'),
+                                       ('conv_2', 'relu_1'),
+                                       ('old_input', 'conv_1'),],)
+        # Check that the new input is added correctly (with the right ports)
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='output')
+        self.assertTrue(flag, resp)
+
+        # Check that the rest of the graph is not damaged
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='conv_1')
+        self.assertTrue(flag, resp)
+
+        # Check the new input node and its edges
+        self.assertTrue('conv_1/placeholder_out_port_0' in graph.nodes())
+        new_input = 'conv_1/placeholder_out_port_0'
+        self.assertTrue(graph.node[new_input]['is_input'])
+        self.assertTrue((new_input, 'relu_1') in graph.edges())
+        self.assertTrue(('conv_1', 'relu_1') not in graph.edges())
+
+    def test_add_input_with_output_port_after_infer(self):
+        shape = np.array([1, 2, 3, 4])
+        inputs = {'conv_1': [{'shape': shape, 'out': 0}]}
+        nodes = {
+            'old_input': {'type': 'Identity', 'kind': 'op', 'op': 'Placeholder'},
+            'inp_data' : {'kind': 'data', 'shape': shape + 1},
+            'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'},
+            'conv_data': {'kind': 'data', 'shape': shape},
+            'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'},
+        }
+        edges = [
+            ('old_input', 'inp_data'),
+            ('inp_data', 'conv_1'),
+            ('conv_1', 'conv_data'),
+            ('conv_data', 'relu_1'),
+        ]
+        graph = build_graph(nodes, edges)
+        add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=False)
+
+        graph_ref = build_graph(nodes_attrs={'new_input': {'kind': 'op', 'op': 'Placeholder', 'shape': shape},
+                                             **nodes},
+                                edges=[('old_input', 'inp_data'),
+                                       ('inp_data', 'conv_1'),
+                                       ('new_input', 'conv_data'),
+                                       ('conv_data', 'relu_1'),
+                                       ],)
+        # Check that the new input is added correctly (with the right ports)
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='relu_1')
+        self.assertTrue(flag, resp)
+
+        # Check that the rest of the graph is not damaged
+        (flag, resp) = compare_graphs(graph, graph_ref, last_node='conv_1')
+        self.assertTrue(flag, resp)
+
+        # Check the new input node and its edges
+        self.assertTrue('conv_1/placeholder_out_port_0' in graph.nodes())
+        new_input = 'conv_1/placeholder_out_port_0'
+
+        self.assertTrue(graph.node[new_input]['is_input'])
+        self.assertTrue((new_input, 'conv_data') in graph.edges())
+        self.assertTrue(('conv_1', 'conv_data') not in graph.edges())
+
+@generator
+class TestOutputCut(unittest.TestCase):
+    # Output specifications are dicts like {'node_name': [{'port': None}]} or with explicit 'in'/'out' ports
+    @generate({'C': [{'port': None}]}, {'C': [{'out': 0}]}, {'C': [{'out': 1}]})
+    def test_output_port_cut(self, output):
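+        # After cutting at the requested output port of 'C' and cleaning the graph,
+        # 'C' must keep both of its inputs but only one consumer.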
+        nodes = {'A': {'type': 'Identity', 'kind': 'op'},
+                 'B': {'type': 'Identity', 'kind': 'op'},
+                 'C': {'type': 'Identity', 'kind': 'op'},
+                 'D': {'type': 'Identity', 'kind': 'op'},
+                 'E': {'type': 'Identity', 'kind': 'op'},
+                 }
+        edges = [
+            ('A', 'C', {'in': 0, 'out': 0}),
+            ('B', 'C', {'in': 1, 'out': 0}),
+            ('C', 'D', {'in': 0, 'out': 0}),
+            ('C', 'E', {'in': 0, 'out': 1})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        sinks = add_output_ops(graph, output)
+        eliminate.graph_clean_up(graph)
+        self.assertEqual(len(Node(graph, 'C').out_nodes()), 1)
+        self.assertEqual(len(Node(graph, 'C').in_nodes()), 2)
+
+    @generate({'C': [{'in': 0}]}, {'C': [{'in': 1}]})
+    def test_input_port_cut(self, output):
+        nodes = {'A': {'op': 'Placeholder', 'kind': 'op'},
+                 'B': {'op': 'Placeholder', 'kind': 'op'},
+                 'C': {'type': 'Identity', 'kind': 'op'},
+                 'D': {'type': 'Identity', 'kind': 'op'},
+                 'E': {'type': 'Identity', 'kind': 'op'},
+                 }
+        edges = [
+            ('A', 'C', {'in': 0, 'out': 0}),
+            ('B', 'C', {'in': 1, 'out': 0}),
+            ('C', 'D', {'in': 0, 'out': 0}),
+            ('C', 'E', {'in': 0, 'out': 1})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        sinks = add_output_ops(graph, output)
+        eliminate.graph_clean_up(graph)
+        self.assertEqual(len(graph.nodes()), 2)
+
+
+class TestUserDataRepack(unittest.TestCase):
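+    # Toy graph shared by the tests below: placeholders 'Aa' and 'Bb' feed 'Cc',
+    # which in turn feeds 'Dd' and 'Ee'.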
+    nodes = {'A': {'name': 'Aa', 'op': 'Placeholder', 'kind': 'op'},
+             'B': {'name': 'Bb', 'op': 'Placeholder', 'kind': 'op'},
+             'C': {'name': 'Cc', 'type': 'Identity', 'value': None, 'kind': 'op'},
+             'D': {'name': 'Dd', 'type': 'Identity', 'value': None, 'kind': 'op'},
+             'E': {'name': 'Ee', 'type': 'Identity', 'value': None, 'kind': 'op'},
+             }
+    edges = [
+        ('A', 'C', {'in': 0, 'out': 0}),
+        ('B', 'C', {'in': 1, 'out': 0}),
+        ('C', 'D', {'in': 0, 'out': 0}),
+        ('C', 'E', {'in': 0, 'out': 1})
+    ]
+
+    def test_input_user_data_repack_none(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        input, freeze_placeholder = input_user_data_repack(graph, None, None)
+        self.assertEqual(input, None)
+        self.assertEqual(freeze_placeholder, None)
+
+    def test_input_user_data_repack_names_to_ids_list(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        input, freeze_placeholder = input_user_data_repack(graph, ['Aa', 'Bb'], None)
+        self.assertDictEqual(input, {'A': [{'shape': None, 'port': None}], 'B': [{'shape': None, 'port': None}]})
+        self.assertEqual(freeze_placeholder, None)
+
+    def test_input_user_data_repack_names_ports_in_out(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        input, freeze_placeholder = input_user_data_repack(graph, ['Aa:1', '0:Bb'], None)
+        self.assertDictEqual(input, {'A': [{'shape': None, 'out': 1}], 'B': [{'shape': None, 'in': 0}]})
+        self.assertEqual(freeze_placeholder, None)
+
+    def test_input_user_data_repack_dict_with_shapes(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        shape_1 = np.array([1, 160, 160, 3])
+        shape_2 = np.array([1, 127, 127, 3])
+        input, freeze_placeholder = input_user_data_repack(graph, {'Aa': shape_1, 'Bb': shape_2}, None)
+        self.assertDictEqual(input, {'A': [{'shape': shape_1, 'port': None}], 'B': [{'shape': shape_2, 'port': None}]})
+        self.assertEqual(freeze_placeholder, None)
+
+    def test_input_user_data_repack_dict_with_shapes_and_ports(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        shape_1 = np.array([1, 160, 160, 3])
+        shape_2 = np.array([1, 127, 127, 3])
+        input, freeze_placeholder = input_user_data_repack(graph, {'Aa:0': shape_1, 'Bb:1': shape_2}, None)
+        self.assertDictEqual(input, {'A': [{'shape': shape_1, 'out': 0}], 'B': [{'shape': shape_2, 'out': 1}]})
+        self.assertEqual(freeze_placeholder, None)
+
+    def test_freeze_placeholder_and_input(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        shape_1 = np.array([1, 160, 160, 3])
+        input, freeze_placeholder = input_user_data_repack(graph, {'Aa:0': shape_1}, {'Bb': False})
+        self.assertDictEqual(input, {'A': [{'shape': shape_1, 'out': 0}], 'B': [{'shape': None, 'port': None}]})
+        self.assertEqual(freeze_placeholder, {'B': False})
+
+    def test_error(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        self.assertRaises(Error, input_user_data_repack, graph, np.array([1, 227, 227, 3]), None)
+
+    def test_error_3(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        self.assertRaises(Error, input_user_data_repack, graph, ['Bcb'], None)
+
+    def test_input_and_freeze(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        shape_1 = np.array([1, 160, 160, 3])
+        input, freeze_placeholder = input_user_data_repack(graph, shape_1, {'Bb': True})
+        self.assertDictEqual(input, {'A': [{'shape': shape_1, 'port': None}], 'B': [{'shape': None, 'port': None}]})
+        self.assertDictEqual(freeze_placeholder, {'B': True})
+
+    def test_output_user_data_repack(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        output = output_user_data_repack(graph, ['Cc'])
+        self.assertDictEqual(output, {'C': [{'port': None}]})
+
+    def test_output_user_data_repack_ports(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        output = output_user_data_repack(graph, ['Cc:1', '0:Cc'])
+        self.assertDictEqual(output, {'C': [{'out': 1}, {'in': 0}]})
+
+    def test_output_user_data_repack_none(self):
+        graph = build_graph_with_edge_attrs(self.nodes, self.edges)
+        output = output_user_data_repack(graph, None)
+        self.assertEqual(output, None)
+
+
+class TestExtractPort(unittest.TestCase):
+    def test_out_port(self):
+        name, in_port, out_port = extract_port_from_string('node_name:1')
+        self.assertEqual(name, 'node_name')
+        self.assertEqual(in_port, None)
+        self.assertEqual(out_port, 1)
+
+    def test_in_port(self):
+        name, in_port, out_port = extract_port_from_string('0:node_name')
+        self.assertEqual(name, 'node_name')
+        self.assertEqual(in_port, 0)
+        self.assertEqual(out_port, None)
+
+    def test_no_port(self):
+        name, in_port, out_port = extract_port_from_string('node_name')
+        self.assertEqual(name, 'node_name')
+        self.assertEqual(in_port, None)
+        self.assertEqual(out_port, None)
+
+    def test_non_int(self):
+        self.assertRaises(Error, extract_port_from_string, 'port:node_name')
+
+    def test_two_ports(self):
+        self.assertRaises(Error, extract_port_from_string, '1:node_name:0')
+
+
+class TestCaffePythonFrontExtractorOp(unittest.TestCase):
+    def test_get_attrs(self):
+        exp_attrs = {"test_attr_1": 12, "test_attr_2": "sdf sdf"}
+        param_str = "'test_attr_1': 12, 'test_attr_2': 'sdf sdf'"
+        attrs = CaffePythonFrontExtractorOp.get_attrs(FakePythonParam(FakeMultiParam({'param_str': param_str})))
+        self.assertEqual(exp_attrs, attrs)
\ No newline at end of file
diff --git a/model-optimizer/mo/front/kaldi/extractors/add_shift_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/add_shift_ext_test.py
new file mode 100644 (file)
index 0000000..08703d2
--- /dev/null
@@ -0,0 +1,56 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from mo.front.kaldi.extractors.add_shift_ext import AddShiftFrontExtractor
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+from mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading
+from mo.ops.op import Op
+from mo.ops.scale_shift import ScaleShiftOp
+
+
+class AddShiftFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['ScaleShift'] = ScaleShiftOp
+
+    @classmethod
+    def create_pb_for_test_node(cls):
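+        # Build a minimal AddShift parameter blob: a <LearnRateCoef> tag followed by
+        # an 'FV' vector of zeros whose length equals the input's second dimension.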
+        input_shape = cls.test_node.in_node().shape
+        pb = cls.write_tag_with_value('<LearnRateCoef>', 0)
+        pb += cls.write_tag_with_value('FV', input_shape[1])
+        for i in np.zeros(input_shape[1], dtype=np.uint32):
+            pb += TestKaldiUtilsLoading.pack_value(i, TestKaldiUtilsLoading.uint32_fmt)
+        cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb)
+        AddShiftFrontExtractor.extract(cls.test_node)
+
+    def test_assertion(self):
+        self.assertRaises(AttributeError, AddShiftFrontExtractor.extract, None)
+
+    def test_extracted_shapes_add_shift(self):
+        weights = self.test_node.weights
+        biases = self.test_node.biases
+        weights_shape = weights.shape[0]
+        self.assertEqual(self.test_node.in_node().shape[1], weights_shape)
+        self.assertEqual(biases.shape[0], weights_shape)
+
+    def test_extracted_blobs_add_shift(self):
+        weights = self.test_node.weights
+        biases = self.test_node.biases
+        self.assertTrue(np.array_equal(weights, np.ones(weights.shape)))
+        self.assertTrue(np.array_equal(biases, np.zeros(biases.shape)))
+        self.assertTrue(self.test_node.bias_term)
diff --git a/model-optimizer/mo/front/kaldi/extractors/affine_component_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/affine_component_ext_test.py
new file mode 100644 (file)
index 0000000..14b083b
--- /dev/null
@@ -0,0 +1,48 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from mo.front.kaldi.extractors.affine_transform_ext import AffineTransformFrontExtractor
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+from mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading
+from mo.ops.inner_product import InnerProduct
+from mo.ops.op import Op
+
+
+class AffineComponentFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['FullyConnected'] = InnerProduct
+
+    @classmethod
+    def create_pb_for_test_node(cls):
+        pb = KaldiFrontExtractorTest.generate_learn_info()
+        pb += KaldiFrontExtractorTest.generate_matrix([10, 10])
+        pb += KaldiFrontExtractorTest.generate_vector(10)
+        cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb)
+        AffineTransformFrontExtractor.extract(cls.test_node)
+
+    def test_assertion(self):
+        self.assertRaises(AttributeError, AffineTransformFrontExtractor.extract, None)
+
+    def test_attrs(self):
+        self.assertEqual(self.test_node['out-size'], 10)
+        self.assertEqual(self.test_node['layout'], 'NCHW')
+
+    def test_out_blobs(self):
+        self.assertTrue(np.array_equal(self.test_node.weights, range(10 * 10)))
+        self.assertTrue(np.array_equal(self.test_node.biases, range(10)))
diff --git a/model-optimizer/mo/front/kaldi/extractors/affine_transform_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/affine_transform_ext_test.py
new file mode 100644 (file)
index 0000000..7b9f41c
--- /dev/null
@@ -0,0 +1,52 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from mo.front.kaldi.extractors.affine_transform_ext import AffineTransformFrontExtractor
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+from mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading
+from mo.ops.inner_product import InnerProduct
+from mo.ops.op import Op
+
+
+class AffineTransformFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['FullyConnected'] = InnerProduct
+
+    @classmethod
+    def create_pb_for_test_node(cls):
+        pb = KaldiFrontExtractorTest.generate_learn_info()
+        pb += KaldiFrontExtractorTest.generate_matrix([10, 10])
+        pb += KaldiFrontExtractorTest.generate_vector(10)
+        cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb)
+        AffineTransformFrontExtractor.extract(cls.test_node)
+
+    def test_assertion(self):
+        self.assertRaises(AttributeError, AffineTransformFrontExtractor.extract, None)
+
+    def test_attrs(self):
+        self.assertEqual(self.test_node['out-size'], 10)
+        self.assertEqual(self.test_node['layout'], 'NCHW')
+
+    def test_out_blobs(self):
+        self.assertTrue(np.array_equal(self.test_node.weights, range(10 * 10)))
+        self.assertTrue(np.array_equal(self.test_node.biases, range(10)))
+
+
diff --git a/model-optimizer/mo/front/kaldi/extractors/common_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/common_ext_test.py
new file mode 100644 (file)
index 0000000..e9cdb98
--- /dev/null
@@ -0,0 +1,113 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import networkx as nx
+import numpy as np
+
+from mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+
+class KaldiFrontExtractorTest(unittest.TestCase):
+    graph = nx.MultiDiGraph()
+
+    @classmethod
+    def setUp(cls):
+        cls.nodes_attributes = {
+            'input_data_node': {
+                'name': 'input_data_node',
+                'kind': 'data',
+                'shape': np.array([1, 32, 1, 40], dtype=np.int64),
+            },
+            'weights': {
+                'name': 'weights',
+                'kind': 'data',
+                'shape': np.array([10, 32, 1, 8], dtype=np.int64),
+                'value': np.zeros((10, 32, 1, 8)),
+                'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis'],
+            },
+            'test_node': {
+                'name': 'test_node',
+                'kind': 'op'
+            },
+            'output_data_node': {
+                'name': 'output_data_node',
+                'kind': 'data',
+                'shape': None
+            }
+        }
+        cls.create_graph()
+        cls.test_node = Node(cls.graph, 'test_node')
+        cls.graph.add_node(cls.test_node.id, type='test_node')
+        cls.register_op()
+        cls.create_pb_for_test_node()
+
+    @staticmethod
+    def register_op():
+        raise NotImplementedError('Please, implement register_op')
+
+    @classmethod
+    def create_graph(cls):
+        cls.graph = build_graph(cls.nodes_attributes, [
+            ('input_data_node', 'test_node'),
+            ('test_node', 'output_data_node')
+        ], nodes_with_edges_only=True)
+
+    @classmethod
+    def create_pb_for_test_node(cls):
+        pass
+
+    @staticmethod
+    def generate_learn_info():
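+        # Learning-rate related tags shared by several Kaldi components.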
+        pb = KaldiFrontExtractorTest.write_tag_with_value('<LearnRateCoef>', 0)
+        pb += KaldiFrontExtractorTest.write_tag_with_value('<BiasLearnRateCoef>', 1)
+        pb += KaldiFrontExtractorTest.write_tag_with_value('<MaxNorm>', 2)
+        return pb
+
+    @staticmethod
+    def generate_matrix(shape):
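+        # 'FM' marks a float matrix: row count, then column count, then raw float32 data.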
+        pb = KaldiFrontExtractorTest.write_tag_with_value('FM', shape[0])
+        pb += KaldiFrontExtractorTest.write_int_value(shape[1])
+        pb += KaldiFrontExtractorTest.generate_blob(np.prod(shape))
+        return pb
+
+    @staticmethod
+    def generate_vector(size: int) -> bytes:
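+        # 'FV' marks a float vector of the given size followed by raw float32 data.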
+        pb = KaldiFrontExtractorTest.write_tag_with_value('FV', size)
+        pb += KaldiFrontExtractorTest.generate_blob(size)
+        return pb
+
+    @staticmethod
+    def generate_blob(size: int) -> bytes:
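+        # Serialize `size` consecutive float32 values: 0, 1, ..., size - 1.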
+        pb = b''
+        for i in range(size):
+            pb += TestKaldiUtilsLoading.pack_value(i, TestKaldiUtilsLoading.float32_fmt)
+        return pb
+
+    @staticmethod
+    def write_tag_with_value(tag: str, value) -> bytes:
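+        # A tag is written as ASCII text followed by a space and a binary integer value.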
+        pb = bytes(tag + ' ', 'ascii')
+        return pb + KaldiFrontExtractorTest.write_int_value(value)
+
+    @staticmethod
+    def write_int_value(value) -> bytes:
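+        # Kaldi binary integers carry a one-byte size specifier (4 for int32) before the value.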
+        pb = TestKaldiUtilsLoading.pack_value(4, 'B')
+        pb += TestKaldiUtilsLoading.pack_value(value, TestKaldiUtilsLoading.uint32_fmt)
+        return pb
diff --git a/model-optimizer/mo/front/kaldi/extractors/concat_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/concat_ext_test.py
new file mode 100644 (file)
index 0000000..b2274ba
--- /dev/null
@@ -0,0 +1,30 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+from mo.front.kaldi.extractors.concat_ext import ConcatFrontExtractor
+from mo.ops.convolution import Convolution
+from mo.ops.op import Op
+
+
+class ConcatFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['Concat'] = Convolution
+
+    def test_concat(self):
+        ConcatFrontExtractor.extract(self.test_node)
+        self.assertEqual(self.test_node.axis, 1)
diff --git a/model-optimizer/mo/front/kaldi/extractors/convolutional_component_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/convolutional_component_ext_test.py
new file mode 100644 (file)
index 0000000..50fef84
--- /dev/null
@@ -0,0 +1,67 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import numpy as np
+
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+from mo.front.kaldi.extractors.convolutional_component_ext import ConvolutionalComponentFrontExtractor
+from mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading
+from mo.ops.convolution import Convolution
+from mo.ops.op import Op
+
+
+class ConvolutionalComponentFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['Convolution'] = Convolution
+
+    @classmethod
+    def create_pb_for_test_node(cls):
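+        # Convolutional component header (<PatchDim>, <PatchStep>, <PatchStride>),
+        # learning info, a 2x1 <Filters> matrix and a <Bias> vector of size 2.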
+        pb = KaldiFrontExtractorTest.write_tag_with_value('<PatchDim>', 2)
+        pb += KaldiFrontExtractorTest.write_tag_with_value('<PatchStep>', 2)
+        pb += KaldiFrontExtractorTest.write_tag_with_value('<PatchStride>', 4)
+        pb += KaldiFrontExtractorTest.generate_learn_info()
+        pb += b'<Filters> '
+        pb += KaldiFrontExtractorTest.generate_matrix([2, 1])
+        pb += b'<Bias> '
+        pb += KaldiFrontExtractorTest.generate_vector(2)
+        cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb)
+        ConvolutionalComponentFrontExtractor.extract(cls.test_node)
+
+    def test_assertion(self):
+        self.assertRaises(AttributeError, ConvolutionalComponentFrontExtractor.extract, None)
+
+    def test_attrs(self):
+        val_attrs = {
+            'kernel': [1, 1, 1, 2],
+            'stride': [1, 1, 1, 2],
+            'pad': [[[0, 0], [0, 0], [0, 0], [0, 0]]],
+            'output': 2,
+            'patch_stride': 4,
+            'spatial_dims': [2, 3],
+            'channel_dims': [1],
+            'batch_dims': [0],
+            'dilation': [1, 1, 1, 1]
+        }
+        for attr in val_attrs:
+            if isinstance(val_attrs[attr], list):
+                self.assertTrue((self.test_node[attr] == val_attrs[attr]).all())
+            else:
+                self.assertEqual(self.test_node[attr], val_attrs[attr])
+
+    def test_convolution_blobs(self):
+        self.assertTrue(np.array_equal(self.test_node.weights, [0, 1]))
+        self.assertTrue(np.array_equal(self.test_node.biases, [0, 1]))
+
diff --git a/model-optimizer/mo/front/kaldi/extractors/fixed_affine_component_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/fixed_affine_component_ext_test.py
new file mode 100644 (file)
index 0000000..e03f698
--- /dev/null
@@ -0,0 +1,49 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+from mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor
+from mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading
+from mo.ops.inner_product import InnerProduct
+from mo.ops.op import Op
+
+
+class FixedAffineComponentFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['FullyConnected'] = InnerProduct
+
+    @classmethod
+    def create_pb_for_test_node(cls):
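+        # <LinearParams> holds a 10x10 weight matrix, <BiasParams> a bias vector of size 10.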
+        pb = b'<LinearParams> ' + KaldiFrontExtractorTest.generate_matrix([10, 10])
+        pb += b'<BiasParams> ' + KaldiFrontExtractorTest.generate_vector(10)
+        cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb)
+        FixedAffineComponentFrontExtractor.extract(cls.test_node)
+
+    def test_assertion(self):
+        self.assertRaises(AttributeError, FixedAffineComponentFrontExtractor.extract, None)
+
+    def test_attrs(self):
+        self.assertEqual(self.test_node['out-size'], 10)
+        self.assertEqual(self.test_node['layout'], 'NCHW')
+
+    def test_out_blobs(self):
+        self.assertTrue(np.array_equal(self.test_node.weights, range(10 * 10)))
+        self.assertTrue(np.array_equal(self.test_node.biases, range(10)))
+
+
diff --git a/model-optimizer/mo/front/kaldi/extractors/max_pooling_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/max_pooling_ext_test.py
new file mode 100644 (file)
index 0000000..b3e7ad1
--- /dev/null
@@ -0,0 +1,52 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+
+from mo.front.kaldi.extractors.max_pooling_ext import MaxPoolingComponentFrontExtractor
+from mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading
+from mo.ops.op import Op
+from mo.ops.pooling import Pooling
+
+
+class MaxPoolingComponentFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['Pooling'] = Pooling
+
+    @classmethod
+    def create_pb_for_test_node(cls):
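+        # Pooling parameters: <PoolSize>, <PoolStep> and <PoolStride> tags.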
+        pb = KaldiFrontExtractorTest.write_tag_with_value('<PoolSize>', 2)
+        pb += KaldiFrontExtractorTest.write_tag_with_value('<PoolStep>', 2)
+        pb += KaldiFrontExtractorTest.write_tag_with_value('<PoolStride>', 4)
+        cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb)
+        MaxPoolingComponentFrontExtractor.extract(cls.test_node)
+
+    def test_assertion(self):
+        self.assertRaises(AttributeError, MaxPoolingComponentFrontExtractor.extract, None)
+
+    def test_attrs(self):
+        val_attrs = {
+            'window': [1, 1, 1, 2],
+            'stride': [1, 1, 2, 2],
+            'pool_stride': 4,
+            'pad': [[[0, 0], [0, 0], [0, 0], [0, 0]]]
+        }
+        for attr in val_attrs:
+            if isinstance(val_attrs[attr], list):
+                self.assertTrue((self.test_node[attr] == val_attrs[attr]).all())
+            else:
+                self.assertEqual(self.test_node[attr], val_attrs[attr])
diff --git a/model-optimizer/mo/front/kaldi/extractors/rescale_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/rescale_ext_test.py
new file mode 100644 (file)
index 0000000..b7628bb
--- /dev/null
@@ -0,0 +1,51 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+from mo.front.kaldi.extractors.rescale_ext import RescaleFrontExtractor
+from mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading
+from mo.ops.op import Op
+from mo.ops.scale_shift import ScaleShiftOp
+
+
+class RescaleFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['ScaleShift'] = ScaleShiftOp
+
+    @classmethod
+    def create_pb_for_test_node(cls):
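+        # Build a Rescale parameter blob: <LearnRateCoef> followed by an 'FV' vector
+        # of consecutive values whose length equals the input's second dimension.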
+        input_shape = cls.test_node.in_node().shape
+        pb = cls.write_tag_with_value('<LearnRateCoef>', 0)
+        pb += cls.write_tag_with_value('FV', input_shape[1])
+        for i in range(input_shape[1]):
+            pb += TestKaldiUtilsLoading.pack_value(i, TestKaldiUtilsLoading.float32_fmt)
+        cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb)
+        RescaleFrontExtractor.extract(cls.test_node)
+
+    def test_assertion(self):
+        self.assertRaises(AttributeError, RescaleFrontExtractor.extract, None)
+
+    def test_extracted_shapes_add_shift(self):
+        weights = self.test_node.weights
+        weights_shape = weights.shape[0]
+        self.assertEqual(self.test_node.in_node().shape[1], weights_shape)
+
+    def test_extracted_blobs_add_shift(self):
+        weights = self.test_node.weights
+        self.assertTrue(np.array_equal(weights, range(self.test_node.in_node().shape[1])))
diff --git a/model-optimizer/mo/front/kaldi/extractors/sigmoid_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/sigmoid_ext_test.py
new file mode 100644 (file)
index 0000000..521ac06
--- /dev/null
@@ -0,0 +1,33 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.kaldi.extractors.sigmoid_ext import SigmoidFrontExtractor
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+from mo.ops.activation import Activation
+from mo.ops.op import Op
+
+
+class SigmoidFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['Activation'] = Activation
+
+    def test_assertion(self):
+        self.assertRaises(AttributeError, SigmoidFrontExtractor.extract, None)
+
+    def test_extracted_operation(self):
+        SigmoidFrontExtractor.extract(self.test_node)
+        self.assertEqual(self.test_node.operation, 'sigmoid')
diff --git a/model-optimizer/mo/front/kaldi/extractors/slice_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/slice_ext_test.py
new file mode 100644 (file)
index 0000000..0c2a16c
--- /dev/null
@@ -0,0 +1,35 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+from mo.front.kaldi.extractors.slice_ext import SliceFrontExtractor
+from mo.ops.op import Op
+from mo.ops.slice import Slice
+from mo.utils.unittest.extractors import FakeMultiParam
+
+
+class SliceFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['Slice'] = Slice
+        cls.slice_params = {
+            'slice_point': [99, 1320],
+            'axis': 1
+        }
+        cls.test_node['pb'] = FakeMultiParam(cls.slice_params)
+
+    def test_assertion_no_pb(self):
+        self.assertRaises(AttributeError, SliceFrontExtractor.extract, None)
diff --git a/model-optimizer/mo/front/kaldi/extractors/tanh_ext_test.py b/model-optimizer/mo/front/kaldi/extractors/tanh_ext_test.py
new file mode 100644 (file)
index 0000000..4604022
--- /dev/null
@@ -0,0 +1,33 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
+from mo.front.kaldi.extractors.tanh_component_ext import TanhFrontExtractor
+from mo.ops.activation import Activation
+from mo.ops.op import Op
+
+
+class TanhFrontExtractorTest(KaldiFrontExtractorTest):
+    @classmethod
+    def register_op(cls):
+        Op.registered_ops['Activation'] = Activation
+
+    def test_assertion(self):
+        self.assertRaises(AttributeError, TanhFrontExtractor.extract, None)
+
+    def test_extracted_operation(self):
+        TanhFrontExtractor.extract(self.test_node)
+        self.assertEqual(self.test_node.operation, 'tanh')
diff --git a/model-optimizer/mo/front/kaldi/loader/utils_test.py b/model-optimizer/mo/front/kaldi/loader/utils_test.py
new file mode 100644 (file)
index 0000000..ba5b06b
--- /dev/null
@@ -0,0 +1,97 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import io
+import struct
+import unittest
+
+from mo.front.kaldi.loader.utils import end_of_nnet_tag, end_of_component_tag, get_bool, get_uint16, get_uint32, \
+    get_uint64, read_binary_bool_token, read_binary_integer32_token, read_binary_integer64_token, find_next_tag, \
+    find_next_component, find_end_of_component, get_parameters
+from mo.utils.error import Error
+
+
+class TestKaldiUtilsLoading(unittest.TestCase):
+    bool_fmt = '?'
+    uint16_fmt = 'H'
+    uint32_fmt = 'I'
+    uint64_fmt = 'q'
+    float32_fmt = 'f'
+
+    @staticmethod
+    def bytesio_from(buffer):
+        return io.BytesIO(buffer)
+
+    @staticmethod
+    def pack_value(value, fmt):
+        return struct.pack(fmt, value)
+
+    def test_check_common_tags(self):
+        self.assertEqual(end_of_nnet_tag, '</Nnet>')
+        self.assertEqual(end_of_component_tag, '<!EndOfComponent>')
+
+    def test_check_results_getting_function(self):
+        self.assertTrue(get_bool(self.pack_value(True, self.bool_fmt)))
+        self.assertFalse(get_bool(self.pack_value(False, self.bool_fmt)))
+        self.assertEqual(get_uint16(self.pack_value(16, self.uint16_fmt)), 16)
+        self.assertEqual(get_uint32(self.pack_value(32, self.uint32_fmt)), 32)
+        self.assertEqual(get_uint64(self.pack_value(64, self.uint64_fmt)), 64)
+
+    def test_read_binary_bool_token(self):
+        true_value = self.bytesio_from(self.pack_value(True, self.bool_fmt))
+        false_value = self.bytesio_from(self.pack_value(False, self.bool_fmt))
+        self.assertTrue(read_binary_bool_token(true_value))
+        self.assertFalse(read_binary_bool_token(false_value))
+
+    def test_read_binary_integer32_token(self):
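+        # The stream starts with a one-byte size specifier (4) followed by the int32 value.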
+        stream = self.bytesio_from(self.pack_value(4, 'B') + self.pack_value(32, self.uint32_fmt))
+        self.assertEqual(read_binary_integer32_token(stream), 32)
+
+    def test_read_binary_integer64_token(self):
+        stream = self.bytesio_from(self.pack_value(8, 'B') + self.pack_value(64, self.uint64_fmt))
+        self.assertEqual(read_binary_integer64_token(stream), 64)
+
+    def test_find_next_tag(self):
+        test_token = b'<TestToken>'
+        self.assertEqual(find_next_tag(self.bytesio_from(test_token)), test_token.decode('ascii'))
+        fake_token = b'<FakeBegin' + test_token
+        self.assertEqual(find_next_tag(self.bytesio_from(fake_token)), test_token.decode('ascii'))
+
+    def test_find_next_tag_raise_error(self):
+        test_token = b'some bytes'
+        self.assertRaises(Error, find_next_tag, self.bytesio_from(test_token))
+
+    def test_find_next_component(self):
+        component = b'<LstmProjectedStreams>'
+        test_file = b'<Nnet>somefakeinfo<another>info' + component + b'<tag><!EndOfComponent></Nnet>'
+        self.assertEqual(find_next_component(self.bytesio_from(test_file)), component.decode('ascii').lower()[1:-1])
+
+    def test_find_next_component_end_of_nnet(self):
+        test_file = b'<Nnet>somefakeinfo<another>info<tag><!EndOfComponent></Nnet>'
+        self.assertEqual(find_next_component(self.bytesio_from(test_file)), end_of_nnet_tag.lower()[1:-1])
+
+    def test_find_end_of_component(self):
+        component = '<AffineComponent>'
+        test_file = b'somefakeinfo<another>info<tag>' + bytes(end_of_component_tag, 'ascii') + b'</Nnet>'
+        end_tag, position = find_end_of_component(self.bytesio_from(test_file), component.lower()[1:-1])
+        self.assertEqual(end_tag, end_of_component_tag)
+        self.assertEqual(position, test_file.decode('ascii').index(end_of_component_tag) + len(end_of_component_tag))
+
+    def test_get_pb(self):
+        component = '<AffineComponent>'
+        test_file = b'somefakeinfo<another>info<tag>' + bytes(end_of_component_tag, 'ascii') + b'</Nnet>'
+        end_tag, end_position = find_end_of_component(self.bytesio_from(test_file), component[1:-1].lower())
+        pb = get_parameters(self.bytesio_from(test_file), 0, end_position)
+        self.assertIsNotNone(pb)
diff --git a/model-optimizer/mo/front/mxnet/extractors/activation_test.py b/model-optimizer/mo/front/mxnet/extractors/activation_test.py
new file mode 100644 (file)
index 0000000..d7e034c
--- /dev/null
@@ -0,0 +1,63 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.mxnet.extractors.activation import ActivationFrontExtractor
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+
+class TestActivationFrontExtractorOp(unittest.TestCase):
+    def test_extract_sigmoid_layer(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+             'act_node': {'type': 'Activation', 'kind': 'op', 'op': 'Activation', },
+             'node_2': {'type': 'Identity', 'kind': 'op'},
+             },
+            [
+                ('node_1', 'act_node'),
+                ('act_node', 'node_2'),
+            ],
+            {
+                'act_node': {'symbol_dict': {'attrs': {'act_type': 'sigmoid'}}},
+            })
+
+        act_node = Node(graph, 'act_node')
+        act_extr_op = ActivationFrontExtractor()
+        supported = act_extr_op.extract(act_node)
+        self.assertTrue(supported)
+        self.assertEqual(act_node['op'], 'Activation')
+
+    def test_extract_relu_layer(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+             'act_node': {'type': 'relu', 'kind': 'op', 'op': 'Activation', },
+             'node_2': {'type': 'Identity', 'kind': 'op'},
+             },
+            [
+                ('node_1', 'act_node'),
+                ('act_node', 'node_2'),
+            ],
+            {
+                'act_node': {'symbol_dict': {'attrs': {'act_type': 'relu'}}},
+            })
+
+        act_node = Node(graph, 'act_node')
+        act_extr_op = ActivationFrontExtractor()
+        supported = act_extr_op.extract(act_node)
+        self.assertTrue(supported)
+        self.assertEqual(act_node['op'], 'ReLU')
diff --git a/model-optimizer/mo/front/mxnet/extractors/crop_test.py b/model-optimizer/mo/front/mxnet/extractors/crop_test.py
new file mode 100644 (file)
index 0000000..06b839c
--- /dev/null
@@ -0,0 +1,44 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.common.partial_infer.crop import crop_infer
+from mo.front.mxnet.extractors.crop import crop_ext
+from mo.front.mxnet.extractors.utils import AttrDictionary
+
+
+class FakeProtoLayer:
+    def __init__(self, val):
+        self.crop_param = val
+
+
+class TestCropExt(unittest.TestCase):
+    def test_crop_ext(self):
+        params = {
+            'offset': '(5, 5)',
+            'num_args': 2
+        }
+        res = crop_ext(AttrDictionary(params))
+        exp_res = {
+            'axis': 2,
+            'offset': [5, 5],
+            'dim': None,
+            'infer': crop_infer,
+            'type': 'Crop'
+        }
+        for key in exp_res.keys():
+            self.assertEqual(res[key], exp_res[key])
diff --git a/model-optimizer/mo/front/mxnet/extractors/eltwise_test.py b/model-optimizer/mo/front/mxnet/extractors/eltwise_test.py
new file mode 100644 (file)
index 0000000..4d07e57
--- /dev/null
@@ -0,0 +1,44 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.mxnet.extractors.eltwise import eltwise_ext
+from mo.front.mxnet.extractors.utils import AttrDictionary
+
+
+class TestEltwiseParsing(unittest.TestCase):
+    def test_eltwise_sum(self):
+        attrs = {}
+        res = eltwise_ext(AttrDictionary(attrs), infer=lambda a, b: a + b, op_type="sum")
+        exp_attrs = {
+            'type': 'Eltwise',
+            'operation': 'sum'
+        }
+
+        for key in exp_attrs.keys():
+            self.assertEqual(res[key], exp_attrs[key])
+
+    def test_eltwise_mul(self):
+        attrs = {}
+        res = eltwise_ext(AttrDictionary(attrs), infer=lambda a, b: a * b, op_type="mul")
+        exp_attrs = {
+            'type': 'Eltwise',
+            'operation': 'mul'
+        }
+
+        for key in exp_attrs.keys():
+            self.assertEqual(res[key], exp_attrs[key])
diff --git a/model-optimizer/mo/front/mxnet/extractors/leaky_relu_test.py b/model-optimizer/mo/front/mxnet/extractors/leaky_relu_test.py
new file mode 100644 (file)
index 0000000..f3fab2b
--- /dev/null
@@ -0,0 +1,89 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.mxnet.extractors.leaky_relu import LeakyReLUFrontExtractor
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+
+class TestLeakyReLUFrontExtractorOp(unittest.TestCase):
+    def test_extract_leaky_relu_layer(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+             'act_node': {'type': 'LeakyReLU', 'kind': 'op', 'op': 'LeakyReLU', },
+             'node_2': {'type': 'Identity', 'kind': 'op'},
+             },
+            [
+                ('node_1', 'act_node'),
+                ('act_node', 'node_2'),
+            ],
+            {
+                'act_node': {'symbol_dict': {'attrs': {'slope': '0.6'}}},
+            })
+
+        act_node = Node(graph, 'act_node')
+        act_extr_op = LeakyReLUFrontExtractor()
+        supported = act_extr_op.extract(act_node)
+        self.assertTrue(supported)
+        self.assertEqual(act_node['op'], 'ReLU')
+        self.assertEqual(act_node['negative_slope'], 0.6)
+
+    def test_extract_prelu_layer(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+             'node_3': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+             'act_node': {'type': 'LeakyReLU', 'kind': 'op', 'op': 'LeakyReLU', },
+             'node_2': {'type': 'Identity', 'kind': 'op'},
+             },
+            [
+                ('node_1', 'act_node'),
+                ('node_3', 'act_node'),
+                ('act_node', 'node_2'),
+            ],
+            {
+                'act_node': {'symbol_dict': {'attrs': {'act_type': 'prelu'}}},
+                'node_3': {'value': np.array([1], dtype=np.float32)},
+            })
+        act_node = Node(graph, 'act_node')
+        act_extr_op = LeakyReLUFrontExtractor()
+        supported = act_extr_op.extract(act_node)
+        self.assertTrue(supported)
+        self.assertEqual(act_node['op'], 'PReLU')
+
+    def test_extract_elu_layer(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+             'act_node': {'type': 'LeakyReLU', 'kind': 'op', 'op': 'LeakyReLU', },
+             'node_2': {'type': 'Placeholder', 'kind': 'op'},
+             },
+            [
+                ('node_1', 'act_node'),
+                ('act_node', 'node_2'),
+            ],
+            {
+                'act_node': {'symbol_dict': {'attrs': {'act_type': 'elu'}}},
+            })
+
+        act_node = Node(graph, 'act_node')
+        act_extr_op = LeakyReLUFrontExtractor()
+        supported = act_extr_op.extract(act_node)
+        self.assertTrue(supported)
+        self.assertEqual(act_node['op'], 'Activation')
+        self.assertEqual(act_node['operation'], 'elu')
diff --git a/model-optimizer/mo/front/mxnet/extractors/multibox_detection_test.py b/model-optimizer/mo/front/mxnet/extractors/multibox_detection_test.py
new file mode 100644 (file)
index 0000000..c6e4c0c
--- /dev/null
@@ -0,0 +1,80 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.mxnet.extractors.multibox_detection import multi_box_detection_ext
+from mo.front.mxnet.extractors.utils import AttrDictionary
+
+
+class TestMultiBoxDetection_Parsing(unittest.TestCase):
+    def test_multi_box_detection_check_attrs(self):
+        attrs = {
+            "force_suppress": "True",
+            "nms_threshold": "0.4",
+            "nms_topk": "400",
+            "variances": "(0.1, 0.1, 0.2, 0.2)"
+        }
+
+        res = multi_box_detection_ext(AttrDictionary(attrs))
+
+        exp_attrs = {
+            'type': 'DetectionOutput',
+            'num_classes': 21,
+            'keep_top_k': 400,
+            'variance_encoded_in_target': 0,
+            'code_type': "caffe.PriorBoxParameter.CENTER_SIZE",
+            'share_location': 1,
+            'confidence_threshold': 0.01,
+            'background_label_id': 0,
+            'nms_threshold': 0.4,
+            'top_k': 400,
+            'decrease_label_id': 1,
+            'clip': 1,
+            'normalized': 1,
+        }
+
+        for key in exp_attrs.keys():
+            self.assertEqual(res[key], exp_attrs[key])
+
+    def test_multi_box_detection_check_attrs_without_top_k(self):
+        attrs = {
+            "force_suppress": "True",
+            "nms_threshold": "0.2",
+            "threshold": "0.02",
+            "variances": "(0.1, 0.1, 0.2, 0.2)"
+        }
+
+        res = multi_box_detection_ext(AttrDictionary(attrs))
+
+        exp_attrs = {
+            'type': 'DetectionOutput',
+            'num_classes': 21,
+            'keep_top_k': -1,
+            'variance_encoded_in_target': 0,
+            'code_type': "caffe.PriorBoxParameter.CENTER_SIZE",
+            'share_location': 1,
+            'confidence_threshold': 0.02,
+            'background_label_id': 0,
+            'nms_threshold': 0.2,
+            'top_k': -1,
+            'decrease_label_id': 1,
+            'clip': 1,
+            'normalized': 1,
+        }
+
+        for key in exp_attrs.keys():
+            self.assertEqual(res[key], exp_attrs[key])
diff --git a/model-optimizer/mo/front/mxnet/extractors/multibox_prior_test.py b/model-optimizer/mo/front/mxnet/extractors/multibox_prior_test.py
new file mode 100644 (file)
index 0000000..cc2cc8f
--- /dev/null
@@ -0,0 +1,56 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.mxnet.extractors.multibox_prior import multi_box_prior_ext
+from mo.front.mxnet.extractors.utils import AttrDictionary
+
+
+class TestMultiBoxPrior_Parsing(unittest.TestCase):
+    def test_multi_box_prior_check_attrs(self):
+        attrs = {
+            'ratios': '(1,2,0.5)',
+            'steps': '(0.02666666666666667, 0.02666666666666667)',
+            'clip': 'False',
+            'sizes': '(0.1,0.141)'
+        }
+
+        res = multi_box_prior_ext(AttrDictionary(attrs))
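+        # Expected mapping (implied by the reference values below): the first element of
+        # 'steps' becomes 'step', 'sizes' become min_size, 'ratios' become aspect_ratio,
+        # and 'variance' falls back to its default string.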
+        exp_attrs = {
+            'type': 'PriorBox',
+            'img_size': 0,
+            'img_h': 0,
+            'img_w': 0,
+            'step': 0.02666666666666667,
+            'step_h': 0,
+            'step_w': 0,
+            'offset': 0.5,
+            'variance': '0.100000,0.100000,0.200000,0.200000',
+            'flip': 0,
+            'clip': 0,
+            'min_size': (0.1, 0.141),
+            'max_size': '',
+            'aspect_ratio': [1, 2, 0.5],
+        }
+
+        for key in exp_attrs.keys():
+            if key in ['aspect_ratio', 'variance']:
+                np.testing.assert_equal(res[key], exp_attrs[key])
+            else:
+                self.assertEqual(res[key], exp_attrs[key])
diff --git a/model-optimizer/mo/front/mxnet/extractors/relu_test.py b/model-optimizer/mo/front/mxnet/extractors/relu_test.py
new file mode 100644 (file)
index 0000000..c045d86
--- /dev/null
@@ -0,0 +1,43 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.mxnet.extractors.relu import ReLUFrontExtractor
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+
+class TestReluFrontExtractorOp(unittest.TestCase):
+    def test_extract_relu_layer(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+             'relu_node': {'type': 'relu', 'kind': 'op', 'op': 'relu', },
+             'node_2': {'type': 'Placeholder', 'kind': 'op'},
+             },
+            [
+                ('node_1', 'relu_node'),
+                ('relu_node', 'node_2'),
+            ],
+            {
+                'relu_node': {'symbol_dict': {'attrs': {}}},
+            })
+
+        relu_node = Node(graph, 'relu_node')
+        relu_extr_op = ReLUFrontExtractor()
+        supported = relu_extr_op.extract(relu_node)
+        self.assertTrue(supported)
+        self.assertEqual(relu_node['op'], 'ReLU')
diff --git a/model-optimizer/mo/front/mxnet/extractors/sigmoid_test.py b/model-optimizer/mo/front/mxnet/extractors/sigmoid_test.py
new file mode 100644 (file)
index 0000000..fcf5893
--- /dev/null
@@ -0,0 +1,44 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.mxnet.extractors.sigmoid import Sigmoid
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+
+class TestSigmoidFrontExtractorOp(unittest.TestCase):
+    def test_extract_sigmoid_layer(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+             'sigmoid_node': {'type': 'sigmoid', 'kind': 'op', 'op': 'sigmoid', },
+             'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             },
+            [
+                ('node_1', 'sigmoid_node'),
+                ('sigmoid_node', 'node_3'),
+            ],
+            {
+                'sigmoid_node': {'symbol_dict': {'attrs': {}}},
+            })
+
+        sigmoid_node = Node(graph, 'sigmoid_node')
+        sigmoid_extr_op = Sigmoid()
+        supported = sigmoid_extr_op.extract(sigmoid_node)
+        self.assertTrue(supported)
+        self.assertEqual(sigmoid_node['op'], 'Activation')
+        self.assertEqual(sigmoid_node['operation'], 'sigmoid')
diff --git a/model-optimizer/mo/front/mxnet/extractors/slice_axis_test.py b/model-optimizer/mo/front/mxnet/extractors/slice_axis_test.py
new file mode 100644 (file)
index 0000000..435044d
--- /dev/null
@@ -0,0 +1,51 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.mxnet.extractors.slice_axis import slice_axis_ext
+from mo.front.mxnet.extractors.utils import AttrDictionary
+from mo.graph.graph import Node
+from mo.utils.unittest.graph import build_graph
+
+
+class TestMXNetSliceAxisExtractorOp(unittest.TestCase):
+    def test_extract_slice_axis_layer(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'slice_axis_node': {'type': 'slice_axis', 'kind': 'op', 'op': 'slice_axis', },
+             'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             },
+            [
+                ('node_1', 'slice_axis_node'),
+                ('slice_axis_node', 'node_3'),
+            ],
+            {
+                'slice_axis_node': {'symbol_dict': {'attrs': {'axis': 0, 'begin': 10, 'end': 25}}},
+            })
+
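+        # slice_axis(axis, begin, end) is expected to map to a Crop layer with
+        # offset=begin and dim=end, per the reference attributes below.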
+        exp_attrs = {
+            'type': 'Crop',
+            'axis': 0,
+            'offset': 10,
+            'dim': 25
+        }
+
+        slice_axis_node = Node(graph, 'slice_axis_node')
+        res = slice_axis_ext(AttrDictionary(slice_axis_node['symbol_dict']['attrs']))
+
+        for key in exp_attrs.keys():
+            self.assertEqual(res[key], exp_attrs[key])
diff --git a/model-optimizer/mo/front/mxnet/extractors/utils_test.py b/model-optimizer/mo/front/mxnet/extractors/utils_test.py
new file mode 100644 (file)
index 0000000..070d532
--- /dev/null
@@ -0,0 +1,202 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+import mxnet as mx
+
+from mo.front.mxnet.extractors.utils import AttrDictionary
+from mo.front.mxnet.extractors.utils import load_params
+
+
+class TestAttrDictionary(unittest.TestCase):
+    def testBool(self):
+        attrs = {
+            "global_pool": "True"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        global_pool = attr_dict.bool("global_pool", False)
+        self.assertEqual(True, global_pool)
+
+    def testBoolAsDigits(self):
+        attrs = {
+            "global_pool": "1"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        global_pool = attr_dict.bool("global_pool", False)
+        self.assertEqual(True, global_pool)
+
+    def testBoolWithoutAttr(self):
+        attrs = {
+            "something": "1"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        global_pool = attr_dict.bool("global_pool", False)
+        self.assertEqual(False, global_pool)
+
+    def testStrAttr(self):
+        attrs = {
+            "something": "Val"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        attr = attr_dict.str("something", "Text")
+        self.assertEqual("Val", attr)
+
+    def testStrAttrWithoutAttr(self):
+        attrs = {
+            "something2": "Val"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        attr = attr_dict.str("something", "Text")
+        self.assertEqual("Text", attr)
+
+    def testFloatAttr(self):
+        attrs = {
+            "something": "0.5"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        attr = attr_dict.float("something", 0.1)
+        self.assertEqual(0.5, attr)
+
+    def testFloatWithoutAttr(self):
+        attrs = {
+            "something2": "0.5"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        attr = attr_dict.float("something", 0.1)
+        self.assertEqual(0.1, attr)
+
+    def testIntAttr(self):
+        attrs = {
+            "something": "5"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        attr = attr_dict.float("something", 1)
+        self.assertEqual(5, attr)
+
+    def testIntWithoutAttr(self):
+        attrs = {
+            "something2": "5"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        attr = attr_dict.float("something", 1)
+        self.assertEqual(1, attr)
+
+    def testTupleAttr(self):
+        attrs = {
+            "something": "(5,6,7)"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        a, b, c = attr_dict.tuple("something", int, (1, 2, 3))
+        self.assertEqual(5, a)
+        self.assertEqual(6, b)
+        self.assertEqual(7, c)
+
+    def testTupleWithoutAttr(self):
+        attrs = {
+            "something2": "(5,6,7)"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        a, b, c = attr_dict.tuple("something", int, (1, 2, 3))
+        self.assertEqual(1, a)
+        self.assertEqual(2, b)
+        self.assertEqual(3, c)
+
+    def testTupleWithEmptyTupleAttr(self):
+        attrs = {
+            "something2": "()"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        a, b = attr_dict.tuple("something", int, (2, 3))
+        self.assertEqual(2, a)
+        self.assertEqual(3, b)
+
+    def testTupleWithEmptyListAttr(self):
+        attrs = {
+            "something2": "[]"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        a, b = attr_dict.tuple("something", int, (2, 3))
+        self.assertEqual(2, a)
+        self.assertEqual(3, b)
+
+    def testListAttr(self):
+        attrs = {
+            "something": "5,6,7"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        attr_list = attr_dict.list("something", int, [1, 2, 3])
+        self.assertEqual(5, attr_list[0])
+        self.assertEqual(6, attr_list[1])
+        self.assertEqual(7, attr_list[2])
+
+    def testListWithoutAttr(self):
+        attrs = {
+            "something2": "5,6,7"
+        }
+
+        attr_dict = AttrDictionary(attrs)
+        attr_list = attr_dict.list("something", int, [1, 2, 3])
+        self.assertEqual(1, attr_list[0])
+        self.assertEqual(2, attr_list[1])
+        self.assertEqual(3, attr_list[2])
+
+
+class TestUtils(unittest.TestCase):
+    @patch('mxnet.nd.load')
+    def test_load_symbol_nodes_from_params(self, mock_nd_load):
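+        # The mocked mxnet.nd.load() result mixes 'arg:'- and 'aux:'-prefixed keys;
+        # 'arg:' entries should end up in the parameter names/values, 'aux:' entries
+        # in the auxiliary states.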
+        mock_nd_load.return_value = {'arg:conv0_weight': mx.nd.array([1, 2], dtype='float32'),
+                                     'arg:conv1_weight': mx.nd.array([2, 3], dtype='float32'),
+                                     'aux:bn_data_mean': mx.nd.array([5, 6], dtype='float32')}
+        model_params = load_params("model.params")
+        self.assertTrue('conv0_weight' in model_params._param_names)
+        self.assertTrue('conv1_weight' in model_params._param_names)
+        self.assertTrue('bn_data_mean' in model_params._aux_names)
+        self.assertEqual([1., 2.], model_params._arg_params['conv0_weight'].asnumpy().tolist())
+        self.assertEqual([2., 3.], model_params._arg_params['conv1_weight'].asnumpy().tolist())
+        self.assertEqual([5., 6.], model_params._aux_params['bn_data_mean'].asnumpy().tolist())
+
+    @patch('mxnet.nd.load')
+    def test_load_symbol_nodes_from_args_nd(self, mock_nd_load):
+        mock_nd_load.return_value = {'conv0_weight': mx.nd.array([1, 2], dtype='float32'),
+                                     'conv1_weight': mx.nd.array([2, 3], dtype='float32')}
+        model_params = load_params("args_model.nd", data_names=('data1', 'data2'))
+        self.assertTrue('conv0_weight' in model_params._param_names)
+        self.assertTrue('conv1_weight' in model_params._param_names)
+        self.assertEqual([1., 2.], model_params._arg_params['conv0_weight'].asnumpy().tolist())
+        self.assertEqual([2., 3.], model_params._arg_params['conv1_weight'].asnumpy().tolist())
+
+    @patch('mxnet.nd.load')
+    def test_load_symbol_nodes_from_auxs_nd(self, mock_nd_load):
+        mock_nd_load.return_value = {'bn_data_mean': mx.nd.array([5, 6], dtype='float32')}
+        model_params = load_params("auxs_model.nd")
+        self.assertTrue('bn_data_mean' in model_params._aux_names)
+        self.assertEqual([5., 6.], model_params._aux_params['bn_data_mean'].asnumpy().tolist())
diff --git a/model-optimizer/mo/front/mxnet/loader_test.py b/model-optimizer/mo/front/mxnet/loader_test.py
new file mode 100644 (file)
index 0000000..2c77d7e
--- /dev/null
@@ -0,0 +1,62 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+from mo.front.mxnet.loader import load_symbol_nodes, parse_input_model
+
+
+class MockSymbolLoadObj():
+    def tojson(self):
+        pass
+
+
+class TestLoader(unittest.TestCase):
+    @patch('json.load')
+    @patch('json.loads')
+    @patch('os.path.isfile')
+    @patch('mxnet.symbol.load')
+    def test_load_symbol_nodes(self, mock_symbol_load, mock_isfile, mock_json_loads, mock_json_load):
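+        # Judging by the mocks below: with the flag set to True the nodes come from the
+        # json produced by mxnet.symbol.load().tojson() (parsed via json.loads), not from
+        # the raw symbol file read via json.load.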
+        mock_isfile.return_value = True
+        mock_json_load.return_value = {'nodes': ''}
+        mock_json_loads.return_value = {'nodes': {'node1': 1}}
+        mock_symbol_load_obj = MockSymbolLoadObj()
+        mock_symbol_load.return_value = mock_symbol_load_obj
+        with patch('mo.front.mxnet.loader.open') as mock_open:
+            self.assertEqual({'node1': 1}, load_symbol_nodes("model_name", True))
+
+    @patch('json.load')
+    @patch('json.loads')
+    @patch('os.path.isfile')
+    @patch('mxnet.symbol.load')
+    def test_load_symbol_with_custom_nodes(self, mock_symbol_load, mock_isfile, mock_json_loads, mock_json_load):
+        mock_isfile.return_value = True
+        mock_json_load.return_value = {'nodes': [{'op': 'custom_op'}, {'op': 'custom_op'}]}
+        mock_json_loads.return_value = {'nodes': {'node1': 1}}
+        mock_symbol_load_obj = MockSymbolLoadObj()
+        mock_symbol_load.return_value = mock_symbol_load_obj
+        with patch('mo.front.mxnet.loader.open') as mock_open:
+            list_nodes = load_symbol_nodes("model_name", False)
+            self.assertEqual(2, len(list_nodes))
+            for node in list_nodes:
+                self.assertEqual({'op': 'custom_op'}, node)
+
+    def test_parse_input_model(self):
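+        # MXNet checkpoints follow the "<prefix>-<epoch>.params" naming convention;
+        # the loader is expected to split the prefix from the zero-padded epoch number.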
+        input_model = '/model-optimizer-mxnet/data/nd/vgg19-0015.params'
+        model_name, iteration_number = parse_input_model(input_model)
+        self.assertEqual(model_name, '/model-optimizer-mxnet/data/nd/vgg19')
+        self.assertEqual(iteration_number, 15)
diff --git a/model-optimizer/mo/front/onnx/extractors/constant_test.py b/model-optimizer/mo/front/onnx/extractors/constant_test.py
new file mode 100644 (file)
index 0000000..8204966
--- /dev/null
@@ -0,0 +1,60 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import logging as log
+import unittest
+
+import numpy as np
+import onnx
+from generator import generator, generate
+from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
+
+from mo.front.onnx.extractors.constant import onnx_constant_ext
+from mo.utils.unittest.extractors import PB
+
+dtypes = [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.float32, np.double, np.bool]
+
+
+@generator
+class ConstantONNXExtractorTest(unittest.TestCase):
+    @staticmethod
+    def _create_constant_node(numpy_dtype):
+        numpy_dtype = np.dtype(numpy_dtype)
+        if numpy_dtype not in NP_TYPE_TO_TENSOR_TYPE:
+            log.error("Numpy type {} not supported in ONNX".format(numpy_dtype))
+            return None
+
+        values = np.array(np.random.randn(5, 5).astype(numpy_dtype))
+        pb = onnx.helper.make_node(
+            'Constant',
+            inputs=[],
+            outputs=['values'],
+            value=onnx.helper.make_tensor(
+                name='const_tensor',
+                data_type=NP_TYPE_TO_TENSOR_TYPE[numpy_dtype],
+                dims=values.shape,
+                vals=values.flatten().astype(numpy_dtype),
+            ),
+        )
+        node = PB({'pb': pb})
+        return node
+
+    @generate(*dtypes)
+    def test_constant_ext(self, np_dtype):
+        node = self._create_constant_node(np_dtype)
+        attrs = onnx_constant_ext(node)
+        self.assertTrue(attrs['data_type'] == np_dtype,
+                        'Wrong data_type attribute: received {}, expected {}'.format(attrs['data_type'], np_dtype))
diff --git a/model-optimizer/mo/front/tf/extractors/concat_test.py b/model-optimizer/mo/front/tf/extractors/concat_test.py
new file mode 100644 (file)
index 0000000..054da61
--- /dev/null
@@ -0,0 +1,38 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.tf.extractors.concat import tf_concat_ext
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class ConcatExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.patcher = 'mo.front.tf.extractors.concat.concat_infer'
+
+    def test_concat(self):
+        pb = PB({'attr': {
+            'N': PB({'i': 3}),
+        }})
+        self.expected = {
+            'type': 'Concat',
+            'N': 3,
+        }
+        self.res = tf_concat_ext(pb=pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = None
+        self.compare()
diff --git a/model-optimizer/mo/front/tf/extractors/const_test.py b/model-optimizer/mo/front/tf/extractors/const_test.py
new file mode 100644 (file)
index 0000000..5caafa4
--- /dev/null
@@ -0,0 +1,177 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+import tensorflow as tf
+from generator import generator, generate
+
+from mo.front.tf.extractors.const import tf_const_ext
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+dtypes = {"ints": [(tf.int8, np.int8),
+                   (tf.int16, np.int16),
+                   (tf.int32, np.int32),
+                   (tf.int64, np.int64)],
+
+          "uints": [(tf.uint8, np.uint8),
+                    (tf.uint16, np.uint16),
+                    ],
+          "floats": [(tf.float32, np.float32),
+                     (tf.float64, np.double)],
+
+          "bools": [(tf.bool, np.bool)],
+
+          "strings": [(tf.string, np.str)]}
+if tf.__version__ > "1.4.0":
+    dtypes['uints'].extend([(tf.uint32, np.uint32), (tf.uint64, np.uint64)])
+
+
+@generator
+class ConstExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.patcher = 'mo.front.tf.extractors.const.tf_const_infer'
+
+    @generate(*dtypes['ints'])
+    def test_const_ints(self, tf_dtype, np_dtype):
+        shape = [1, 1, 200, 50]
+        values = np.random.randint(low=np.iinfo(np_dtype).min, high=np.iinfo(np_dtype).max, size=shape, dtype=np_dtype)
+        tensor_proto = tf.make_tensor_proto(values=values, dtype=tf_dtype, shape=shape)
+        pb = PB({"attr": PB({
+            "value": PB({
+                "tensor": PB({
+                    "dtype": tensor_proto.dtype,
+                    "tensor_shape": tensor_proto.tensor_shape,
+                    "tensor_content": tensor_proto.tensor_content
+                })
+            })
+        })})
+        self.expected = {
+            'data_type': np_dtype,
+            'shape': np.asarray(shape, dtype=np.int),
+            'value': values
+        }
+        self.res = tf_const_ext(pb=pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*dtypes['uints'])
+    def test_const_uints(self, tf_dtype, np_dtype):
+        shape = [1, 1, 200, 50]
+        values = np.random.randint(low=np.iinfo(np_dtype).min, high=np.iinfo(np_dtype).max, size=shape, dtype=np_dtype)
+        tensor_proto = tf.make_tensor_proto(values=values, dtype=tf_dtype, shape=shape)
+        pb = PB({"attr": PB({
+            "value": PB({
+                "tensor": PB({
+                    "dtype": tensor_proto.dtype,
+                    "tensor_shape": tensor_proto.tensor_shape,
+                })
+            })
+        })})
+        if tf_dtype == tf.uint16:
+            setattr(pb.attr.value.tensor, "int_val", values.tolist())
+        else:
+            setattr(pb.attr.value.tensor, "tensor_content", tensor_proto.tensor_content)
+        self.expected = {
+            'data_type': np_dtype,
+            'shape': np.asarray(shape, dtype=np.int),
+            'value': values
+        }
+        self.res = tf_const_ext(pb=pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*dtypes['floats'])
+    def test_const_floats(self, tf_dtype, np_dtype):
+        shape = [1, 1, 200, 50]
+        values = np.random.uniform(low=np.finfo(np.float32).min, high=np.finfo(np.float32).max, size=shape).astype(
+            np_dtype)
+        tensor_proto = tf.make_tensor_proto(values=values, dtype=tf_dtype, shape=shape)
+        pb = PB({"attr": PB({
+            "value": PB({
+                "tensor": PB({
+                    "dtype": tensor_proto.dtype,
+                    "tensor_shape": tensor_proto.tensor_shape,
+                    "tensor_content": tensor_proto.tensor_content
+                })
+            })
+        })})
+        self.expected = {
+            'data_type': np_dtype,
+            'shape': np.asarray(shape, dtype=np.int),
+            'value': values
+        }
+        self.res = tf_const_ext(pb=pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = None
+        self.compare()
+
+    # TODO: Check how to correctly handle tensor_proto with booleans. It has no tensor_content section
+    @generate(*dtypes['bools'])
+    def test_const_bools(self, tf_dtype, np_dtype):
+        shape = [1, 1, 50, 50]
+        values = np.random.choice(a=[True, False], size=shape, p=[0.5, 0.5])
+        tensor_proto = tf.make_tensor_proto(values=values, dtype=tf_dtype, shape=shape)
+        pb = PB({"attr": PB({
+            "value": PB({
+                "tensor": PB({
+                    "dtype": tensor_proto.dtype,
+                    "tensor_shape": tensor_proto.tensor_shape,
+                    "bool_val": values.tolist()
+                })
+            })
+        })})
+        self.expected = {
+            'data_type': np_dtype,
+            'shape': np.asarray(shape, dtype=np.int),
+            'value': values
+        }
+        self.res = tf_const_ext(pb=pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = None
+        self.compare()
+        # TODO: Check how to correctly create tensor_proto with strings
+        # @generate(*dtypes['strings'])
+        # def test_const_floats(self, tf_dtype, np_dtype):
+        #     shape = [1, 1, 50, 50]
+        #     values = np.chararray(shape=shape)
+        #     values[:] = "bla"
+        #     tensor_proto = tf.make_tensor_proto(values=values, dtype=tf_dtype, shape=shape)
+        #     pb = PB({"attr": PB({
+        #         "value": PB({
+        #             "tensor": PB({
+        #                 "dtype": tensor_proto.dtype,
+        #                 "tensor_shape": tensor_proto.tensor_shape,
+        #                 "tensor_content": tensor_proto.tensor_content
+        #             })
+        #         })
+        #     })})
+        #     self.expected = {
+        #         'data_type': np_dtype,
+        #         'shape': np.asarray(shape, dtype=np.int),
+        #         'value': values
+        #     }
+        #     self.res = tf_const_ext(pb=pb)
+        #     self.res["infer"](None)
+        #     self.call_args = self.infer_mock.call_args
+        #     self.expected_call_args = None
+        #     self.compare()
diff --git a/model-optimizer/mo/front/tf/extractors/eltwise_test.py b/model-optimizer/mo/front/tf/extractors/eltwise_test.py
new file mode 100644 (file)
index 0000000..0a0f1e3
--- /dev/null
@@ -0,0 +1,357 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import numpy as np
+import tensorflow as tf
+from generator import generator, generate
+
+from mo.front.tf.extractor import tf_op_extractors
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+dtypes_map = [(1, np.float32), (2, np.float64), (3, np.int32), (4, np.uint8), (5, np.int16), (6, np.int8),
+              (7, str), (9, np.int64), (10, bool), (17, np.uint16)]
+
+if tf.__version__ > "1.4.0":
+    dtypes_map.extend([(22, np.uint32), (23, np.uint64)])
+
+
+@generator
+class EltwiseExtractorTest(BaseExtractorsTestingClass):
+    def check_lambda_res(self, actual, expected, expected_type):
+        self.assertEqual(expected_type, type(actual), "Eltwise lambda function results has wrong data type!")
+        if isinstance(actual, np.ndarray) and isinstance(expected, np.ndarray):
+            np.testing.assert_equal(actual, expected)
+        else:
+            self.assertEqual(expected, actual, "Eltwise lambda function results validation failed!")
+
+    @classmethod
+    def setUpClass(cls):
+        cls.patcher = 'mo.front.tf.extractors.eltwise.eltwise_infer'
+
+    @generate(*dtypes_map)
+    def test_eltwise_dtypes_map(self, dtype, np_type):
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": dtype
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'can_be_bias': True,
+            'data_type': np_type,
+            'operation': 'sum',
+            'type': 'Eltwise'
+        }
+        self.res = tf_op_extractors['Add'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*[((1, 2), 3, int), ((1., 2.), 3., float), (('a', 'b'), 'ab', str),
+                ((np.full(shape=(10, 10), fill_value=4), np.full(shape=(10, 10), fill_value=2)),
+                 np.full(shape=(10, 10), fill_value=6), np.ndarray)
+                ])
+    def test_eltwise_add(self, lambda_args, expected_res, expected_type):
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'can_be_bias': True,
+            'data_type': np.float32,
+            'operation': 'sum',
+            'type': 'Eltwise'
+        }
+        self.res = tf_op_extractors['Add'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        eltwise_lambda = self.call_args[0][1]
+        lambda_res = eltwise_lambda(*lambda_args)
+        self.check_lambda_res(actual=lambda_res, expected=expected_res, expected_type=expected_type)
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*[((1, 2), 2, int), ((1., 2.), 2., float),
+                ((np.full(shape=(10, 10), fill_value=4), np.full(shape=(10, 10), fill_value=2)),
+                 np.full(shape=(10, 10), fill_value=8), np.ndarray)])
+    def test_eltwise_mul(self, lambda_args, expected_res, expected_type):
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'data_type': np.float32,
+            'operation': 'mul',
+            'type': 'Eltwise'
+        }
+        self.res = tf_op_extractors['Mul'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        eltwise_lambda = self.call_args[0][1]
+        lambda_res = eltwise_lambda(*lambda_args)
+        self.check_lambda_res(actual=lambda_res, expected=expected_res, expected_type=expected_type)
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*[(4, 0.5, np.float64),
+                (np.full(shape=(10, 10), fill_value=4), np.full(shape=(10, 10), fill_value=0.5), np.ndarray)])
+    def test_eltwise_rsqrt(self, lambda_args, expected_res, expected_type):
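+        # Rsqrt is expected to be expressed as a Power layer with power=-0.5,
+        # i.e. x ** -0.5 == 1 / sqrt(x), per the reference attributes below.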
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'data_type': np.float32,
+            'type': 'Power',
+            'power': -0.5,
+            'scale': 1,
+            'shift': 0
+        }
+        self.res = tf_op_extractors['Rsqrt'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        eltwise_lambda = self.call_args[0][1]
+        lambda_res = eltwise_lambda(lambda_args)
+        self.check_lambda_res(actual=lambda_res, expected=expected_res, expected_type=expected_type)
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*[(1, -1, int), (1., -1., float),
+                (np.full(shape=(10, 10), fill_value=4), np.full(shape=(10, 10), fill_value=-4), np.ndarray)])
+    def test_eltwise_neg(self, lambda_args, expected_res, expected_type):
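+        # Neg is expected to map to a Power layer with scale=-1 (i.e. -x),
+        # per the reference attributes below.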
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'data_type': np.float32,
+            'type': 'Power',
+            'power': 1,
+            'scale': -1,
+            'shift': 0
+        }
+        self.res = tf_op_extractors['Neg'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        eltwise_lambda = self.call_args[0][1]
+        lambda_res = eltwise_lambda(lambda_args)
+        self.check_lambda_res(actual=lambda_res, expected=expected_res, expected_type=expected_type)
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*[((1, 2), -1, int), ((1., 2.), -1., float),
+                ((np.full(shape=(10, 10), fill_value=4), np.full(shape=(10, 10), fill_value=2)),
+                 np.full(shape=(10, 10), fill_value=2), np.ndarray)
+                ])
+    def test_eltwise_sub(self, lambda_args, expected_res, expected_type):
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'data_type': np.float32,
+        }
+        self.res = tf_op_extractors['Sub'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        eltwise_lambda = self.call_args[0][1]
+        lambda_res = eltwise_lambda(*lambda_args)
+        self.check_lambda_res(actual=lambda_res, expected=expected_res, expected_type=expected_type)
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*[(4, 4, np.int64), (4., 4., np.float64),
+                (-1, 0, np.int64), (-1., 0, np.float64),
+                (np.full(shape=(3, 3), fill_value=-1), np.zeros(shape=(3, 3)), np.ndarray),
+                (np.full(shape=(3, 3), fill_value=4), np.full(shape=(3, 3), fill_value=4), np.ndarray)])
+    def test_eltwise_relu(self, lambda_args, expected_res, expected_type):
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'data_type': np.float32,
+            "type": "ReLU"
+        }
+        self.res = tf_op_extractors['Relu'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        eltwise_lambda = self.call_args[0][1]
+        lambda_res = eltwise_lambda(lambda_args)
+        self.check_lambda_res(actual=lambda_res, expected=expected_res, expected_type=expected_type)
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*[(4, 4, np.int64), (4., 4., np.float64),
+                (-1, 0, np.int64), (-1., 0, np.float64),
+                (10, 6, np.int64), (10., 6, np.float64),
+                (np.full(shape=(3, 3), fill_value=-1), np.zeros(shape=(3, 3)), np.ndarray),
+                (np.full(shape=(3, 3), fill_value=10), np.full(shape=(3, 3), fill_value=6), np.ndarray),
+                (np.full(shape=(3, 3), fill_value=4), np.full(shape=(3, 3), fill_value=4), np.ndarray)])
+    def test_eltwise_relu6(self, lambda_args, expected_res, expected_type):
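+        # Relu6 is expected to become a Clamp layer with the [0, 6] range,
+        # per the reference attributes below.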
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'data_type': np.float32,
+            'type': 'Clamp',
+            'min': 0,
+            'max': 6
+        }
+        self.res = tf_op_extractors['Relu6'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        eltwise_lambda = self.call_args[0][1]
+        lambda_res = eltwise_lambda(lambda_args)
+        self.check_lambda_res(actual=lambda_res, expected=expected_res, expected_type=expected_type)
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*[(4, 16, int), (4., 16., float),
+                (-1, 1, int), (-1., 1., float),
+                (np.full(shape=(3, 3), fill_value=-1), np.full(shape=(3, 3), fill_value=1), np.ndarray),
+                (np.full(shape=(3, 3), fill_value=4), np.full(shape=(3, 3), fill_value=16), np.ndarray)])
+    def test_eltwise_square(self, lambda_args, expected_res, expected_type):
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'data_type': np.float32,
+        }
+        self.res = tf_op_extractors['Square'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        eltwise_lambda = self.call_args[0][1]
+        lambda_res = eltwise_lambda(lambda_args)
+        self.check_lambda_res(actual=lambda_res, expected=expected_res, expected_type=expected_type)
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*[
+        ((4, 16), 16, np.int64),
+        ((4., 16.), 16., np.float64),
+        ((-1, 1), 1, np.int64),
+        ((-1., 1), 1., np.float64),
+        (
+                (
+                        np.full(shape=(3, 3), fill_value=-1),
+                        np.full(shape=(3, 3), fill_value=-2)
+                ),
+                np.full(shape=(3, 3), fill_value=-1),
+                np.ndarray
+        ),
+        (
+                (
+                        np.full(shape=(3, 3), fill_value=4),
+                        np.full(shape=(3, 3), fill_value=0)
+                ),
+                np.full(shape=(3, 3), fill_value=4),
+                np.ndarray)
+    ])
+    def test_eltwise_maximum(self, lambda_args, expected_res, expected_type):
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'data_type': np.float32,
+        }
+        self.res = tf_op_extractors['Maximum'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        eltwise_lambda = self.call_args[0][1]
+        lambda_res = eltwise_lambda(*lambda_args)
+        self.check_lambda_res(actual=lambda_res, expected=expected_res, expected_type=expected_type)
+        self.expected_call_args = None
+        self.compare()
+
+    @generate(*[((16, 4), 4, float), ((4., 16.), 0.25, float),
+                ((-1, 1), -1, float), ((-16., 1), -16., float),
+                ((np.full(shape=(3, 3), fill_value=-1), np.full(shape=(3, 3), fill_value=-2)),
+                 np.full(shape=(3, 3), fill_value=0.5), np.ndarray),
+                ((np.full(shape=(3, 3), fill_value=4), np.full(shape=(3, 3), fill_value=0)),
+                 np.full(shape=(3, 3), fill_value=np.inf), np.ndarray)])
+    def test_eltwise_realdiv(self, lambda_args, expected_res, expected_type):
+        node_pb = PB({
+            'pb': PB({
+                'attr': PB({
+                    'T': PB({
+                        "type": 1
+                    })
+                })
+            })
+        })
+        self.expected = {
+            'data_type': np.float32,
+        }
+        self.res = tf_op_extractors['RealDiv'](node_pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        eltwise_lambda = self.call_args[0][1]
+        lambda_res = eltwise_lambda(*lambda_args)
+        self.check_lambda_res(actual=lambda_res, expected=expected_res, expected_type=expected_type)
+        self.expected_call_args = None
+        self.compare()
diff --git a/model-optimizer/mo/front/tf/extractors/expand_dims_test.py b/model-optimizer/mo/front/tf/extractors/expand_dims_test.py
new file mode 100644 (file)
index 0000000..dd1f1d8
--- /dev/null
@@ -0,0 +1,32 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.tf.extractors.expand_dims import tf_expand_dims_ext
+from mo.utils.unittest.extractors import BaseExtractorsTestingClass
+
+
+class ExpandDimsExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.patcher = 'mo.front.tf.extractors.expand_dims.tf_expand_dims_infer'
+
+    def test_expand_dims(self):
+        self.expected = {}
+        self.res = tf_expand_dims_ext(pb=None)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = None
+        self.compare()
diff --git a/model-optimizer/mo/front/tf/extractors/identity_test.py b/model-optimizer/mo/front/tf/extractors/identity_test.py
new file mode 100644 (file)
index 0000000..1a6a84f
--- /dev/null
@@ -0,0 +1,32 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.tf.extractors.identity import tf_identity_ext
+from mo.utils.unittest.extractors import BaseExtractorsTestingClass
+
+
+class IdentityExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.patcher = 'mo.front.tf.extractors.identity.copy_shape_infer'
+
+    def test_identity(self):
+        self.expected = {}
+        self.res = tf_identity_ext(pb=None)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = None
+        self.compare()
diff --git a/model-optimizer/mo/front/tf/extractors/lrn_test.py b/model-optimizer/mo/front/tf/extractors/lrn_test.py
new file mode 100644 (file)
index 0000000..b4855b6
--- /dev/null
@@ -0,0 +1,53 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.front.tf.extractors.lrn import tf_lrn_ext
+from mo.utils.unittest.extractors import PB
+
+
+class LRNExtractorTest(unittest.TestCase):
+    """
+    Unit Test:
+        1. test_simple_check - check IE parameters calculations
+    """
+
+    def test_simple_check(self):
+        # Input parameters for LRN extractor
+        # taken from ICV AlexNet LRN layer
+        pb = PB({'attr': {
+            'alpha': PB({'f': 0.000019999999494757503}),
+            'beta': PB({'f': 0.75}),
+            'bias': PB({'f': 2.0}),
+            'depth_radius': PB({'i': 2}),
+        }})
+        res = tf_lrn_ext(pb)
+        # Reference results for given parameters
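+        # (the values below imply local_size = 2 * depth_radius + 1 = 5 and
+        #  alpha scaled by local_size: 2e-05 * 5 = 1e-04)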
+        ref = {
+            'type': 'Norm',
+            'alpha': 9.999999747378752e-05,
+            'beta': 0.75,
+            'bias': 2.0,
+            'local_size': 5,
+            'region': 'across',
+            'infer': copy_shape_infer,
+        }
+        for attr in ref:
+            self.assertEqual(res[attr], ref[attr])
diff --git a/model-optimizer/mo/front/tf/extractors/matmul_test.py b/model-optimizer/mo/front/tf/extractors/matmul_test.py
new file mode 100644 (file)
index 0000000..e7bd524
--- /dev/null
@@ -0,0 +1,43 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from mo.front.tf.extractors.matmul import tf_matmul_ext
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class MatmulExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.patcher = 'mo.front.tf.extractors.matmul.tf_matmul_infer'
+
+    def test_matmul(self):
+        pb = PB({'attr': {
+            'transpose_a': PB({
+                'b': True
+            }),
+            'transpose_b': PB({
+                'b': False
+            }),
+        }})
+        self.expected = {
+            'transpose_a': True,
+            'transpose_b': False,
+        }
+        self.res = tf_matmul_ext(pb=pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = None
+        self.compare()
diff --git a/model-optimizer/mo/front/tf/extractors/mean_test.py b/model-optimizer/mo/front/tf/extractors/mean_test.py
new file mode 100644 (file)
index 0000000..7430bae
--- /dev/null
@@ -0,0 +1,42 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from mo.front.tf.extractors.mean import tf_mean_ext
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class MeanExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.patcher = 'mo.front.tf.extractors.mean.tf_reduce_infer'
+
+    def test_mean(self):
+        pb = PB({'attr': {
+            'keep_dims': PB({
+                'b': True
+            }),
+        }})
+        self.expected = {
+            'type': "Pooling",
+            'keep_dims': True,
+        }
+        self.res = tf_mean_ext(pb=pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = (None, np.add.reduce)
+        self.compare()
diff --git a/model-optimizer/mo/front/tf/extractors/prod_test.py b/model-optimizer/mo/front/tf/extractors/prod_test.py
new file mode 100644 (file)
index 0000000..53b974d
--- /dev/null
@@ -0,0 +1,41 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from mo.front.tf.extractors.prod import tf_reduce_prod_ext
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class ProdExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.patcher = 'mo.front.tf.extractors.prod.tf_reduce_infer'
+
+    def test_prod(self):
+        pb = PB({'attr': {
+            'keep_dims': PB({
+                'b': True
+            }),
+        }})
+        self.expected = {
+            'keep_dims': True,
+        }
+        self.res = tf_reduce_prod_ext(pb=pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = (None, np.multiply.reduce)
+        self.compare()
diff --git a/model-optimizer/mo/front/tf/extractors/squeeze_test.py b/model-optimizer/mo/front/tf/extractors/squeeze_test.py
new file mode 100644 (file)
index 0000000..ccc0ff1
--- /dev/null
@@ -0,0 +1,42 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import numpy as np
+
+from mo.front.tf.extractors.squeeze import tf_squeeze_ext
+from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
+
+
+class SqueezeExtractorTest(BaseExtractorsTestingClass):
+    @classmethod
+    def setUpClass(cls):
+        cls.patcher = 'mo.front.tf.extractors.squeeze.tf_squeeze_infer'
+
+    def test_squeeze(self):
+        pb = PB({'attr': {
+            'squeeze_dims': PB({
+                'list': PB({'i': [1, 2]})
+            })
+        }})
+        self.expected = {
+            'type': 'Reshape',
+            'squeeze_dims': np.array([1, 2], dtype=np.int8),
+        }
+        self.res = tf_squeeze_ext(pb=pb)
+        self.res["infer"](None)
+        self.call_args = self.infer_mock.call_args
+        self.expected_call_args = None
+        self.compare()
diff --git a/model-optimizer/mo/front/tf/extractors/utils_test.py b/model-optimizer/mo/front/tf/extractors/utils_test.py
new file mode 100644 (file)
index 0000000..51544cd
--- /dev/null
@@ -0,0 +1,44 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.front.tf.extractors.utils import collect_tf_attrs
+from mo.utils.unittest.extractors import PB
+
+
+class AttrParsingTest(unittest.TestCase):
+    def test_simple_check(self):
+        pb = PB({'attr': {
+            'str': PB({'s': "aaaa"}),
+            'int': PB({'i': 7}),
+            'float': PB({'f': 2.0}),
+            'bool': PB({'b': True}),
+            'lisint': PB({'list': PB({'i': 5, 'i': 6})})}
+        })
+
+        res = collect_tf_attrs(pb.attr)
+
+        # Reference results for given parameters
+        ref = {
+            'str': pb.attr['str'].s,
+            'int': pb.attr['int'].i,
+            'float': pb.attr['float'].f,
+            'bool': pb.attr['bool'].b,
+            'lisint': pb.attr['lisint'].list.i
+        }
+        for attr in ref:
+            self.assertEqual(res[attr], ref[attr])
diff --git a/model-optimizer/mo/front/tf/loader_test.py b/model-optimizer/mo/front/tf/loader_test.py
new file mode 100644 (file)
index 0000000..326849f
--- /dev/null
@@ -0,0 +1,34 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest.mock
+from io import StringIO
+from unittest.mock import Mock, MagicMock
+
+from generator import generate, generator
+
+from mo.front.tf.loader import load_tf_graph_def
+
+
+@generator
+class TestLoader(unittest.TestCase):
+    @generate('/path/to/somewhere/my_checkpoint.ckpt', '/path/to/somewhere/my_meta_graph.meta')
+    @unittest.mock.patch('sys.stdout', new_callable=StringIO)
+    def test_helper_print_ckpt(self, path, out):
+        mock = Mock(__bool__=MagicMock(side_effect=Exception()))
+        self.assertRaises(Exception, load_tf_graph_def, path, meta_graph_file=mock)
+        self.assertRegex(out.getvalue(),
+                         r'\[ WARNING ] The value for the --input_model command line parameter ends with "\.ckpt"')
diff --git a/model-optimizer/mo/graph/graph_test.py b/model-optimizer/mo/graph/graph_test.py
new file mode 100644 (file)
index 0000000..6b5d990
--- /dev/null
@@ -0,0 +1,324 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.graph.graph import erase_node, get_node_id_by_name, Node, replace_node, get_inputs_with_ports
+from mo.ops.const import Const
+from mo.utils.error import Error
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes = {
+    '0': {'name': 'input1', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+    '1': {'name': 'input2', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+    '2': {'name': 'node_1', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'NotPlaceholder'},
+    '3': {'name': 'node_2', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'NotPlaceholder'},
+    '4': {'name': 'node_3', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'NotPlaceholder'},
+    '5': {'name': 'node_4', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'NotPlaceholder'},
+    '6': {'name': 'output', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'OpOutput',
+          'is_output': True},
+    'input_3': {'name': 'input_3', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'}
+}
+edges = {
+    ('0', '2'),
+    ('2', '3'),
+    ('4', '6'),
+    ('1', '5'),
+    ('5', '6'),
+    ('input_3', '6')
+}
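+# Topology of the shared test graph (ids vs. names): input1 -> node_1 -> node_2,
+# input2 -> node_4 -> output, node_3 -> output, input_3 -> output. Node ids and names differ
+# so that get_node_id_by_name has to perform a real lookup.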
+
+
+class TestGetNodeById(unittest.TestCase):
+    def setUp(self):
+        self.graph = build_graph(nodes, edges)
+
+    def test_get_node_id_by_name(self):
+        self.assertEqual(get_node_id_by_name(self.graph, 'input1'), '0')
+
+    def test_get_node_id_by_name_1(self):
+        self.assertEqual(get_node_id_by_name(self.graph, 'input2'), '1')
+
+    def test_get_node_id_by_name_2(self):
+        self.assertEqual(get_node_id_by_name(self.graph, 'node_1'), '2')
+
+    def test_get_node_id_by_name_3(self):
+        self.assertEqual(get_node_id_by_name(self.graph, 'node_2'), '3')
+
+    def test_get_node_id_by_name_4(self):
+        self.assertEqual(get_node_id_by_name(self.graph, 'node_3'), '4')
+
+    def test_get_node_id_by_name_5(self):
+        self.assertEqual(get_node_id_by_name(self.graph, 'node_4'), '5')
+
+    def test_get_node_id_by_name_6(self):
+        self.assertEqual(get_node_id_by_name(self.graph, 'output'), '6')
+
+    def test_get_node_id_by_name_7(self):
+        self.assertEqual(get_node_id_by_name(self.graph, 'input_3'), 'input_3')
+
+    def test_get_node_id_by_name_8(self):
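+        # '1' is a node id here, not a node name (the node with id '1' is named 'input2'),
+        # so the lookup by name is expected to fail with an Error.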
+        self.assertRaises(Error, get_node_id_by_name, self.graph, '1')
+
+
+class TestEraseNode(unittest.TestCase):
+    def test_remove_noop_nodes_middle(self):
+        graph = build_graph(
+            {
+                'input': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'},
+                'output': {'type': 'Identity', 'value': None, 'kind': 'op'},
+            },
+            [('input', 'noop'),
+             ('noop', 'output')])
+
+        self.assertEqual(len(graph.nodes()), 3)
+        self.assertEqual(len(graph.edges()), 2)
+        self.assertListEqual(list(graph.out_edges('input')), [('input', 'noop')])
+
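+        # erase_node is expected to drop 'noop' and reconnect its producer directly to its consumer,
+        # so 'input' ends up wired straight to 'output'.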
+        erase_node(Node(graph, 'noop'))
+
+        self.assertEqual(len(graph.nodes()), 2)
+        self.assertEqual(len(graph.edges()), 1)
+        self.assertListEqual(list(graph.out_edges('input')), [('input', 'output')])
+
+    def test_remove_noop_nodes_middle_2(self):
+        graph = build_graph(
+            {
+                'input': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'},
+                'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+            },
+            [('input', 'noop'),
+             ('noop', 'output_1', {'in': 4, 'out': 0}),
+             ('noop', 'output_2', {'in': 2, 'out': 0}),
+             ('noop', 'output_3', {'in': 10, 'out': 0})])
+
+        ref_graph = build_graph(
+            {
+                'input': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+            },
+            [('input', 'output_1', {'in': 4, 'out': 0}),
+             ('input', 'output_2', {'in': 2, 'out': 0}),
+             ('input', 'output_3', {'in': 10, 'out': 0})],
+            nodes_with_edges_only=True)
+
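+        # The reference graph encodes the expected result of erasing 'noop': consumers keep their
+        # 'in' port numbers while the producer side collapses to out port 0.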
+        erase_node(Node(graph, 'noop'))
+
+        compare_graphs(graph, ref_graph, 'output_1')
+
+    def test_remove_noop_nodes_check_out_port(self):
+        graph = build_graph(
+            {
+                'input': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'},
+                'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+            },
+            [('input', 'noop'),
+             ('noop', 'output_1', {'in': 4, 'out': 1}),
+             ('noop', 'output_2', {'in': 2, 'out': 1}),
+             ('noop', 'output_3', {'in': 10, 'out': 1})])
+
+        ref_graph = build_graph(
+            {
+                'input': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+            },
+            [('input', 'output_1', {'in': 4, 'out': 0}),
+             ('input', 'output_2', {'in': 2, 'out': 0}),
+             ('input', 'output_3', {'in': 10, 'out': 0})],
+            nodes_with_edges_only=True)
+
+        erase_node(Node(graph, 'noop'))
+
+        compare_graphs(graph, ref_graph, 'output_1')
+
+    def test_remove_noop_nodes_too_many_outputs(self):
+        graph = build_graph(
+            {
+                'input': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'},
+                'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+            },
+            [('input', 'noop'),
+             ('noop', 'output_1', {'in': 4, 'out': 0}),
+             ('noop', 'output_2', {'in': 2, 'out': 1}),
+             ('noop', 'output_3', {'in': 10, 'out': 0})])
+
+        self.assertRaises(AssertionError, erase_node, Node(graph, 'noop'))
+
+    def test_remove_noop_nodes_front(self):
+        graph = build_graph(
+            {
+                'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'},
+                'output': {'type': 'Identity', 'value': None, 'kind': 'op'}
+            },
+            [('noop', 'output')]
+        )
+
+        self.assertEqual(len(graph.nodes()), 2)
+        self.assertEqual(len(graph.edges()), 1)
+        self.assertListEqual(list(graph.out_edges('noop')), [('noop', 'output')])
+
+        erase_node(Node(graph, 'noop'))
+
+        self.assertEqual(len(graph.nodes()), 1)
+        self.assertEqual(len(graph.edges()), 0)
+        self.assertEqual(len(graph.in_edges('output')), 0)
+
+    def test_remove_noop_nodes_back(self):
+        graph = build_graph(
+            {
+                'input': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'}
+            },
+            [('input', 'noop')]
+        )
+
+        self.assertEqual(len(graph.nodes()), 2)
+        self.assertEqual(len(graph.edges()), 1)
+        self.assertListEqual(list(graph.in_edges('noop')), [('input', 'noop')])
+
+        erase_node(Node(graph, 'noop'))
+
+        self.assertEqual(len(graph.nodes()), 1)
+        self.assertEqual(len(graph.edges()), 0)
+        self.assertEqual(len(graph.in_edges('input')), 0)
+
+    def test_remove_noop_nodes_noop_only(self):
+        import networkx as nx
+        graph = nx.MultiDiGraph()
+        graph.add_node('noop', **{'type': 'NoOp', 'value': None, 'kind': 'op'})
+
+        self.assertEqual(len(graph.nodes()), 1)
+        self.assertEqual(len(graph.edges()), 0)
+
+        erase_node(Node(graph, 'noop'))
+
+        self.assertEqual(len(graph.nodes()), 0)
+        self.assertEqual(len(graph.edges()), 0)
+
+    def test_remove_noop_error(self):
+        graph = build_graph(
+            {
+                'input_1': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'input_2': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'input_3': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'},
+                'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+            },
+            [('input_1', 'noop'),
+             ('input_2', 'noop'),
+             ('input_3', 'noop'),
+             ('noop', 'output_1'),
+             ('noop', 'output_2'),
+             ('noop', 'output_3')])
+        self.assertRaises(AssertionError, erase_node, Node(graph, 'noop'))
+
+
+class TestReplaceNode(unittest.TestCase):
+    def test_replace_node_one_consumer(self):
+        graph = build_graph(
+            {
+                'input_1': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'input_2': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'old': {'type': 'Identity', 'value': None, 'kind': 'op', 'is_output': True},
+                'output': {'type': 'OpOutput', 'value': None, 'kind': 'op'},
+            },
+            [('input_1', 'old'),
+             ('input_2', 'old'),
+             ('old', 'output')])
+
+        new_node = Const(graph, {'name': 'new'}).create_node([Node(graph, 'input_1'), Node(graph, 'input_2')])
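+        # replace_node is expected to move all outgoing edges (and flags such as 'is_output')
+        # from 'old' onto the newly created Const node.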
+        replace_node(Node(graph, 'old'), new_node)
+
+        self.assertEqual(len(graph.nodes()), 4)
+        self.assertEqual(len(graph.edges()), 3)
+        self.assertEqual(new_node['is_output'], True)
+        self.assertListEqual(list(graph.out_edges('new')), [('new', 'output')])
+
+    def test_replace_node_several_consumers(self):
+        graph = build_graph(
+            {
+                'input_1': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'input_2': {'type': 'Placeholder', 'value': None, 'kind': 'op'},
+                'old': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+            },
+            [('input_1', 'old'),
+             ('input_2', 'old'),
+             ('old', 'output_3'),
+             ('old', 'output_2'),
+             ('old', 'output_1'),
+             ])
+
+        new_node = Const(graph, {'name': 'new'}).create_node([Node(graph, 'input_1'), Node(graph, 'input_2')])
+        replace_node(Node(graph, 'old'), new_node)
+
+        self.assertEqual(len(graph.nodes()), 6)
+        self.assertEqual(len(graph.edges()), 5)
+        self.assertListEqual(sorted(graph.out_edges('new')), [('new', 'output_1'), ('new', 'output_2'),
+                                                              ('new', 'output_3')])
+        expected_result = [('new', 'output_1', {'in': 0, 'out': 2, 'name': 'old'}),
+                           ('new', 'output_2', {'in': 0, 'out': 1, 'name': 'old'}),
+                           ('new', 'output_3', {'in': 0, 'out': 0, 'name': 'old'})]
+        self.assertListEqual(sorted(graph.out_edges('new', data=True)), expected_result)
+
+
+class GetNodesWithPorts(unittest.TestCase):
+    def test_get_nodes_with_ports(self):
+        nodes = {
+            'one': {},
+            'two': {},
+            'three': {},
+            'four': {},
+            'five': {}
+        }
+        edges = [
+            ('one', 'two', {'in': 0, 'out': 0}),
+            ('two', 'three', {'in': 0, 'out': 0}),
+            ('two', 'four', {'in': 0, 'out': 1}),
+            ('two', 'five', {'in': 0, 'out': 2}),
+            ('three', 'five', {'in': 1, 'out': 0})
+        ]
+        graph = build_graph(nodes, edges)
+        match = {
+            'one': Node(graph, 'one'),
+            'two': Node(graph, 'two'),
+            'three': Node(graph, 'three'),
+            'four': Node(graph, 'four'),
+            'five': Node(graph, 'five'),
+
+        }
+        input_names_in_pattern = ['one', 'three']
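+        # get_inputs_with_ports is expected to resolve each name in input_names_in_pattern to a
+        # (matched node, input port) pair; here both 'one' and 'three' resolve to port 0.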
+        result = get_inputs_with_ports(graph=graph, match=match, pattern_edges=edges,
+                                       input_names_in_pattern=input_names_in_pattern)
+        self.assertListEqual([(match['one'], 0), (match['three'], 0)], result)
diff --git a/model-optimizer/mo/main_test.py b/model-optimizer/mo/main_test.py
new file mode 100644 (file)
index 0000000..79f9feb
--- /dev/null
@@ -0,0 +1,31 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import argparse
+import unittest
+from unittest.mock import patch
+
+from mo.main import main
+from mo.utils.error import FrameworkError
+
+
+class TestMainErrors(unittest.TestCase):
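+    # @patch decorators hand their mocks to the test bottom-up, so the innermost patch
+    # (mo.main.driver) maps to the first mock parameter; main() is expected to catch the
+    # FrameworkError raised by the mocked driver and report it through logging.error.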
+    @patch('argparse.ArgumentParser.parse_args', return_value=argparse.Namespace())
+    @patch('mo.main.driver', side_effect=FrameworkError('FW ERROR MESSAGE'))
+    def test_FrameworkError(self, mock_driver, mock_argparse):
+        with self.assertLogs() as logger:
+            main(argparse.ArgumentParser(), 'framework_string')
+            self.assertEqual(logger.output, ['ERROR:root:FW ERROR MESSAGE'])
diff --git a/model-optimizer/mo/middle/passes/conv_test.py b/model-optimizer/mo/middle/passes/conv_test.py
new file mode 100644 (file)
index 0000000..ad4e3aa
--- /dev/null
@@ -0,0 +1,163 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.middle.passes.conv import convert_muladd_to_scaleshift_or_power
+from mo.middle.passes.eliminate import graph_clean_up
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # ScaleShift layer
+    'scaleshift_1': {'type': 'ScaleShift', 'value': None, 'kind': 'op', 'op': 'ScaleShift'},
+    'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Mul and Add operations
+    'mul_1': {'value': None, 'kind': 'op', 'op': 'Mul'},
+    'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'add_1': {'value': None, 'kind': 'op', 'op': 'Add'},
+    'add_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Power layer
+    'power_1': {'type': 'Power', 'kind': 'op', 'op': 'Power', 'scale': None, 'shift': None, 'power': None},
+    'power_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+class MulAddToScaleShiftOrPower(unittest.TestCase):
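+    # convert_muladd_to_scaleshift_or_power is expected to fold a constant Mul followed by a
+    # constant Add into a single ScaleShift layer (per-channel constants) or a Power layer
+    # (scalar constants); the *_neg_* cases below cover patterns that must be left untouched.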
+    def _create_graph_with_mul_add(self, mul_w, add_w):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3]), 'is_output': True},
+                             'mul_1_w': {'shape': np.array(mul_w.shape) if mul_w is not None else None,
+                                         'value': np.array(mul_w) if mul_w is not None else None},
+                             'add_1_w': {'shape': np.array(add_w.shape) if add_w is not None else None,
+                                         'value': np.array(add_w) if add_w is not None else None},
+                             })
+        del graph['mul_1']['mul_1_data'][0]['in']
+        del graph['add_1']['add_1_data'][0]['in']
+        return graph
+
+    def test_mul_add_to_scaleshift_1(self):
+        graph = self._create_graph_with_mul_add(np.array([1, 2, 3]), np.array([1, 2, 3]))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'scaleshift_1'),
+                                 ('scaleshift_1_w', 'scaleshift_1'),
+                                 ('scaleshift_1_b', 'scaleshift_1'),
+                                 ('scaleshift_1', 'scaleshift_1_data'),
+                                 ],
+                                {'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'scaleshift_1_data': {'is_output': True}
+                                 })
+
+        convert_muladd_to_scaleshift_or_power(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'add_1_data', 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
+
+    def test_mul_add_to_power_1(self):
+        graph = self._create_graph_with_mul_add(np.array([3]), np.array([2]))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'power_1'),
+                                 ('power_1', 'power_1_data'),
+                                 ],
+                                {'power_1': {'scale': 3, 'shift': 2, 'power': 1},
+                                 'power_1_data': {'is_output': True}
+                                 })
+
+        convert_muladd_to_scaleshift_or_power(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'add_1_data', 'power_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_mul_add_neg_1(self):
+        graph = self._create_graph_with_mul_add(None, np.array([2]))
+        graph_ref = self._create_graph_with_mul_add(None, np.array([2]))
+
+        convert_muladd_to_scaleshift_or_power(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'add_1_data', 'add_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_mul_add_neg_2(self):
+        graph = self._create_graph_with_mul_add(np.array([2]), None)
+        graph_ref = self._create_graph_with_mul_add(np.array([2]), None)
+
+        convert_muladd_to_scaleshift_or_power(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'add_1_data', 'add_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_mul_add_neg_3(self):
+        graph = self._create_graph_with_mul_add(None, None)
+        graph_ref = self._create_graph_with_mul_add(None, None)
+
+        convert_muladd_to_scaleshift_or_power(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'add_1_data', 'add_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_mul_add_neg_4(self):
+        graph = self._create_graph_with_mul_add(np.array([1, 2, 3]), np.array([3]))
+        graph_ref = self._create_graph_with_mul_add(np.array([1, 2, 3]), np.array(3))
+
+        convert_muladd_to_scaleshift_or_power(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'add_1_data', 'add_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_mul_add_neg_5(self):
+        graph = self._create_graph_with_mul_add(np.array([3]), np.array([3, 2, 1]))
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'scaleshift_1'),
+                                 ('scaleshift_1_w', 'scaleshift_1'),
+                                 ('scaleshift_1_b', 'scaleshift_1'),
+                                 ('scaleshift_1', 'add_1_data'),
+                                 ],
+                                {'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([3, 3, 3])},
+                                 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
+                                 'add_1_data': {'is_output': True}
+                                 })
+
+        convert_muladd_to_scaleshift_or_power(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'add_1_data', 'add_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/model-optimizer/mo/middle/passes/eliminate_test.py b/model-optimizer/mo/middle/passes/eliminate_test.py
new file mode 100644 (file)
index 0000000..79b892c
--- /dev/null
@@ -0,0 +1,171 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.graph.graph import Node, erase_node
+from mo.middle.passes.eliminate import mark_output_reachable_nodes, graph_clean_up, \
+    get_nodes_with_attributes, mark_const_producer_nodes
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'placeholder_1': {'type': 'Placeholder', 'kind': 'op'},
+                    'placeholder_2': {'type': 'Placeholder', 'kind': 'op'},
+                    'node_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'node_4': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'node_5': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'node_6': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'placeholder_1_data_node': {'value': None, 'kind': 'data'},
+                    'placeholder_2_data_node': {'value': None, 'kind': 'data'},
+                    'data_node_1': {'value': None, 'kind': 'data'},
+                    'data_node_2': {'value': None, 'kind': 'data'},
+                    'data_node_3': {'value': None, 'kind': 'data'},
+                    'data_node_3_2': {'value': None, 'kind': 'data'},
+                    'data_node_4': {'value': None, 'kind': 'data'},
+                    'data_node_5': {'value': None, 'kind': 'data'},
+                    'data_node_6': {'value': None, 'kind': 'data'},
+                    'tf_call_1': {'type': 'TFCustomSubgraphCall', 'kind': 'op'},
+                    'tf_call_2': {'type': 'TFCustomSubgraphCall', 'kind': 'op'},
+                    'tf_call_3': {'type': 'TFCustomSubgraphCall', 'kind': 'op'},
+                    }
+
+
+class TestEliminatePass(unittest.TestCase):
+    def test_mark_output_unreachable_nodes(self):
+        """
+        Checks that all nodes that are unreachable from the output nodes are marked accordingly.
+        The graph doesn't contain data nodes yet.
+        "node_4" is output.
+
+        placeholder_1->node_1->node_2
+              \
+               -> node_3->node_4
+
+        :return: None
+        """
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'node_1'),
+                             ('node_1', 'node_2'),
+                             ('placeholder_1', 'node_3'),
+                             ('node_3', 'node_4')],
+                            {'node_4': {'is_output': True}},
+                            nodes_with_edges_only=True)
+        mark_output_reachable_nodes(graph)
+
+        self.assertListEqual(sorted(['placeholder_1', 'node_3', 'node_4']),
+                             sorted(get_nodes_with_attributes(graph, is_output_reachable=True)))
+        self.assertListEqual(sorted(['node_1', 'node_2']),
+                             sorted(get_nodes_with_attributes(graph, is_output_reachable=False)))
+
+    def test_mark_output_unreachable_nodes_behind_output(self):
+        """
+        Checks the case when an unreachable node is 'behind' (i.e. is a child of) the output node.
+        The graph doesn't contain data nodes yet.
+        "node_2" is output.
+
+        placeholder_1->node_1->node_2->node_3
+
+        :return: None
+        """
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'node_1'),
+                             ('node_1', 'node_2'),
+                             ('node_2', 'node_3')],
+                            {'node_2': {'is_output': True}},
+                            nodes_with_edges_only=True)
+        mark_output_reachable_nodes(graph)
+
+        self.assertListEqual(sorted(['placeholder_1', 'node_1', 'node_2']),
+                             sorted(get_nodes_with_attributes(graph, is_output_reachable=True)))
+        self.assertFalse(graph.node['node_3']['is_output_reachable'])
+
+    def test_mark_ops_producing_constant_values(self):
+        """
+        Checks the case when an operation produces only constant tensors so it can be removed. If the node produces
+        several tensors and at least one of them is not constant, then the node should not be marked.
+        The graph contains data nodes.
+        "data_node_2" and "data_node_5" are output.
+        "node_3" produces constant tensor "data_node_3" and non-constant tensor "data_node_3_2".
+        "node_6" produces constant tensor "data_node_6".
+        "node_4" could be eliminated since it gets constant input.
+
+                             node_6->data_node_6->
+                                                  \
+        placeholder_1->placeholder_1_data_node->node_1->data_node_1->node_2->data_node_2
+                                                  /
+        node_3->data_node_3->node_4->data_node_4->
+           \
+            ->data_node_3_2->node_5->data_node_5
+
+        :return: None
+        """
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data_node'),
+                             ('placeholder_1_data_node', 'node_1'),
+                             ('node_1', 'data_node_1'),
+                             ('data_node_1', 'node_2'),
+                             ('node_2', 'data_node_2'),
+                             ('node_3', 'data_node_3'),
+                             ('node_3', 'data_node_3_2'),
+                             ('node_6', 'data_node_6'),
+                             ('data_node_6', 'node_1'),
+                             ('data_node_3_2', 'node_5'),
+                             ('node_5', 'data_node_5'),
+                             ('data_node_3', 'node_4'),
+                             ('data_node_4', 'node_1')],
+                            {'data_node_2': {'is_output': True},
+                             'data_node_5': {'is_output': True},
+                             'data_node_3': {'value': np.array(1)},
+                             'data_node_6': {'value': np.array(1)}},
+                            nodes_with_edges_only=True)
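+        # node_6 feeds node_1 only with a constant tensor, so it is expected to be marked as a
+        # const producer and later removed by graph_clean_up; node_3 also produces the
+        # non-constant data_node_3_2, so it has to stay.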
+        mark_const_producer_nodes(graph)
+        self.assertTrue(graph.node['node_6']['is_const_producer'])
+        self.assertListEqual(sorted(['node_1', 'node_2', 'node_3', 'node_5', 'placeholder_1']),
+                             sorted(get_nodes_with_attributes(graph, is_const_producer=False, kind='op')))
+
+        graph_clean_up(graph)
+        self.assertTrue('node_3' in graph.nodes())
+        self.assertTrue('node_4' not in graph.nodes())
+        self.assertTrue('node_6' not in graph.nodes())
+
+    def test_undead_nodes_with_constant_inputs(self):
+        """
+        Checks that a node of the 'undead' type with constant inputs is not removed from the graph.
+        :return: None
+        """
+        pass
+
+    def test_remove_node_from_graph(self):
+        """
+        Checks removing a node from the graph.
+        The graph doesn't contain data nodes yet.
+        "node_2" should be removed.
+
+        placeholder_1->node_1->node_2->node_3
+
+        :return: None
+        """
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'node_1'),
+                             ('node_1', 'node_2'),
+                             ('node_2', 'node_3')],
+                            nodes_with_edges_only=True)
+        erase_node(Node(graph, 'node_2'))
+
+        self.assertListEqual(sorted(['placeholder_1', 'node_1', 'node_3']), sorted(graph.nodes()))
diff --git a/model-optimizer/mo/middle/passes/fusing/decomposition_test.py b/model-optimizer/mo/middle/passes/fusing/decomposition_test.py
new file mode 100644 (file)
index 0000000..2179f21
--- /dev/null
@@ -0,0 +1,531 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.middle.passes.eliminate import graph_clean_up
+from mo.middle.passes.fusing.decomposition import convert_scale_shift_to_mul_add, convert_batch_norm, \
+    convert_bn_to_mul_add
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_2': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # ScaleShift layer
+    'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift', 'axis': 0},
+    'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Mul and Add operations
+    'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'},
+    'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'add_1': {'type': None, 'kind': 'op', 'op': 'Add'},
+    'add_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Mul and Add operations
+    'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'},
+    'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'add_2': {'type': None, 'kind': 'op', 'op': 'Add'},
+    'add_2_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'add_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Reshape
+    'placeholder_2/Reshape_': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'placeholder_2/Reshape_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # BatchNorm operation
+    'bn_op': {'type': None, 'kind': 'op', 'op': 'BatchNorm', 'can_be_fused': True},
+    'bn_const': {'value': None, 'shape': None, 'kind': 'data'},
+    'bn_beta': {'value': None, 'shape': None, 'kind': 'data'},
+    'bn_mean': {'value': None, 'shape': None, 'kind': 'data'},
+    'bn_var': {'value': None, 'shape': None, 'kind': 'data'},
+    'bn_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Concat operation
+    'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+    'concat_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+class ScaleShiftToMulAdd(unittest.TestCase):
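+    # convert_scale_shift_to_mul_add is expected to split a ScaleShift layer into elementwise Mul
+    # and Add nodes, dropping the Add when the biases are all zeros and removing the layer entirely
+    # when it is an identity, unless the node carries can_be_fused=False.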
+    # ScaleShift -> Mul
+    def test_scaleshift_to_mul_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'scaleshift_1_data': {'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'scaleshift_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'mul_1': {'can_be_fused': True},
+                                 'scaleshift_1_data': {'is_output': True}
+                                 })
+
+        graph.graph['layout'] = 'NHWC'
+        convert_scale_shift_to_mul_add(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
+
+    # ScaleShift with 2 inputs -> Mul
+    def test_scaleshift2_to_mul(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('placeholder_2_data', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'placeholder_2_data': {'shape': np.array([1, 227])},
+                             'scaleshift_1_data': {'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_2_data', 'placeholder_2/Reshape_'),
+                                 ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('placeholder_2/Reshape_data', 'mul_1'),
+                                 ('mul_1', 'scaleshift_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'placeholder_2_data': {'shape': np.array([1, 227])},
+                                 'placeholder_2/Reshape_': {'dim': np.array([1, 227, 1, 1])},
+                                 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
+                                 'mul_1': {'can_be_fused': True},
+                                 'scaleshift_1_data': {'is_output': True}
+                                 })
+
+        graph.graph['layout'] = 'NHWC'
+        convert_scale_shift_to_mul_add(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
+
+    # ScaleShift with 2 inputs -> Mul (axis = 1)
+    def test_scaleshift2_axis1_to_mul(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('placeholder_2_data', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'placeholder_2_data': {'shape': np.array([227])},
+                             'scaleshift_1': {'axis': 1},
+                             'scaleshift_1_data': {'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_2_data', 'placeholder_2/Reshape_'),
+                                 ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('placeholder_2/Reshape_data', 'mul_1'),
+                                 ('mul_1', 'scaleshift_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'placeholder_2_data': {'shape': np.array([227])},
+                                 'placeholder_2/Reshape_': {'dim': np.array([1, 227, 1, 1])},
+                                 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
+                                 'mul_1': {'can_be_fused': True},
+                                 'scaleshift_1_data': {'is_output': True}
+                                 })
+
+        graph.graph['layout'] = 'NHWC'
+        convert_scale_shift_to_mul_add(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
+
+
+    # ScaleShift -> Mul (Zero biases)
+    def test_scaleshift_to_mul_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1_b', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
+                             'scaleshift_1_data': {'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'scaleshift_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'mul_1': {'can_be_fused': True},
+                                 'scaleshift_1_data': {'is_output': True}
+                                 })
+
+        graph.graph['layout'] = 'NHWC'
+        convert_scale_shift_to_mul_add(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
+
+    # ScaleShift -> Mul -> Add
+    def test_scaleshift_to_mul_add(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1_b', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
+                             'scaleshift_1_data': {'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'scaleshift_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1': {'can_be_fused': True},
+                                 'mul_1': {'can_be_fused': True},
+                                 'scaleshift_1_data': {'is_output': True}
+                                 })
+
+        graph.graph['layout'] = 'NHWC'
+        convert_scale_shift_to_mul_add(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
+
+    # ScaleShift -> None (Zero weights and biases)
+    def test_scaleshift_to_nothing(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1_b', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
+                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
+                             'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3]), 'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data')],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3]), 'is_output': True}})
+
+        graph.graph['layout'] = 'NHWC'
+        convert_scale_shift_to_mul_add(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data')
+        self.assertTrue(flag, resp)
+
+    # ScaleShift -> ScaleShift (can_be_fused=False)
+    def test_scaleshift_can_be_fused(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1_b', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
+                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
+                             'scaleshift_1': {'can_be_fused': False},
+                             'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3]), 'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'scaleshift_1'),
+                                 ('scaleshift_1_w', 'scaleshift_1'),
+                                 ('scaleshift_1_b', 'scaleshift_1'),
+                                 ('scaleshift_1', 'scaleshift_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
+                                 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
+                                 'scaleshift_1': {'can_be_fused': False},
+                                 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3]), 'is_output': True}
+                                 })
+
+        convert_scale_shift_to_mul_add(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
+
+
+class BatchNormDecomposition(unittest.TestCase):
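+    # convert_batch_norm is expected to decompose a fused BatchNorm into two Mul/Add pairs:
+    # the first pair normalises the input, the second applies the gamma (bn_const) and beta
+    # weights; the can_be_fused flag is expected to propagate to the generated nodes.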
+    def test_bn_decomposition_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'bn_op'),
+                             ('bn_const', 'bn_op'),
+                             ('bn_beta', 'bn_op'),
+                             ('bn_mean', 'bn_op'),
+                             ('bn_var', 'bn_op'),
+                             ('bn_op', 'bn_data'),
+                             ('concat', 'concat_data'),
+                             ('bn_data', 'concat')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'bn_op': {'eps': 1.2},
+                             'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_data': {'shape': np.array([1, 227, 227, 3])},
+                             'concat_data': {'is_output': True}
+                             })
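+        # With variance [1, 2, 3] and eps 1.2 the expected normalisation scale is
+        # 1/sqrt(var + eps) ~ [0.6742, 0.5590, 0.4880] and the shift is -mean/sqrt(var + eps);
+        # these are the mul_1_w/add_1_w values encoded in the reference graph below.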
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'mul_2'),
+                                 ('mul_2_w', 'mul_2'),
+                                 ('mul_2', 'mul_2_data'),
+                                 ('mul_2_data', 'add_2'),
+                                 ('add_2_w', 'add_2'),
+                                 ('add_2', 'add_2_data'),
+                                 ('concat', 'concat_data'),
+                                 ('add_2_data', 'concat')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]),
+                                             'value': np.array([0.67419986, 0.55901699, 0.48795004])},
+                                 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'add_1_w': {'shape': np.array([3]),
+                                             'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
+                                 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'add_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1': {'can_be_fused': True},
+                                 'mul_2': {'can_be_fused': True},
+                                 'add_1': {'can_be_fused': True},
+                                 'add_2': {'can_be_fused': True},
+                                 'concat_data': {'is_output': True}
+                                 })
+
+        graph.graph['layout'] = 'NHWC'
+        convert_batch_norm(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
+        self.assertTrue(flag, resp)
+
+    # 'can_be_fused': False for BatchNorm
+    def test_bn_decomposition_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'bn_op'),
+                             ('bn_const', 'bn_op'),
+                             ('bn_beta', 'bn_op'),
+                             ('bn_mean', 'bn_op'),
+                             ('bn_var', 'bn_op'),
+                             ('bn_op', 'bn_data'),
+                             ('concat', 'concat_data'),
+                             ('bn_data', 'concat')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'bn_op': {'eps': 1.2, 'can_be_fused': False},
+                             'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_data': {'shape': np.array([1, 227, 227, 3])},
+                             'concat_data': {'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'mul_2'),
+                                 ('mul_2_w', 'mul_2'),
+                                 ('mul_2', 'mul_2_data'),
+                                 ('mul_2_data', 'add_2'),
+                                 ('add_2_w', 'add_2'),
+                                 ('add_2', 'add_2_data'),
+                                 ('concat', 'concat_data'),
+                                 ('add_2_data', 'concat')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]),
+                                             'value': np.array([0.67419986, 0.55901699, 0.48795004])},
+                                 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'add_1_w': {'shape': np.array([3]),
+                                             'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
+                                 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'add_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1': {'can_be_fused': False},
+                                 'mul_2': {'can_be_fused': False},
+                                 'add_1': {'can_be_fused': False},
+                                 'add_2': {'can_be_fused': False},
+                                 'concat_data': {'is_output': True}
+                                 })
+
+        graph.graph['layout'] = 'NHWC'
+        convert_batch_norm(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
+        self.assertTrue(flag, resp)
+
+    def test_caffe_bn_decomposition_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'bn_op'),
+                             ('bn_mean', 'bn_op'),
+                             ('bn_var', 'bn_op'),
+                             ('bn_op', 'bn_data'),
+                             ('concat', 'concat_data'),
+                             ('bn_data', 'concat')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'bn_op': {'epsilon': 1.2, 'op': 'BatchNormalization'},
+                             'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_data': {'shape': np.array([1, 227, 227, 3])},
+                             'concat_data': {'is_output': True}
+                             })
+
+        del graph['placeholder_1']['placeholder_1_data'][0]['in']
+        del graph['bn_op']['bn_data'][0]['in']
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('concat', 'concat_data'),
+                                 ('add_1_data', 'concat')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]),
+                                             'value': np.array([0.67419986, 0.55901699, 0.48795004])},
+                                 'add_1_w': {'shape': np.array([3]),
+                                             'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1': {'can_be_fused': True},
+                                 'add_1': {'can_be_fused': True},
+                                 'concat_data': {'is_output': True}
+                                 })
+
+        graph.graph['layout'] = 'NHWC'
+        convert_bn_to_mul_add(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
+        self.assertTrue(flag, resp)
+
+    # 'can_be_fused': False for BatchNormalization
+    def test_caffe_bn_decomposition_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'bn_op'),
+                             ('bn_mean', 'bn_op'),
+                             ('bn_var', 'bn_op'),
+                             ('bn_op', 'bn_data'),
+                             ('concat', 'concat_data'),
+                             ('bn_data', 'concat')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'bn_op': {'epsilon': 1.2, 'op': 'BatchNormalization', 'can_be_fused': False},
+                             'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'bn_data': {'shape': np.array([1, 227, 227, 3])},
+                             'concat_data': {'is_output': True}
+                             })
+
+        del graph['placeholder_1']['placeholder_1_data'][0]['in']
+        del graph['bn_op']['bn_data'][0]['in']
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('concat', 'concat_data'),
+                                 ('add_1_data', 'concat')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]),
+                                             'value': np.array([0.67419986, 0.55901699, 0.48795004])},
+                                 'add_1_w': {'shape': np.array([3]),
+                                             'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1': {'can_be_fused': False},
+                                 'add_1': {'can_be_fused': False},
+                                 'concat_data': {'is_output': True}
+                                 })
+
+        graph.graph['layout'] = 'NHWC'
+        convert_bn_to_mul_add(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/mo/middle/passes/fusing/fuse_linear_ops_test.py b/model-optimizer/mo/middle/passes/fusing/fuse_linear_ops_test.py
new file mode 100644 (file)
index 0000000..30948e2
--- /dev/null
@@ -0,0 +1,1201 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.graph.graph import Node
+from mo.middle.passes.eliminate import graph_clean_up
+from mo.middle.passes.fusing.fuse_linear_ops import _fuse_mul, _fuse_add, fuse_linear_ops
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
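+# Node attributes shared by the test graphs built with build_graph() below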
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # ScaleShift layer
+    'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'},
+    'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Mul and Add operations
+    'mul_1': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True},
+    'mul_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_1': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True},
+    'add_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Mul2 and Add2 operations
+    'mul_2': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True},
+    'mul_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'mul_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_2': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True},
+    'add_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Concat1 operation
+    'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+    'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Convolutions
+    'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'},
+    'conv_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'},
+    'conv_2_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # FullyConnected
+    'fc_1': {'type': 'FullyConnected', 'kind': 'op', 'op': 'InnerProduct', 'layout': 'NHWC'},
+    'fc_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'fc_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'fc_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Placeholders
+    'placeholder_2': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_3': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+}
+
+
+# Unit tests for fuse_mul
+class FuseMulTests(unittest.TestCase):
+    # Mul(array)->Conv(w+b)
+    def test_fuse_mul_to_conv_1(self):
+        # Placeholder->Mul->Conv
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_1_data': {'is_output': True}
+                             })
+        ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([1, 2, 3]), (3, 1))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_1_data': {'is_output': True}
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_1_data')
+        self.assertTrue(flag, resp)
+
+    # Mul(scalar)->Conv(w+b)
+    def test_fuse_mul_to_conv_2(self):
+        # Placeholder->Mul->Conv
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_1_data': {'is_output': True}
+                             })
+        ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([6, 6, 6]), (3, 1))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_1_data': {'is_output': True}
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_1_data')
+        self.assertTrue(flag, resp)
+
+    # Conv(w+b)->Mul(array)
+    def test_fuse_mul_to_conv_3(self):
+        # Placeholder->Conv->Mul
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('conv_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.ones(96)},
+                             'conv_1_data': {'shape': np.array([1, 55, 55, 96])},
+                             'mul_1_data': {'shape': np.array([1, 55, 55, 96]), 'is_output': True},
+                             'mul_1_w': {'shape': np.array([96]), 'value': np.array([x for x in range(96)])},
+                             })
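+        # Backward fusion: the Mul values scale the Conv weights and biases per output channel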
+        ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([x for x in range(96)]), 96)
+        ref_biases = np.ones(96) * np.array([x for x in range(96)])
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_b': {'shape': ref_biases.shape, 'value': ref_biases},
+                                 'conv_1_data': {'shape': np.array([1, 55, 55, 96]), 'is_output': True}
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=True)
+        graph_clean_up(graph)
+
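+        # After backward fusion the last data node differs: 'mul_1_data' in graph, 'conv_1_data' in graph_ref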
+        (flag, resp) = compare_graphs(graph, graph_ref, 'mul_1_data', 'conv_1_data')
+        self.assertTrue(flag, resp)
+
+    # Conv(w+b)->Mul(scalar)
+    def test_fuse_mul_to_conv_4(self):
+        # Placeholder->Conv->Mul
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('conv_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.ones(96)},
+                             'conv_1_data': {'shape': np.array([1, 55, 55, 96])},
+                             'mul_1_data': {'shape': np.array([1, 55, 55, 96]), 'is_output': True},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             })
+        ref_weights = np.ones((11, 11, 3, 96)) * np.array([6])
+        ref_biases = np.ones(96) * np.array([6])
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_b': {'shape': ref_biases.shape, 'value': ref_biases},
+                                 'conv_1_data': {'shape': np.array([1, 55, 55, 96]), 'is_output': True}
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=True)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'mul_1_data', 'conv_1_data')
+        self.assertTrue(flag, resp)
+
+    # Op0-+->Op1--+----+-->Concat     Op0-+->Op1--+--+-->Concat
+    #  |  |       |    |               |  |       |  |
+    #  |  +->Op2--+    |          =>   |  +->Op2--+  |
+    #  +---->Mul->Conv-+               +---->Conv----+
+    def test_fuse_mul_to_conv_5(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('placeholder_1_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_1_data', 'placeholder_3'),
+                             ('placeholder_3', 'placeholder_3_data'),
+                             ('placeholder_2_data', 'concat_1'),
+                             ('placeholder_3_data', 'concat_1'),
+                             ('conv_1_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'concat_1_data': {'is_output': True}
+                             })
+        ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([6, 6, 6]), (3, 1))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('placeholder_1_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_1_data', 'placeholder_3'),
+                                 ('placeholder_3', 'placeholder_3_data'),
+                                 ('placeholder_2_data', 'concat_1'),
+                                 ('placeholder_3_data', 'concat_1'),
+                                 ('conv_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 3,
+                                              'input_channel_dim': 2, 'dims_number': 4},
+                                 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_1_data': {'is_output': True},
+                                 'placeholder_2_data': {'is_output': True},
+                                 'placeholder_3_data': {'is_output': True},
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
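+    # Same as test_fuse_mul_to_conv_5, but the scalar multiplier is given as np.array([6]) instead of 6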
+    def test_fuse_mul_to_conv_5_nparray(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('placeholder_1_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_1_data', 'placeholder_3'),
+                             ('placeholder_3', 'placeholder_3_data'),
+                             ('placeholder_2_data', 'concat_1'),
+                             ('placeholder_3_data', 'concat_1'),
+                             ('conv_1_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': np.array([6])},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'concat_1_data': {'is_output': True}
+                             })
+        ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([6, 6, 6]), (3, 1))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('placeholder_1_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_1_data', 'placeholder_3'),
+                                 ('placeholder_3', 'placeholder_3_data'),
+                                 ('placeholder_2_data', 'concat_1'),
+                                 ('placeholder_3_data', 'concat_1'),
+                                 ('conv_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 3,
+                                              'input_channel_dim': 2, 'dims_number': 4},
+                                 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_1_data': {'is_output': True},
+                                 'placeholder_2_data': {'is_output': True},
+                                 'placeholder_3_data': {'is_output': True},
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Op->Mul(array)-+->Conv(w+b)--+->Concat     Op-+->Conv1-+-->Concat
+    #                |             |         =>     |        |
+    #                +-->Conv(w+b)-+                +->Conv2-+
+    def test_fuse_mul_to_convolutions_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('mul_1_data', 'conv_2'),
+                             ('conv_2_w', 'conv_2'),
+                             ('conv_2_b', 'conv_2'),
+                             ('conv_2', 'conv_2_data'),
+                             ('conv_1_data', 'concat_1'),
+                             ('conv_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_1_data': {'shape': np.array([1, 55, 55, 96])},
+                             'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_2_data': {'shape': np.array([1, 55, 55, 96])},
+                             'concat_1_data': {'is_output': True}
+                             })
+        ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([1, 2, 3]), (3, 1))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('placeholder_1_data', 'conv_2'),
+                                 ('conv_2_w', 'conv_2'),
+                                 ('conv_2_b', 'conv_2'),
+                                 ('conv_2', 'conv_2_data'),
+                                 ('conv_1_data', 'concat_1'),
+                                 ('conv_2_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_1_data': {'shape': np.array([1, 55, 55, 96])},
+                                 'conv_2_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_2_data': {'shape': np.array([1, 55, 55, 96])},
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1'), Node(graph, 'conv_2')], backward=False)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Mul(array)->FC(w+b)
+    def test_fuse_mul_to_fc_1(self):
+        # Placeholder->Mul->FC
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'fc_1'),
+                             ('fc_1_w', 'fc_1'),
+                             ('fc_1_b', 'fc_1'),
+                             ('fc_1', 'fc_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                             'mul_1_data': {'shape': np.array([1, 2048])},
+                             'mul_1_w': {'shape': np.array([2048]), 'value': np.array([x for x in range(2048)])},
+                             'fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048)),
+                                        'output_channel_dim': 0, 'input_channel_dim': 1,
+                                        'dims_number': 2},
+                             'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)},
+                             'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                             })
+        ref_weights = np.ones((10260, 2048)) * np.array([x for x in range(2048)])
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'fc_1'),
+                                 ('fc_1_w', 'fc_1'),
+                                 ('fc_1_b', 'fc_1'),
+                                 ('fc_1', 'fc_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                                 'fc_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                            'output_channel_dim': 0, 'input_channel_dim': 1,
+                                            'dims_number': 2},
+                                 'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)},
+                                 'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'fc_1')], backward=False)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'fc_1_data')
+        self.assertTrue(flag, resp)
+
+    # Mul(scalar)->Conv(w+b) can_be_fused = False
+    def test_fuse_mul_to_conv_6(self):
+        # Placeholder->Mul->Conv
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'conv_1': {'can_be_fused': False},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_1_data': {'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'conv_1': {'can_be_fused': False},
+                                 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_1_data': {'is_output': True}
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_1_data')
+        self.assertTrue(flag, resp)
+
+    # Mul(array)->DWConv(w)
+    def test_fuse_mul_to_dwconv_1(self):
+        # Placeholder->Mul->Conv
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 112, 112, 6])},
+                             'mul_1_data': {'shape': np.array([1, 112, 112, 6])},
+                             'mul_1_w': {'shape': np.array([6]), 'value': np.array([1, 2, 3, 4, 5, 6])},
+                             'conv_1_w': {'shape': np.array([3, 3, 6, 1]), 'value': np.ones((3, 3, 6, 1)),
+                                          'output_channel_dim': 2, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_data': {'is_output': True}
+                             })
+        ref_weights = np.ones((3, 3, 6, 1)) * np.reshape(np.array([1, 2, 3, 4, 5, 6]), (6, 1))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 112, 112, 6])},
+                                 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 2, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_data': {'is_output': True}
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_1_data')
+        self.assertTrue(flag, resp)
+
+    # DWConv(w)->Mul(array)
+    def test_fuse_mul_to_dwconv_2(self):
+        # Placeholder->Conv->Mul
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('conv_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 112, 112, 6])},
+                             'mul_1_data': {'shape': np.array([1, 112, 112, 6]), 'is_output': True},
+                             'mul_1_w': {'shape': np.array([6]), 'value': np.array([1, 2, 3, 4, 5, 6])},
+                             'conv_1_w': {'shape': np.array([3, 3, 6, 1]), 'value': np.ones((3, 3, 6, 1)),
+                                          'output_channel_dim': 2, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_data': {'is_output': True}
+                             })
+
+        ref_weights = np.ones((3, 3, 6, 1)) * np.reshape(np.array([1, 2, 3, 4, 5, 6]), (6, 1))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 112, 112, 6])},
+                                 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 2, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 })
+
+        _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=True)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'mul_1_data', 'conv_1_data')
+        self.assertTrue(flag, resp)
+
+
+# Unit tests for fuse_add
+class FuseAddTests(unittest.TestCase):
+    # Add(array)->FC(w+b)
+    def test_fuse_add_to_fc_1(self):
+        # Placeholder->Add->FC
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'fc_1'),
+                             ('fc_1_w', 'fc_1'),
+                             ('fc_1_b', 'fc_1'),
+                             ('fc_1', 'fc_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                             'add_1_data': {'shape': np.array([1, 2048])},
+                             'add_1_w': {'shape': np.array([2048]), 'value': np.array([x for x in range(2048)])},
+                             'fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048)),
+                                        'output_channel_dim': 0, 'input_channel_dim': 1,
+                                        'dims_number': 2},
+                             'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)},
+                             'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                             })
+        ref_weights = np.ones((10260, 2048))
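+        # Forward fusion of Add into FC folds the added vector through the weights: b' = b + W.dot(a)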
+        ref_biases = np.ones(10260) + np.dot(np.ones((10260, 2048)), np.array([x for x in range(2048)]))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'fc_1'),
+                                 ('fc_1_w', 'fc_1'),
+                                 ('fc_1_b', 'fc_1'),
+                                 ('fc_1', 'fc_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                                 'fc_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                            'output_channel_dim': 0, 'input_channel_dim': 1,
+                                            'dims_number': 2},
+                                 'fc_1_b': {'shape': ref_biases.shape, 'value': ref_biases},
+                                 'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                                 })
+
+        _fuse_add(graph, Node(graph, 'add_1'), [Node(graph, 'fc_1')], backward=False)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'fc_1_data')
+        self.assertTrue(flag, resp)
+
+    # FC(w)->Add(array)
+    def test_fuse_add_to_fc_2(self):
+        # Placeholder->FC->Add
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'fc_1'),
+                             ('fc_1_w', 'fc_1'),
+                             ('fc_1', 'fc_1_data'),
+                             ('fc_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                             'add_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                             'add_1_w': {'shape': np.array([10260]), 'value': np.array([x for x in range(10260)]),
+                                         'data_type': None},
+                             'fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048)),
+                                        'output_channel_dim': 0, 'input_channel_dim': 1,
+                                        'dims_number': 2, 'data_type': None},
+                             'fc_1_data': {'shape': np.array([1, 10260])},
+                             })
+
+        ref_weights = np.ones((10260, 2048))
+        ref_biases = np.array([x for x in range(10260)])
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'fc_1'),
+                                 ('fc_1_w', 'fc_1'),
+                                 ('fc_1_b', 'fc_1'),
+                                 ('fc_1', 'fc_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                                 'fc_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                            'output_channel_dim': 0, 'input_channel_dim': 1,
+                                            'dims_number': 2},
+                                 'fc_1_b': {'shape': ref_biases.shape, 'value': ref_biases},
+                                 'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                                 })
+
+        _fuse_add(graph, Node(graph, 'add_1'), [Node(graph, 'fc_1')], backward=True)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'add_1_data', 'fc_1_data')
+        self.assertTrue(flag, resp)
+
+    # FC(w)->Add(scalar)
+    def test_fuse_add_to_fc_3(self):
+        # Placeholder->FC->Add
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'fc_1'),
+                             ('fc_1_w', 'fc_1'),
+                             ('fc_1', 'fc_1_data'),
+                             ('fc_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                             'add_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6, 'data_type': None},
+                             'fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048)),
+                                        'output_channel_dim': 0, 'input_channel_dim': 1,
+                                        'dims_number': 2, 'data_type': None},
+                             'fc_1_data': {'shape': np.array([1, 10260])},
+                             })
+
+        ref_weights = np.ones((10260, 2048))
+        ref_biases = np.full([10260], 6)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'fc_1'),
+                                 ('fc_1_w', 'fc_1'),
+                                 ('fc_1_b', 'fc_1'),
+                                 ('fc_1', 'fc_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                                 'fc_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                            'output_channel_dim': 0, 'input_channel_dim': 1,
+                                            'dims_number': 2},
+                                 'fc_1_b': {'shape': ref_biases.shape, 'value': ref_biases},
+                                 'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                                 })
+
+        _fuse_add(graph, Node(graph, 'add_1'), [Node(graph, 'fc_1')], backward=True)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'add_1_data', 'fc_1_data')
+        self.assertTrue(flag, resp)
+
+    # Add(array)->FC(w+b) can_be_fused = False
+    def test_fuse_add_to_fc_4(self):
+        # Placeholder->Add->FC
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'fc_1'),
+                             ('fc_1_w', 'fc_1'),
+                             ('fc_1_b', 'fc_1'),
+                             ('fc_1', 'fc_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                             'add_1_data': {'shape': np.array([1, 2048])},
+                             'add_1_w': {'shape': np.array([2048]), 'value': np.array([x for x in range(2048)])},
+                             'fc_1': {'can_be_fused': False},
+                             'fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048)),
+                                        'output_channel_dim': 0, 'input_channel_dim': 1,
+                                        'dims_number': 2},
+                             'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)},
+                             'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'fc_1'),
+                                 ('fc_1_w', 'fc_1'),
+                                 ('fc_1_b', 'fc_1'),
+                                 ('fc_1', 'fc_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                                 'add_1_data': {'shape': np.array([1, 2048])},
+                                 'add_1_w': {'shape': np.array([2048]), 'value': np.array([x for x in range(2048)])},
+                                 'fc_1': {'can_be_fused': False},
+                                 'fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048)),
+                                            'output_channel_dim': 0, 'input_channel_dim': 1,
+                                            'dims_number': 2},
+                                 'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)},
+                                 'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                                 })
+
+        _fuse_add(graph, Node(graph, 'add_1'), [Node(graph, 'fc_1')], backward=False)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'fc_1_data')
+        self.assertTrue(flag, resp)
+
+
+# Unit tests for fuse_linear_ops
+class FuseLinOpsTests(unittest.TestCase):
+    # Op->Mul(array)-+->Conv(w+b)--+->Concat     Op-+->Conv1-+-->Concat
+    #                |             |         =>     |        |
+    #                +-->Conv(w+b)-+                +->Conv2-+
+    def test_fuse_lin_ops_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('mul_1_data', 'conv_2'),
+                             ('conv_2_w', 'conv_2'),
+                             ('conv_2_b', 'conv_2'),
+                             ('conv_2', 'conv_2_data'),
+                             ('conv_1_data', 'concat_1'),
+                             ('conv_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_1_data': {'shape': np.array([1, 55, 55, 96])},
+                             'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_2_data': {'shape': np.array([1, 55, 55, 96])},
+                             'concat_1_data': {'is_output': True}
+                             })
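+        # Expected weights after fusion: each of the 3 input channels (dim 2) of the
+        # 11x11x3x96 kernel is scaled by the matching Mul coefficient from [1, 2, 3].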
+        ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([1, 2, 3]), (3, 1))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('placeholder_1_data', 'conv_2'),
+                                 ('conv_2_w', 'conv_2'),
+                                 ('conv_2_b', 'conv_2'),
+                                 ('conv_2', 'conv_2_data'),
+                                 ('conv_1_data', 'concat_1'),
+                                 ('conv_2_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_1_data': {'shape': np.array([1, 55, 55, 96])},
+                                 'conv_2_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_2_data': {'shape': np.array([1, 55, 55, 96])},
+                                 })
+
+        fuse_linear_ops(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Mul(array)->FC(w+b)
+    def test_fuse_mul_to_fc_1(self):
+        # Placeholder->Mul->FC
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'fc_1'),
+                             ('fc_1_w', 'fc_1'),
+                             ('fc_1_b', 'fc_1'),
+                             ('fc_1', 'fc_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                             'mul_1_data': {'shape': np.array([1, 2048])},
+                             'mul_1_w': {'shape': np.array([2048]), 'value': np.array([x for x in range(2048)])},
+                             'fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048)),
+                                        'output_channel_dim': 0, 'input_channel_dim': 1,
+                                        'dims_number': 2},
+                             'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)},
+                             'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                             })
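+        # Expected weights after fusion: each input column (dim 1) of the 10260x2048
+        # FC weight matrix is scaled by the corresponding Mul coefficient 0..2047.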
+        ref_weights = np.ones((10260, 2048)) * np.array([x for x in range(2048)])
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'fc_1'),
+                                 ('fc_1_w', 'fc_1'),
+                                 ('fc_1_b', 'fc_1'),
+                                 ('fc_1', 'fc_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                                 'fc_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                            'output_channel_dim': 0, 'input_channel_dim': 1,
+                                            'dims_number': 2},
+                                 'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)},
+                                 'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                                 })
+
+        fuse_linear_ops(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'fc_1_data')
+        self.assertTrue(flag, resp)
+
+    # FC(w)->Add(scalar)
+    def test_fuse_add_to_fc_3(self):
+        # Placeholder->FC->Add
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'fc_1'),
+                             ('fc_1_w', 'fc_1'),
+                             ('fc_1', 'fc_1_data'),
+                             ('fc_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                             'add_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                             'add_1_w': {'shape': np.array([1]), 'value': np.array([6]), 'data_type': None},
+                             'fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048)),
+                                        'output_channel_dim': 0, 'input_channel_dim': 1,
+                                        'dims_number': 2, 'data_type': None},
+                             'fc_1_data': {'shape': np.array([1, 10260])},
+                             })
+
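+        # Expected result: the scalar Add(6) becomes a per-output-channel bias of 6
+        # (length 10260), while the FC weights stay unchanged.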
+        ref_weights = np.ones((10260, 2048))
+        ref_biases = np.array([6 for x in range(10260)])
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'fc_1'),
+                                 ('fc_1_w', 'fc_1'),
+                                 ('fc_1_b', 'fc_1'),
+                                 ('fc_1', 'fc_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 2048])},
+                                 'fc_1_w': {'shape': ref_weights.shape, 'value': ref_weights,
+                                            'output_channel_dim': 0, 'input_channel_dim': 1,
+                                            'dims_number': 2},
+                                 'fc_1_b': {'shape': ref_biases.shape, 'value': ref_biases},
+                                 'fc_1_data': {'shape': np.array([1, 10260]), 'is_output': True},
+                                 })
+
+        fuse_linear_ops(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'add_1_data', 'fc_1_data')
+        self.assertTrue(flag, resp)
+
+    #                          +------+
+    #                          |      |           =>  Add is fused into the Conv bias, the Mul stays
+    # Placeholder--->Conv->Add-+->Mul-+->Concat
+    def test_fuse_lin_op_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1_data', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('concat_1', 'concat_1_data'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('add_1_data', 'concat_1'),
+                             ('mul_1_data', 'concat_1'),
+                             ('add_1_data', 'mul_1')],
+
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'conv_1_w': {'shape': np.array([1, 1, 3, 3]), 'value': np.zeros((1, 1, 3, 3)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([3]), 'value': np.zeros(3)},
+                             'conv_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': np.array([6])},
+                             'add_1_w': {'shape': np.array([1]), 'value': np.array([1])},
+                             'concat_1_data': {'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1_data', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1_data', 'concat_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('conv_1_data', 'mul_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'concat_1')],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'conv_1_w': {'shape': np.array([1, 1, 3, 3]), 'value': np.zeros((1, 1, 3, 3)),
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_b': {'shape': np.array([3]), 'value': np.ones(3)},
+                                 'conv_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': np.array([6])},
+                                 'concat_1_data': {'is_output': True}
+                                 })
+
+        fuse_linear_ops(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Op->Mul(array)-+->Conv(w+b)------+->Concat
+    #                |                 |         =>  Same (conv_2 has 'can_be_fused': False, so the Mul is not fused)
+    #                +-->Conv(w+b)-----+
+    def test_fuse_lin_ops_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('mul_1_data', 'conv_2'),
+                             ('conv_2_w', 'conv_2'),
+                             ('conv_2_b', 'conv_2'),
+                             ('conv_2', 'conv_2_data'),
+                             ('conv_1_data', 'concat_1'),
+                             ('conv_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_1_data': {'shape': np.array([1, 55, 55, 96])},
+                             'conv_2': {'can_be_fused': False},
+                             'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_2_data': {'shape': np.array([1, 55, 55, 96])},
+                             'concat_1_data': {'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('mul_1_data', 'conv_2'),
+                                 ('conv_2_w', 'conv_2'),
+                                 ('conv_2_b', 'conv_2'),
+                                 ('conv_2', 'conv_2_data'),
+                                 ('conv_1_data', 'concat_1'),
+                                 ('conv_2_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_1_data': {'shape': np.array([1, 55, 55, 96])},
+                                 'conv_2': {'can_be_fused': False},
+                                 'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_2_data': {'shape': np.array([1, 55, 55, 96])},
+                                 'concat_1_data': {'is_output': True}
+                                 })
+
+        fuse_linear_ops(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Op->Mul(array)-+->Conv(w+b)------+->Concat
+    #                |                 |         =>  Same (mul_1 has 'can_be_fused': False)
+    #                +-->Conv(w+b)-----+
+    def test_fuse_lin_ops_3(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('mul_1_data', 'conv_2'),
+                             ('conv_2_w', 'conv_2'),
+                             ('conv_2_b', 'conv_2'),
+                             ('conv_2', 'conv_2_data'),
+                             ('conv_1_data', 'concat_1'),
+                             ('conv_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1': {'can_be_fused': False},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_1_data': {'shape': np.array([1, 55, 55, 96])},
+                             'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                          'output_channel_dim': 3, 'input_channel_dim': 2,
+                                          'dims_number': 4},
+                             'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                             'conv_2_data': {'shape': np.array([1, 55, 55, 96])},
+                             'concat_1_data': {'is_output': True}
+                             })
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('mul_1_data', 'conv_2'),
+                                 ('conv_2_w', 'conv_2'),
+                                 ('conv_2_b', 'conv_2'),
+                                 ('conv_2', 'conv_2_data'),
+                                 ('conv_1_data', 'concat_1'),
+                                 ('conv_2_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1': {'can_be_fused': False},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_1_data': {'shape': np.array([1, 55, 55, 96])},
+                                 'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)),
+                                              'output_channel_dim': 3, 'input_channel_dim': 2,
+                                              'dims_number': 4},
+                                 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)},
+                                 'conv_2_data': {'shape': np.array([1, 55, 55, 96])},
+                                 'concat_1_data': {'is_output': True}
+                                 })
+
+        fuse_linear_ops(graph)
+        graph_clean_up(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/mo/middle/passes/fusing/fuse_linear_seq_test.py b/model-optimizer/mo/middle/passes/fusing/fuse_linear_seq_test.py
new file mode 100644 (file)
index 0000000..d320b57
--- /dev/null
@@ -0,0 +1,908 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.middle.passes.fusing.fuse_linear_seq import fuse_mul_add_sequence
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # ScaleShift layer
+    'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'},
+    'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Mul and Add operations
+    'mul_1': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True},
+    'mul_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_1': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True},
+    'add_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Mul2 and Add2 operations
+    'mul_2': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True},
+    'mul_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'mul_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_2': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True},
+    'add_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Mul3 and Add3 operations
+    'mul_3': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True},
+    'mul_3_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'mul_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_3': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True},
+    'add_3_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Mul4 and Add4 operations
+    'mul_4': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True},
+    'mul_4_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'mul_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_4': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True},
+    'add_4_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Concat1 operation
+    'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+    'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Convolutions
+    'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'},
+    'conv_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'},
+    'conv_2_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # FullyConnected
+    'fc_1': {'type': 'FullyConnected', 'kind': 'op', 'op': 'InnerProduct', 'layout': 'NHWC'},
+    'fc_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'fc_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'fc_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Placeholders
+    'placeholder_2': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_3': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+}
+
+
+# Unit tests for fuse_mul_add_sequence
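+# These tests exercise fuse_mul_add_sequence which, as exercised here, collapses a chain
+# of constant Mul/Add ops into at most one Mul followed by one Add, dropping identity ops
+# (multiplication by 1, addition of 0) when intermediate results have no other consumers.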
+class LinSeqFusingTests(unittest.TestCase):
+    # Placeholder-+->Mul->Add->Mul-+->Concat
+    #             |                |
+    #             +----------------+
+    def test_fuse_lin_seq_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'concat_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
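+        # Reference: (x * 6 + 6) * 6 == x * 36 + 36, so the chain collapses to Mul(36) -> Add(36).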
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('placeholder_1_data', 'concat_1'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': np.array([36])},
+                                 'add_1_w': {'shape': np.array([1]), 'value': np.array([36])},
+                                 'mul_1': {'can_be_fused': True},
+                                 'add_1': {'can_be_fused': True},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    #             +----------------+
+    #             |                |
+    # Placeholder-+->Mul->Add->Mul-+---------------+->Concat
+    #                           |                  |
+    #                           +-->Placeholder----+
+    def test_fuse_lin_seq_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'concat_1'),
+                             ('mul_2_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_2_data', 'concat_1')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('placeholder_1_data', 'concat_1'),
+                                 ('add_1_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_2_data', 'concat_1')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': np.array([36])},
+                                 'add_1_w': {'shape': np.array([1]), 'value': np.array([36])},
+                                 'mul_1': {'can_be_fused': True},
+                                 'add_1': {'can_be_fused': True},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    #                      +----->Placeholder
+    #                      |        |          =>  The same graph
+    # Placeholder--->Mul->Add->Mul--+->Concat
+    def test_fuse_lin_seq_3(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('add_1_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_2_data', 'concat_1')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'mul_2'),
+                                 ('mul_2_w', 'mul_2'),
+                                 ('mul_2', 'mul_2_data'),
+                                 ('mul_2_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('add_1_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_2_data', 'concat_1')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'add_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    #                 +-------->Placeholder                          +-------->Placeholder
+    #                 |            |           =>                    |            |
+    # Placeholder--->Mul->Add->Mul-+->Concat         Placeholder-+->Mul->Mul->Add-+->Concat
+    def test_fuse_lin_seq_4(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('mul_1_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_2_data', 'concat_1')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
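+        # Reference: mul_1 must stay because its output also feeds placeholder_2; the trailing
+        # Add(6) -> Mul(6) is reordered into Mul(6) -> Add(36) since (y + 6) * 6 == y * 6 + 36.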
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'mul_2'),
+                                 ('mul_2_w', 'mul_2'),
+                                 ('mul_2', 'mul_2_data'),
+                                 ('mul_2_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('mul_1_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_2_data', 'concat_1')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'add_1_w': {'shape': np.array([1]), 'value': np.array([36])},
+                                 'mul_2_w': {'shape': np.array([1]), 'value': np.array([6])},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    #                 +-------->Placeholder                          +->Placeholder
+    #                 |            |           =>                    |            |
+    # Placeholder--->Mul->Add->Mul-+->Concat         Placeholder--->Mul-----------+->Concat
+    def test_fuse_lin_seq_5(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('mul_1_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_2_data', 'concat_1')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 0},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 1},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
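+        # Reference: Add(0) and Mul(1) are identities and are dropped, leaving only Mul(6).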
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('mul_1_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_2_data', 'concat_1')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    #                 +-------->Placeholder                          +->Placeholder
+    #                 |            |           =>                    |            |
+    # Placeholder--->Mul->Add->Mul-+->Concat         Placeholder--->Mul-->Add-----+->Concat
+    def test_fuse_lin_seq_6(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('mul_1_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_2_data', 'concat_1')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 1},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('mul_1_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_2_data', 'concat_1')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'add_1_w': {'shape': np.array([1]), 'value': np.array([6])},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    #                 +-------->Placeholder                          +->Placeholder
+    #                 |            |           =>                    |            |
+    # Placeholder--->Mul->Add->Mul-+->Concat         Placeholder--->Mul-->Mul-----+->Concat
+    def test_fuse_lin_seq_7(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('mul_1_data', 'placeholder_2'),
+                             ('placeholder_2', 'placeholder_2_data'),
+                             ('placeholder_2_data', 'concat_1')
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 0},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'mul_2'),
+                                 ('mul_2_w', 'mul_2'),
+                                 ('mul_2', 'mul_2_data'),
+                                 ('mul_2_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('mul_1_data', 'placeholder_2'),
+                                 ('placeholder_2', 'placeholder_2_data'),
+                                 ('placeholder_2_data', 'concat_1')
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'mul_2_w': {'shape': np.array([1]), 'value': np.array([6])},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Placeholder--->Mul->Add->Mul->Concat    =>    Placeholder->Concat
+    def test_fuse_lin_seq_8(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 1},
+                             'add_1_w': {'shape': np.array([1]), 'value': 0},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 1},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
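+        # Reference: Mul(1) -> Add(0) -> Mul(1) is a no-op, so the whole sequence is removed
+        # and the Placeholder feeds Concat directly.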
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Placeholder--->Mul->Add->Mul->Concat    =>    Placeholder->Mul->Add->Concat
+    def test_fuse_lin_seq_9(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': np.array([36])},
+                                 'add_1_w': {'shape': np.array([1]), 'value': np.array([36])},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Placeholder--->Mul->Add->Mul-+->Concat         Placeholder->Mul->Add->Concat
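+    # The per-channel Add([6, 6, 6]) forces the fused weights to shape [3]: (6*x + [6, 6, 6]) * 6 = [36, 36, 36]*x + [36, 36, 36].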
+    def test_fuse_lin_seq_10(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([3]), 'value': np.array([6, 6, 6])},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([36, 36, 36])},
+                                 'add_1_w': {'shape': np.array([3]), 'value': np.array([36, 36, 36])},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Placeholder-+->Mul->Add->Mul-+->Concat
+    #             |                |            (mul_1 and add_1 are marked 'can_be_fused' = False)
+    #             +----------------+
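+    # Nothing is expected to be fused here: the reference graph is identical to the input graph.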
+    def test_fuse_lin_seq_11(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'concat_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_1': {'can_be_fused': False},
+                             'add_1': {'can_be_fused': False},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'mul_2'),
+                                 ('mul_2_w', 'mul_2'),
+                                 ('mul_2', 'mul_2_data'),
+                                 ('mul_2_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('placeholder_1_data', 'concat_1'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'add_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                                 'mul_1': {'can_be_fused': False},
+                                 'add_1': {'can_be_fused': False},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Placeholder-+->Mul->Add->Mul-+->Concat
+    #             |                |            (only add_1 is marked 'can_be_fused' = False)
+    #             +----------------+
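+    # A single non-fusable node in the middle is expected to keep the whole Mul->Add->Mul sequence untouched.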
+    def test_fuse_lin_seq_12(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'concat_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1': {'can_be_fused': False},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'mul_2'),
+                                 ('mul_2_w', 'mul_2'),
+                                 ('mul_2', 'mul_2_data'),
+                                 ('mul_2_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('placeholder_1_data', 'concat_1'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'add_1_w': {'shape': np.array([1]), 'value': 6},
+                                 'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                                 'add_1': {'can_be_fused': False},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
+
+    # Placeholder-+->Mul->Add->Mul-+->Concat
+    #             |                |
+    #             +->Mul->Mul->----+  (these Mul ops share their weights with the Mul ops above)
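+    # Expected result: the upper branch fuses into Mul(36)->Add(36), while the lower branch gets its own Mul(36)
+    # with a separate weight constant, so the shared constants are not modified in place.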
+    def test_fuse_lin_seq_shared_weights_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'mul_3'),
+                             ('mul_3', 'mul_3_data'),
+                             ('mul_1_w', 'mul_3'),
+                             ('mul_3_data', 'mul_4'),
+                             ('mul_2_w', 'mul_4'),
+                             ('mul_4', 'mul_4_data'),
+                             ('mul_4_data', 'concat_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_3_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_4_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('placeholder_1_data', 'mul_3'),
+                                 ('mul_3', 'mul_3_data'),
+                                 ('mul_3_w', 'mul_3'),
+                                 ('mul_3_data', 'concat_1'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_3_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([1]), 'value': np.array([36])},
+                                 'mul_3_w': {'shape': np.array([1]), 'value': np.array([36])},
+                                 'add_1_w': {'shape': np.array([1]), 'value': np.array([36])},
+                                 'mul_1': {'can_be_fused': True},
+                                 'add_1': {'can_be_fused': True},
+                                 'concat_1_data': {'is_output': True}
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NHWC'
+        fuse_mul_add_sequence(graph)
+        self.assertTrue(len(graph.node) == len(graph_ref.node),
+                        "Graphs have a different number of nodes: {} and {}".format(len(graph.node),
+                                                                                 len(graph_ref.node)))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/mo/middle/passes/fusing/helpers_test.py b/model-optimizer/mo/middle/passes/fusing/helpers_test.py
new file mode 100644 (file)
index 0000000..feb2020
--- /dev/null
@@ -0,0 +1,307 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.graph.graph import Node
+from mo.middle.passes.fusing.helpers import forward_bfs, backward_bfs, get_next_operation
+from mo.utils.unittest.graph import build_graph
+
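+# Each 'op' node below is paired with a 'data' node; the edges passed to build_graph alternate
+# op -> data -> op, matching the Model Optimizer graph representation.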
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # ScaleShift layer
+    'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'},
+    'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Mul and Add operations
+    'mul_1': {'type': 'Mul', 'kind': 'op', 'op': 'Mul'},
+    'mul_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_1': {'type': 'Add', 'kind': 'op', 'op': 'Add'},
+    'add_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Mul2 and Add2 operations
+    'mul_2': {'type': 'Mul', 'kind': 'op', 'op': 'Mul'},
+    'mul_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'mul_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_2': {'type': 'Add', 'kind': 'op', 'op': 'Add'},
+    'add_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Concat1 operation
+    'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+    'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Convolutions
+    'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'},
+    'conv_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'},
+    'conv_2_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # FullyConnected
+    'fc_1': {'type': 'FullyConnected', 'kind': 'op', 'op': 'InnerProduct', 'layout': 'NHWC'},
+    'fc_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'fc_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'fc_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Placeholders
+    'placeholder_2': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_3': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+}
+
+
+# Unit tests for forward and backward bfs (forward_bfs, backward_bfs)
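+# The tests below assume that bfs walks only through op types listed in the second argument
+# (or through any op when allowed_all=True) and returns the nodes whose types are in the third argument.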
+class BFSTests(unittest.TestCase):
+    def test_forward_bfs_simple(self):
+        # Placeholder->ScaleShift->Mul->Add
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_data', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1', 'add_1_data')
+                             ],
+                            {'add_1_data': {'is_output': True}})
+
+        res = forward_bfs(Node(graph, 'placeholder_1'), ['ScaleShift', 'Mul'], ['Add'])
+        self.assertTrue(len(res) == 1 and res[0].id == 'add_1', 'Add operation was not found by bfs')
+
+        res = forward_bfs(Node(graph, 'placeholder_1'), [], ['Add'], allowed_all=True)
+        self.assertTrue(len(res) == 1 and res[0].id == 'add_1', 'Add operation was not found by bfs')
+
+        res = forward_bfs(Node(graph, 'placeholder_1_data'), ['ScaleShift'], ['Add'])
+        self.assertTrue(len(res) == 0, 'No nodes should be found, but bfs found {} nodes'.format(len(res)))
+
+        res = forward_bfs(Node(graph, 'placeholder_1_data'), ['ScaleShift'], ['Mul', 'Add'])
+        self.assertTrue(len(res) == 1 and res[0].id == 'mul_1', 'BFS should find only one Mul operation')
+
+    def test_backward_bfs_simple(self):
+        # Placeholder->ScaleShift->Mul->Add
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_data', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1', 'add_1_data')
+                             ],
+                            {'add_1_data': {'is_output': True}})
+
+        res = backward_bfs(Node(graph, 'add_1_data'), ['Add', 'ScaleShift', 'Mul'], ['Placeholder'])
+        self.assertTrue(len(res) == 1 and res[0].id == 'placeholder_1', 'Placeholder operation was not found by bfs')
+
+        res = backward_bfs(Node(graph, 'add_1'), [], ['Placeholder'], allowed_all=True)
+        self.assertTrue(len(res) == 1 and res[0].id == 'placeholder_1', 'Placeholder operation was not found by bfs')
+
+        res = backward_bfs(Node(graph, 'add_1_data'), ['Add'], ['ScaleShift'])
+        self.assertTrue(len(res) == 0, 'No nodes should be found, but bfs found {} nodes'.format(len(res)))
+
+        res = backward_bfs(Node(graph, 'add_1_data'), ['Add', 'Mul'], ['Placeholder', 'ScaleShift'])
+        self.assertTrue(len(res) == 1 and res[0].id == 'scaleshift_1', 'BFS should find only one ScaleShift operation')
+
+    def test_forward_bfs_hard(self):
+        # Placeholder->ScaleShift->Mul1->Add1---->Concat
+        #             `----------->Add2->Mul2--'
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('placeholder_1_data', 'add_2'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_data', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_2', 'add_2_data'),
+                             ('add_2_data', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('add_1_data', 'concat_1'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'concat_1_data': {'is_output': True}})
+
+        res = forward_bfs(Node(graph, 'placeholder_1'), ['ScaleShift', 'Mul', 'Add'], ['Concat'])
+        self.assertTrue(len(res) == 1 and res[0].id == 'concat_1', 'Concat operation was not found by bfs')
+
+        res = forward_bfs(Node(graph, 'placeholder_1'), ['ScaleShift', 'Mul'], ['Add'])
+        self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]),
+                        'Add operations were not found by bfs')
+
+        res = forward_bfs(Node(graph, 'placeholder_1'), ['ScaleShift'], ['Add'])
+        self.assertTrue(len(res) == 0, 'BFS shouldn\'t find any operations')
+
+        res = forward_bfs(Node(graph, 'placeholder_1'), [], ['Add'], allowed_all=True)
+        self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]),
+                        'Add operations were not found by bfs')
+
+        res = forward_bfs(Node(graph, 'placeholder_1_data'), ['ScaleShift'], ['Concat'])
+        self.assertTrue(len(res) == 0, 'No nodes should be found, but bfs found {} nodes'.format(len(res)))
+
+    def test_backward_bfs_hard(self):
+        # Placeholder->ScaleShift->Mul1->Add1---->Concat
+        #             `----------->Add2->Mul2--'
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('placeholder_1_data', 'add_2'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_data', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_2', 'add_2_data'),
+                             ('add_2_data', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('add_1_data', 'concat_1'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'concat_1_data': {'is_output': True}})
+
+        res = backward_bfs(Node(graph, 'concat_1'), ['ScaleShift', 'Mul', 'Add'], ['Placeholder'])
+        self.assertTrue(len(res) == 0, 'Something went wrong with bfs')
+
+        res = backward_bfs(Node(graph, 'concat_1'), ['Mul'], ['Add'])
+        self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]),
+                        'Add operations were not found by bfs')
+
+        res = backward_bfs(Node(graph, 'concat_1'), ['ScaleShift'], ['Add'])
+        self.assertTrue(len(res) == 0, 'BFS shouldn\'t find any operations')
+
+        res = backward_bfs(Node(graph, 'concat_1'), [], ['Add'], allowed_all=True)
+        self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]),
+                        'Add operations were not found by bfs')
+
+        res = backward_bfs(Node(graph, 'concat_1'), ['ScaleShift'], ['ScaleShift'])
+        self.assertTrue(len(res) == 0, 'No nodes should be found, but bfs found {} nodes'.format(len(res)))
+
+    def test_backward_bfs_hard2(self):
+        # ScaleShift--->Mul1->Add1---->Concat
+        # Placeholder->Add2->Mul2----'
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'add_2'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_data', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_2', 'add_2_data'),
+                             ('add_2_data', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('add_1_data', 'concat_1'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'concat_1_data': {'is_output': True}})
+
+        res = backward_bfs(Node(graph, 'concat_1'), ['Mul', 'Add'], ['Placeholder'])
+        self.assertTrue(len(res) == 0, 'Something went wrong with bfs')
+
+        res = backward_bfs(Node(graph, 'concat_1'), ['Mul'], ['Add'])
+        self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]),
+                        'Add operations were not found by bfs')
+
+        res = backward_bfs(Node(graph, 'concat_1'), ['ScaleShift'], ['Add'])
+        self.assertTrue(len(res) == 0, 'BFS shouldn\'t find any operations')
+
+        res = backward_bfs(Node(graph, 'concat_1'), [], ['Add'], allowed_all=True)
+        self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]),
+                        'Add operations were not found by bfs')
+
+        res = backward_bfs(Node(graph, 'concat_1'), ['ScaleShift'], ['ScaleShift'])
+        self.assertTrue(len(res) == 0, 'No nodes should be found, but bfs found {} nodes'.format(len(res)))
+
+    def test_backward_bfs_cycle(self):
+        # Placeholder->ScaleShift->Mul->Add->Placeholder (cycle)
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_data', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'placeholder_1')
+                             ],
+                            {'add_1_data': {'is_output': True}})
+
+        res = backward_bfs(Node(graph, 'add_1_data'), ['Add', 'ScaleShift', 'Mul', 'Placeholder'], ['Conv2D'])
+        self.assertTrue(len(res) == 0, 'Shouldn\'t find any nodes due to the cycle in the graph')
+
+
+# Unit tests for get_next_operation
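+# get_next_operation is expected to return the op nodes that consume the outputs of the given op,
+# skipping the intermediate data nodes and de-duplicating repeated consumers.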
+class GetNextOperationTests(unittest.TestCase):
+    def test_get_next_operation_1(self):
+        # Placeholder->ScaleShift->Mul->Add
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_data', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1', 'add_1_data')
+                             ],
+                            {'add_1_data': {'is_output': True}})
+
+        res = get_next_operation(Node(graph, 'mul_1'))
+        self.assertTrue(len(res) == 1 and res[0].id == 'add_1', 'get_next_operation returned a wrong op')
+
+    def test_get_next_operation_2(self):
+        # Placeholder-+->Mul->Add
+        #             +------->^
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('placeholder_1_data', 'add_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1', 'add_1_data')
+                             ],
+                            {'add_1_data': {'is_output': True}})
+
+        res = get_next_operation(Node(graph, 'placeholder_1'))
+        self.assertTrue(len(res) == 2 and all([x.id in ['add_1', 'mul_1'] for x in res]),
+                        'get_next_operation returned a wrong op')
+
+    def test_get_next_operation_3(self):
+        # Placeholder-+--->Mul
+        #             +-----^
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1', 'placeholder_2_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('placeholder_2_data', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ],
+                            {'mul_1_data': {'is_output': True}})
+
+        res = get_next_operation(Node(graph, 'placeholder_1'))
+        self.assertTrue(len(res) == 1 and res[0].id == 'mul_1', 'get_next_operation returned a wrong op')
diff --git a/model-optimizer/mo/middle/passes/fusing/mark_unfused_nodes_test.py b/model-optimizer/mo/middle/passes/fusing/mark_unfused_nodes_test.py
new file mode 100644 (file)
index 0000000..f68c7ed
--- /dev/null
@@ -0,0 +1,312 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.middle.passes.fusing.mark_unfused_nodes import mark_unfused_nodes
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # ScaleShift layer
+    'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'},
+    'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Mul and Add operations
+    'mul_1': {'type': 'Mul', 'kind': 'op', 'op': 'Mul'},
+    'mul_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_1': {'type': 'Add', 'kind': 'op', 'op': 'Add'},
+    'add_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Mul2 and Add2 operations
+    'mul_2': {'type': 'Mul', 'kind': 'op', 'op': 'Mul'},
+    'mul_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'mul_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_2': {'type': 'Add', 'kind': 'op', 'op': 'Add'},
+    'add_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'add_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Concat1 operation
+    'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+    'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Convolutions
+    'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'},
+    'conv_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'},
+    'conv_2_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # FullyConnected
+    'fc_1': {'type': 'FullyConnected', 'kind': 'op', 'op': 'InnerProduct', 'layout': 'NHWC'},
+    'fc_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'fc_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'fc_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Placeholders
+    'placeholder_2': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'placeholder_3': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+}
+
+
+# Unit tests for mark_unfused_nodes
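+# The pattern passed to mark_unfused_nodes is expected to set 'can_be_fused' = False on every node whose name
+# matches it (regex or comma-separated names) and True on the rest.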
+class MarkFusedNodes(unittest.TestCase):
+    def test_mark_unfused_nodes_1(self):
+        # Placeholder-+->Mul->Add->Mul-+->Concat
+        #             +----------------+
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'concat_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             })
+
+        graph.graph['layout'] = 'NHWC'
+
+        mark_unfused_nodes(graph, '.*mul.*')
+
+        self.assertFalse(graph.node['mul_1']['can_be_fused'], "can_be_fused should be False")
+        self.assertFalse(graph.node['mul_2']['can_be_fused'], "can_be_fused should be False")
+        self.assertTrue(graph.node['add_1']['can_be_fused'], "can_be_fused should be True")
+
+    def test_mark_unfused_nodes_2(self):
+        # Placeholder-+->Mul->Add->Mul-+->Concat
+        #             +----------------+
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'concat_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        mark_unfused_nodes(graph, '.*')
+
+        self.assertFalse(graph.node['mul_1']['can_be_fused'], "can_be_fused should be False")
+        self.assertFalse(graph.node['mul_2']['can_be_fused'], "can_be_fused should be False")
+        self.assertFalse(graph.node['add_1']['can_be_fused'], "can_be_fused should be False")
+        self.assertFalse(graph.node['placeholder_1']['can_be_fused'], "can_be_fused should be False")
+        self.assertFalse(graph.node['concat_1']['can_be_fused'], "can_be_fused should be False")
+
+    def test_mark_unfused_nodes_3(self):
+        # Placeholder-+->Mul->Add->Mul-+->Concat
+        #             +----------------+
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'concat_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([1]), 'value': 6},
+                             'add_1_w': {'shape': np.array([1]), 'value': 6},
+                             'mul_2_w': {'shape': np.array([1]), 'value': 6},
+                             'concat_1_data': {'is_output': True}
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        mark_unfused_nodes(graph, 'mul_1,add_1')
+
+        self.assertFalse(graph.node['mul_1']['can_be_fused'], "can_be_fused should be False")
+        self.assertFalse(graph.node['add_1']['can_be_fused'], "can_be_fused should be False")
+        self.assertTrue(graph.node['mul_2']['can_be_fused'], "can_be_fused should be True")
+
+    def test_mark_unfused_nodes_4(self):
+        # Placeholder-+->Mul->Add->Mul-+->Concat
+        #             +----------------+
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'concat_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'add_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'concat_1_data': {'is_output': True}
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        mark_unfused_nodes(graph, '')
+
+        self.assertTrue(graph.node['mul_1']['can_be_fused'], "can_be_fused should be True")
+        self.assertTrue(graph.node['add_1']['can_be_fused'], "can_be_fused should be True")
+        self.assertTrue(graph.node['mul_2']['can_be_fused'], "can_be_fused should be True")
+
+    def test_mark_unfused_nodes_5(self):
+        # Placeholder-+->Mul->Add->Mul-+->Concat
+        #             +----------------+
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'concat_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'add_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'concat_1_data': {'is_output': True}
+                             })
+        graph.graph['layout'] = 'NCHW'
+
+        mark_unfused_nodes(graph, '')
+
+        self.assertTrue(graph.node['mul_1']['can_be_fused'], "can_be_fused should be True")
+        self.assertTrue(graph.node['add_1']['can_be_fused'], "can_be_fused should be True")
+        self.assertTrue(graph.node['mul_2']['can_be_fused'], "can_be_fused should be True")
+
+        def test_mark_unfused_nodes_5(self):
+            # Placeholder-+->Mul->Add->Mul-+->Concat
+            #             +----------------+
+            graph = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_1_data', 'add_1'),
+                                 ('add_1_w', 'add_1'),
+                                 ('add_1', 'add_1_data'),
+                                 ('add_1_data', 'mul_2'),
+                                 ('mul_2_w', 'mul_2'),
+                                 ('mul_2', 'mul_2_data'),
+                                 ('mul_2_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data'),
+                                 ('placeholder_1_data', 'concat_1'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'add_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'concat_1_data': {'is_output': True}
+                                 })
+            graph.graph['layout'] = 'NCHW'
+
+            mark_unfused_nodes(graph, '')
+
+            self.assertFalse(graph.node['mul_1']['can_be_fused'], "can_be_fused should be False")
+            self.assertFalse(graph.node['add_1']['can_be_fused'], "can_be_fused should be False")
+            self.assertFalse(graph.node['mul_2']['can_be_fused'], "can_be_fused should be False")
+
+    def test_mark_unfused_nodes_6(self):
+        # Placeholder-+->Mul->Add->Mul-+->Concat
+        #             +----------------+
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_data', 'add_1'),
+                             ('add_1_w', 'add_1'),
+                             ('add_1', 'add_1_data'),
+                             ('add_1_data', 'mul_2'),
+                             ('mul_2_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_2_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data'),
+                             ('placeholder_1_data', 'concat_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'add_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_2_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'add_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                             'concat_1_data': {'is_output': True}
+                             })
+        graph.graph['layout'] = 'NHWC'
+
+        mark_unfused_nodes(graph, '')
+
+        self.assertTrue(graph.node['mul_1']['can_be_fused'], "can_be_fused should be True")
+        self.assertTrue(graph.node['add_1']['can_be_fused'], "can_be_fused should be True")
+        self.assertTrue(graph.node['mul_2']['can_be_fused'], "can_be_fused should be True")
diff --git a/model-optimizer/mo/middle/passes/fusing/resnet_optimization_test.py b/model-optimizer/mo/middle/passes/fusing/resnet_optimization_test.py
new file mode 100644 (file)
index 0000000..0065775
--- /dev/null
@@ -0,0 +1,646 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.front.common.partial_infer.eltwise import eltwise_infer
+from mo.middle.passes.fusing.resnet_optimization import stride_optimization
+from mo.ops.convolution import Convolution
+from mo.ops.pooling import Pooling
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
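+# Element-wise maximum used as the infer function for the Eltwise node defined in the attributes below.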
+max_elt_lambda = lambda node: eltwise_infer(node, lambda a, b: np.maximum(a, b))
+
+nodes_attributes = {
+    # Placeholders
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Eltwise (max) operation
+    'eltwise_1': {'type': 'Eltwise', 'kind': 'op', 'op': 'Concat', 'infer': max_elt_lambda, 'operation': 'max'},
+    'eltwise_1_data': {'name': 'eltwise_1_data', 'value': None, 'shape': None, 'kind': 'data'},
+    # Convolutions
+    'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
+               'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
+               'spatial_dims': np.array([2, 3]),
+               'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
+               'dilation': np.array([1, 1, 1, 1]),
+               'batch_dims': np.array([0]), 'infer': Convolution.infer,
+               'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
+               'output_feature_channel': 0, },
+    'conv_1_w': {'value': None, 'shape': None, 'kind': 'data',
+                 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
+    'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
+               'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
+               'spatial_dims': np.array([2, 3]),
+               'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
+               'dilation': np.array([1, 1, 1, 1]),
+               'batch_dims': np.array([0]), 'infer': Convolution.infer,
+               'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
+               'output_feature_channel': 0, },
+    'conv_2_w': {'value': None, 'shape': None, 'kind': 'data',
+                 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
+    'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'conv_3': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
+               'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
+               'spatial_dims': np.array([2, 3]),
+               'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
+               'dilation': np.array([1, 1, 1, 1]),
+               'batch_dims': np.array([0]), 'infer': Convolution.infer,
+               'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
+               'output_feature_channel': 0, },
+    'conv_3_w': {'value': None, 'shape': None, 'kind': 'data',
+                 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
+    'conv_3_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_3_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'conv_4': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
+               'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
+               'spatial_dims': np.array([2, 3]),
+               'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
+               'dilation': np.array([1, 1, 1, 1]),
+               'batch_dims': np.array([0]), 'infer': Convolution.infer,
+               'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
+               'output_feature_channel': 0, },
+    'conv_4_w': {'value': None, 'shape': None, 'kind': 'data',
+                 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
+    'conv_4_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_4_data': {'value': None, 'shape': None, 'kind': 'data'},
+
+    'conv_5': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW',
+               'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1,
+               'spatial_dims': np.array([2, 3]),
+               'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
+               'dilation': np.array([1, 1, 1, 1]),
+               'batch_dims': np.array([0]), 'infer': Convolution.infer,
+               'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1,
+               'output_feature_channel': 0, },
+    'conv_5_w': {'value': None, 'shape': None, 'kind': 'data',
+                 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
+    'conv_5_b': {'value': None, 'shape': None, 'kind': 'data'},
+    'conv_5_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # ReLU
+    'relu_1': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
+    'relu_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'relu_2': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
+    'relu_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    'relu_3': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer},
+    'relu_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Pooling
+    'pool_1': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling',
+               'spatial_dims': np.array([2, 3]),
+               'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
+               'infer': Pooling.infer},
+    'pool_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
+# The unit tests below use the following notation: Operation(NxM,XxY), where NxM is the kernel size and XxY is the stride
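+# Each test builds an input graph and a reference graph, runs stride_optimization on the former and compares the
+# result with the latter: strides of 1x1 convolutions are propagated to preceding layers (or to an inserted Pooling
+# node) so that spatial dimensions shrink as early as possible.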
+class ResnetOptimizationTests(unittest.TestCase):
+    # Pl->Conv(1x1,1x1)->Conv(1x1,2x2) => Pl->Conv(1x1,2x2)->Conv(1x1,1x1)
+    def test_resnet_optimization_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('conv_1_data', 'conv_2'),
+                             ('conv_2_w', 'conv_2'),
+                             ('conv_2_b', 'conv_2'),
+                             ('conv_2', 'conv_2_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                             'conv_1': {'kernel_spatial': np.array([1, 1]),
+                                        'stride': np.array([1, 1, 1, 1]),
+                                        'output': np.array([3]), },
+                             'conv_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                             'conv_2': {'kernel_spatial': np.array([1, 1]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('conv_1_data', 'conv_2'),
+                                 ('conv_2_w', 'conv_2'),
+                                 ('conv_2_b', 'conv_2'),
+                                 ('conv_2', 'conv_2_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                                 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                                 'conv_1': {'kernel_spatial': np.array([1, 1]),
+                                            'stride': np.array([1, 1, 2, 2]),
+                                            'output': np.array([3]), },
+                                 'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
+
+                                 'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                                 'conv_2': {'kernel_spatial': np.array([1, 1]),
+                                            'stride': np.array([1, 1, 1, 1]),
+                                            'output': np.array([3]), },
+                                 'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+        graph_ref.graph['layout'] = 'NCHW'
+
+        stride_optimization(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    # Pl->Conv(1x1,2x2)->Conv(1x1,2x2) => Pl->Conv(1x1,4x4)->Conv(1x1,1x1)
+    def test_resnet_optimization_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('conv_1_data', 'conv_2'),
+                             ('conv_2_w', 'conv_2'),
+                             ('conv_2_b', 'conv_2'),
+                             ('conv_2', 'conv_2_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                             'conv_1': {'kernel_spatial': np.array([1, 1]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
+
+                             'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                             'conv_2': {'kernel_spatial': np.array([1, 1]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('conv_1_data', 'conv_2'),
+                                 ('conv_2_w', 'conv_2'),
+                                 ('conv_2_b', 'conv_2'),
+                                 ('conv_2', 'conv_2_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                                 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                                 'conv_1': {'kernel_spatial': np.array([1, 1]),
+                                            'stride': np.array([1, 1, 4, 4]),
+                                            'output': np.array([3]), },
+                                 'conv_1_data': {'shape': np.array([1, 3, 56, 56])},
+
+                                 'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                                 'conv_2': {'kernel_spatial': np.array([1, 1]),
+                                            'stride': np.array([1, 1, 1, 1]),
+                                            'output': np.array([3]), },
+                                 'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+        graph_ref.graph['layout'] = 'NCHW'
+
+        stride_optimization(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    # Pl->Conv(3x3,2x2)->Conv(3x3,2x2) => Same
+    def test_resnet_optimization_3(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('conv_1_data', 'conv_2'),
+                             ('conv_2_w', 'conv_2'),
+                             ('conv_2_b', 'conv_2'),
+                             ('conv_2', 'conv_2_data'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
+                             'conv_1': {'kernel_spatial': np.array([3, 3]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
+
+                             'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
+                             'conv_2': {'kernel_spatial': np.array([3, 3]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('conv_1_data', 'conv_2'),
+                                 ('conv_2_w', 'conv_2'),
+                                 ('conv_2_b', 'conv_2'),
+                                 ('conv_2', 'conv_2_data'),
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                                 'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
+                                 'conv_1': {'kernel_spatial': np.array([3, 3]),
+                                            'stride': np.array([1, 1, 2, 2]),
+                                            'output': np.array([3]), },
+                                 'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
+
+                                 'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
+                                 'conv_2': {'kernel_spatial': np.array([3, 3]),
+                                            'stride': np.array([1, 1, 2, 2]),
+                                            'output': np.array([3]), },
+                                 'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+        graph_ref.graph['layout'] = 'NCHW'
+
+        stride_optimization(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    # Pl--->Conv(3x3,2x2)->ReLU--->Eltwise-->Conv(1x1,2x2) => Pl--->Conv(3x3,4x4)->ReLU--->Eltwise-->Conv(1x1,1x1)
+    #   `-->Conv(3x3,2x2)->ReLU---`                             `-->Conv(3x3,4x4)->ReLU---`
+    def test_resnet_optimization_4(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('conv_1_data', 'relu_1'),
+                             ('relu_1', 'relu_1_data'),
+
+                             ('placeholder_1_data', 'conv_2'),
+                             ('conv_2_w', 'conv_2'),
+                             ('conv_2_b', 'conv_2'),
+                             ('conv_2', 'conv_2_data'),
+                             ('conv_2_data', 'relu_2'),
+                             ('relu_2', 'relu_2_data'),
+
+                             ('relu_1_data', 'eltwise_1'),
+                             ('relu_2_data', 'eltwise_1'),
+
+                             ('eltwise_1', 'eltwise_1_data'),
+                             ('eltwise_1_data', 'conv_3'),
+                             ('conv_3_w', 'conv_3'),
+                             ('conv_3_b', 'conv_3'),
+                             ('conv_3', 'conv_3_data'),
+
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
+                             'conv_1': {'kernel_spatial': np.array([3, 3]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
+                             'relu_1_data': {'shape': np.array([1, 3, 112, 112])},
+
+                             'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
+                             'conv_2': {'kernel_spatial': np.array([3, 3]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
+                             'relu_2_data': {'shape': np.array([1, 3, 112, 112])},
+
+                             'eltwise_1_data': {'shape': np.array([1, 3, 112, 112])},
+
+                             'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                             'conv_3': {'kernel_spatial': np.array([1, 1]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_3_data': {'shape': np.array([1, 3, 56, 56])},
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('conv_1_data', 'relu_1'),
+                                 ('relu_1', 'relu_1_data'),
+
+                                 ('placeholder_1_data', 'conv_2'),
+                                 ('conv_2_w', 'conv_2'),
+                                 ('conv_2_b', 'conv_2'),
+                                 ('conv_2', 'conv_2_data'),
+                                 ('conv_2_data', 'relu_2'),
+                                 ('relu_2', 'relu_2_data'),
+
+                                 ('relu_1_data', 'eltwise_1'),
+                                 ('relu_2_data', 'eltwise_1'),
+
+                                 ('eltwise_1', 'eltwise_1_data'),
+                                 ('eltwise_1_data', 'conv_3'),
+                                 ('conv_3_w', 'conv_3'),
+                                 ('conv_3_b', 'conv_3'),
+                                 ('conv_3', 'conv_3_data'),
+
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                                 'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
+                                 'conv_1': {'kernel_spatial': np.array([3, 3]),
+                                            'stride': np.array([1, 1, 4, 4]),
+                                            'output': np.array([3])},
+                                 'conv_1_data': {'shape': np.array([1, 3, 56, 56])},
+                                 'relu_1_data': {'shape': np.array([1, 3, 56, 56])},
+
+                                 'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
+                                 'conv_2': {'kernel_spatial': np.array([3, 3]),
+                                            'stride': np.array([1, 1, 4, 4]),
+                                            'output': np.array([3])},
+                                 'conv_2_data': {'shape': np.array([1, 3, 56, 56])},
+                                 'relu_2_data': {'shape': np.array([1, 3, 56, 56])},
+
+                                 'eltwise_1_data': {'shape': np.array([1, 3, 56, 56])},
+
+                                 'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                                 'conv_3': {'kernel_spatial': np.array([1, 1]),
+                                            'stride': np.array([1, 1, 1, 1]),
+                                            'output': np.array([3])},
+                                 'conv_3_data': {'shape': np.array([1, 3, 56, 56])},
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+        graph_ref.graph['layout'] = 'NCHW'
+
+        #        dump_graph_for_graphviz(graph)
+        #        dump_graph_for_graphviz(graph_ref)
+
+        stride_optimization(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_3_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    # Pl--->Conv(1x1,1x1)->ReLU--->Eltwise-->Conv(1x1,2x2) => Pl--->Conv(1x1,2x2)->ReLU--->Eltwise-->Conv(1x1,1x1)
+    #   `----------------->ReLU---`                             `-->Pool(1x1,2x2)->ReLU---`
+    def test_resnet_optimization_5(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+                             ('conv_1_data', 'relu_1'),
+                             ('relu_1', 'relu_1_data'),
+
+                             ('placeholder_1_data', 'relu_2'),
+                             ('relu_2', 'relu_2_data'),
+
+                             ('relu_1_data', 'eltwise_1'),
+                             ('relu_2_data', 'eltwise_1'),
+
+                             ('eltwise_1', 'eltwise_1_data'),
+                             ('eltwise_1_data', 'conv_3'),
+                             ('conv_3_w', 'conv_3'),
+                             ('conv_3_b', 'conv_3'),
+                             ('conv_3', 'conv_3_data'),
+
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                             'conv_1': {'kernel_spatial': np.array([1, 1]),
+                                        'stride': np.array([1, 1, 1, 1]),
+                                        'output': np.array([3]), },
+                             'conv_1_data': {'shape': np.array([1, 3, 224, 224])},
+                             'relu_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'relu_2_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'eltwise_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                             'conv_3': {'kernel_spatial': np.array([1, 1]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_3_data': {'shape': np.array([1, 3, 112, 112])},
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+                                 ('conv_1_data', 'relu_1'),
+                                 ('relu_1', 'relu_1_data'),
+
+                                 ('placeholder_1_data', 'pool_1'),
+                                 ('pool_1', 'pool_1_data'),
+                                 ('pool_1_data', 'relu_2'),
+                                 ('relu_2', 'relu_2_data'),
+
+                                 ('relu_1_data', 'eltwise_1'),
+                                 ('relu_2_data', 'eltwise_1'),
+
+                                 ('eltwise_1', 'eltwise_1_data'),
+                                 ('eltwise_1_data', 'conv_3'),
+                                 ('conv_3_w', 'conv_3'),
+                                 ('conv_3_b', 'conv_3'),
+                                 ('conv_3', 'conv_3_data'),
+
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                                 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                                 'conv_1': {'kernel_spatial': np.array([1, 1]),
+                                            'stride': np.array([1, 1, 2, 2]),
+                                            'output': np.array([3])},
+                                 'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
+                                 'relu_1_data': {'shape': np.array([1, 3, 112, 112])},
+
+                                 'pool_1': {'stride': np.array([1, 1, 2, 2])},
+                                 'pool_1_data': {'shape': np.array([1, 3, 112, 112])},
+                                 'relu_2_data': {'shape': np.array([1, 3, 112, 112])},
+
+                                 'eltwise_1_data': {'shape': np.array([1, 3, 112, 112])},
+
+                                 'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                                 'conv_3': {'kernel_spatial': np.array([1, 1]),
+                                            'stride': np.array([1, 1, 1, 1]),
+                                            'output': np.array([3])},
+                                 'conv_3_data': {'shape': np.array([1, 3, 112, 112])},
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+        graph_ref.graph['layout'] = 'NCHW'
+
+        #        dump_graph_for_graphviz(graph)
+        #        dump_graph_for_graphviz(graph_ref)
+
+        stride_optimization(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_3_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    # Pl->Conv(1x1,1x1)->Conv(1x1,2x2)->Conv(3x3,1x1)->Conv(1x1,2x2)
+    #       =>
+    # Pl->Conv(1x1,2x2)->Conv(1x1,1x1)->Conv(3x3,2x2)->Conv(1x1,1x1)
+    def test_resnet_optimization_6(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'conv_1'),
+                             ('conv_1_w', 'conv_1'),
+                             ('conv_1_b', 'conv_1'),
+                             ('conv_1', 'conv_1_data'),
+
+                             ('conv_1_data', 'conv_2'),
+                             ('conv_2_w', 'conv_2'),
+                             ('conv_2_b', 'conv_2'),
+                             ('conv_2', 'conv_2_data'),
+
+                             ('conv_2_data', 'conv_3'),
+                             ('conv_3_w', 'conv_3'),
+                             ('conv_3_b', 'conv_3'),
+                             ('conv_3', 'conv_3_data'),
+
+                             ('conv_3_data', 'conv_4'),
+                             ('conv_4_w', 'conv_4'),
+                             ('conv_4_b', 'conv_4'),
+                             ('conv_4', 'conv_4_data'),
+
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                             'conv_1': {'kernel_spatial': np.array([1, 1]),
+                                        'stride': np.array([1, 1, 1, 1]),
+                                        'output': np.array([3]), },
+                             'conv_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                             'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                             'conv_2': {'kernel_spatial': np.array([1, 1]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
+
+                             'conv_3_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
+                             'conv_3': {'kernel_spatial': np.array([3, 3]),
+                                        'stride': np.array([1, 1, 1, 1]),
+                                        'output': np.array([3]), },
+                             'conv_3_data': {'shape': np.array([1, 3, 110, 110])},
+
+                             'conv_4_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                             'conv_4': {'kernel_spatial': np.array([1, 1]),
+                                        'stride': np.array([1, 1, 2, 2]),
+                                        'output': np.array([3]), },
+                             'conv_4_data': {'shape': np.array([1, 3, 55, 55])},
+                             },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'conv_1'),
+                                 ('conv_1_w', 'conv_1'),
+                                 ('conv_1_b', 'conv_1'),
+                                 ('conv_1', 'conv_1_data'),
+
+                                 ('conv_1_data', 'conv_2'),
+                                 ('conv_2_w', 'conv_2'),
+                                 ('conv_2_b', 'conv_2'),
+                                 ('conv_2', 'conv_2_data'),
+
+                                 ('conv_2_data', 'conv_3'),
+                                 ('conv_3_w', 'conv_3'),
+                                 ('conv_3_b', 'conv_3'),
+                                 ('conv_3', 'conv_3_data'),
+
+                                 ('conv_3_data', 'conv_4'),
+                                 ('conv_4_w', 'conv_4'),
+                                 ('conv_4_b', 'conv_4'),
+                                 ('conv_4', 'conv_4_data'),
+
+                                 ],
+                                {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])},
+
+                                 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                                 'conv_1': {'kernel_spatial': np.array([1, 1]),
+                                            'stride': np.array([1, 1, 2, 2]),
+                                            'output': np.array([3])},
+                                 'conv_1_data': {'shape': np.array([1, 3, 112, 112])},
+
+                                 'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                                 'conv_2': {'kernel_spatial': np.array([1, 1]),
+                                            'stride': np.array([1, 1, 1, 1]),
+                                            'output': np.array([3])},
+                                 'conv_2_data': {'shape': np.array([1, 3, 112, 112])},
+
+                                 'conv_3_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])},
+                                 'conv_3': {'kernel_spatial': np.array([3, 3]),
+                                            'stride': np.array([1, 1, 2, 2]),
+                                            'output': np.array([3])},
+                                 'conv_3_data': {'shape': np.array([1, 3, 55, 55])},
+
+                                 'conv_4_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])},
+                                 'conv_4': {'kernel_spatial': np.array([1, 1]),
+                                            'stride': np.array([1, 1, 1, 1]),
+                                            'output': np.array([3])},
+                                 'conv_4_data': {'shape': np.array([1, 3, 55, 55])},
+                                 },
+                                nodes_with_edges_only=True)
+
+        graph.graph['layout'] = 'NCHW'
+        graph_ref.graph['layout'] = 'NCHW'
+
+        stride_optimization(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'conv_4_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/mo/middle/passes/infer_test.py b/model-optimizer/mo/middle/passes/infer_test.py
new file mode 100644 (file)
index 0000000..d3b7e65
--- /dev/null
@@ -0,0 +1,531 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.concat import concat_infer
+from mo.graph.graph import Node
+from mo.middle.passes.infer import override_placeholder_shapes, partial_infer, add_mean_scale_values, scale_input, \
+    check_for_cycle
+from mo.utils.cli_parser import get_mean_scale_dictionary, parse_tuple_pairs
+from mo.utils.error import Error
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'node_1_data': {'value': None, 'kind': 'data', 'data_type': None},
+                    'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'concat': {'type': 'Concat', 'value': None, 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'node_3_data': {'value': None, 'kind': 'data', 'data_type': None},
+                    # Placeholders
+                    'placeholder_1': {'shape': None, 'type': 'Input', 'kind': 'op', 'op': 'Placeholder'},
+                    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+                    'placeholder_2': {'shape': None, 'type': 'Input', 'kind': 'op', 'op': 'Placeholder'},
+                    'pl_1': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+                    'pl_1_data': {'value': None, 'kind': 'data', 'data_type': None},
+                    'pl_2': {'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+                    'pl_2_data': {'value': None, 'kind': 'data', 'data_type': None},
+                    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+                    # ScaleShift layer
+                    'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'},
+                    'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+                    'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+                    'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+                    # Mul op
+                    'mul_1': {'type': None, 'kind': 'op', 'op': 'Mul'},
+                    'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+                    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+                    }
+
+
+class TestInferPass(unittest.TestCase):
+    def test_override_placeholder_shapes(self):
+        """
+        Test for overriding shape in placeholder by shape from user_shapes.
+        """
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}
+                             },
+                            nodes_with_edges_only=True)
+
+        ph_shape = np.array([1, 3, 224, 224])
+        user_dict = {'node_1': [{'shape': ph_shape}]}
+        override_placeholder_shapes(graph, user_dict)
+        res_shape = graph.node['node_1']['shape']
+        self.assertTrue(np.array_equal(ph_shape, res_shape))
+
+    def test_override_placeholder_no_shape(self):
+        """
+        Test for case when user_shapes is not defined.
+        """
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None, 'op': 'Placeholder'},
+                             'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}
+                             },
+                            nodes_with_edges_only=True)
+        out = override_placeholder_shapes(graph, None)
+        res_shape = graph.node['node_1']['shape']
+        placeholder_shape = np.array([1, 3, 227, 227])
+        self.assertIsNone(out)
+        self.assertTrue(np.array_equal(placeholder_shape, res_shape))
+
+    def test_override_placeholder_shapes_not_rewritten(self):
+        """
+        Test for case when user_shapes is not None, but it shouldn't rewrite shapes.
+        """
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}
+                             },
+                            nodes_with_edges_only=True)
+
+        node_1_shape = np.array([1, 3, 227, 227])
+        user_dict = {'some_node': [{'shape': np.zeros((3))}]}
+        override_placeholder_shapes(graph, user_dict)
+        res_shape = graph.node['node_1']['shape']
+        self.assertTrue(np.array_equal(node_1_shape, res_shape))
+
+    def test_override_placeholder_shapes_dict(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None, 'op': 'Placeholder'},
+                             'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder'}
+                             },
+                            nodes_with_edges_only=True)
+
+        placeholder_shape = np.array([1, 3, 224, 224])
+        user_shapes = {
+            'node_1': [{'shape': placeholder_shape}],
+            'node_2': [{'shape': placeholder_shape}],
+        }
+        override_placeholder_shapes(graph, user_shapes)
+        res_shape = graph.node['node_1']['shape']
+        res_shape2 = graph.node['node_2']['shape']
+        self.assertTrue(np.array_equal(placeholder_shape, res_shape))
+        self.assertTrue(np.array_equal(placeholder_shape, res_shape2))
+
+    nodes = {
+        'placeholder_1': {'name': 'placeholder_1', 'shape': [1, 2, 3, 4], 'type': 'Placeholder', 'value': None,
+                          'kind': 'op', 'op': 'Placeholder'},
+        'placeholder_2': {'name': 'placeholder_2', 'shape': [5, 6, 7, 8], 'type': 'Placeholder', 'value': None,
+                          'kind': 'op', 'op': 'Placeholder'},
+        '1': {'name': 'node_1', 'type': 'Identity', 'value': None, 'kind': 'op'},
+        '2': {'name': 'node_2', 'type': 'Identity', 'value': None, 'kind': 'op'},
+        '3': {'name': 'concat', 'type': 'Identity', 'value': None, 'kind': 'op'},
+        '4': {'name': 'output', 'type': 'SoftMax', 'value': None, 'kind': 'op'}
+    }
+    edges = [
+        ('placeholder_1', '1'),
+        ('1', '3'),
+        ('placeholder_2', '2'),
+        ('2', '3'),
+        ('3', '4')
+    ]
+
+    def test_override_placeholder_shapes_batch_is_not_set(self):
+        """
+        Test case when batch is not set (shapes shouldn't change).
+        """
+        graph = build_graph(self.nodes, self.edges)
+        shapes = {}
+        batch = None
+        override_placeholder_shapes(graph, shapes, batch)
+        res_shape_1 = graph.node['placeholder_1']['shape']
+        res_shape_2 = graph.node['placeholder_2']['shape']
+        self.assertTrue(np.array_equal(self.nodes['placeholder_1']['shape'], res_shape_1))
+        self.assertTrue(np.array_equal(self.nodes['placeholder_2']['shape'], res_shape_2))
+
+    def test_override_placeholder_shapes_real_inputs_and_batch(self):
+        """
+        Test case when batch is set and shapes should be overridden by user shapes.
+        """
+        graph = build_graph(self.nodes, self.edges)
+        shapes = {'placeholder_1': [{'shape': np.array([1, 2, 3, 4])}],
+                  'placeholder_2': [{'shape': np.array([1, 5, 6, 7])}]}
+        batch = 4
+        override_placeholder_shapes(graph, shapes, batch)
+        res_shape_1 = graph.node['placeholder_1']['shape']
+        res_shape_2 = graph.node['placeholder_2']['shape']
+        self.assertTrue(np.array_equal(res_shape_1, np.array([4, 2, 3, 4])))
+        self.assertTrue(np.array_equal(res_shape_2, np.array([4, 5, 6, 7])))
+
+    def test_override_placeholder_shapes_real_inputs_and_batch_2(self):
+        """
+        Test case when batch is set, but the shapes in user_shapes are None.
+        """
+        graph = build_graph(self.nodes, self.edges)
+        shapes = {'placeholder_1': [{'shape': None}], 'placeholder_2': [{'shape': None}]}
+        batch = 4
+        graph.node['placeholder_1']['shape'] = np.array([1, 2, 3, 4])
+        graph.node['placeholder_2']['shape'] = np.array([1, 5, 6, 7])
+        override_placeholder_shapes(graph, shapes, batch)
+        np.testing.assert_array_equal(graph.node['placeholder_1']['shape'], np.array([4, 2, 3, 4]))
+        np.testing.assert_array_equal(graph.node['placeholder_2']['shape'], np.array([4, 5, 6, 7]))
+
+    def test_partial_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'concat'),
+                             ('node_2', 'concat'),
+                             ('concat', 'node_3')],
+                            {'node_3': {'kind': 'data', 'is_output': True, 'shape': None, 'infer': None},
+                             'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
+                             'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
+                             'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer}
+                             },
+                            nodes_with_edges_only=True)
+
+        start_node = 'concat'
+        partial_infer(graph, start_node)
+        node = Node(graph, start_node)
+        self.assertTrue(node.is_partial_inferred)
+        self.assertTrue(node.out_node().is_partial_inferred)
+
+        # check if previous nodes are not inferred
+        node = Node(graph, start_node)
+        while True:
+            # collect nodes in a list
+            if isinstance(node.in_nodes(), list):
+                in_nodes = node.in_nodes()
+            else:
+                in_nodes = [y for x, y in node.in_nodes().items()]
+
+            # check parents and find next parent
+            for n in in_nodes:
+                if 'embedded_input_' not in n.id:
+                    node = n
+                self.assertFalse(n.has('is_partial_inferred'))
+
+            if not len(in_nodes):
+                break
+
+    def test_partial_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None, 'infer': None},
+                             'node_1': {'shape': None, 'infer': None}
+                             },
+                            nodes_with_edges_only=True)
+        self.assertRaises(Error, partial_infer, graph, 'node_1')
+
+    def test_partial_infer_cycle(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'concat'),
+                             ('node_2', 'concat'),
+                             ('concat', 'node_3'),
+                             ('node_3', 'concat')],
+                            {'node_3': {'kind': 'data', 'is_output': True, 'shape': None, 'infer': None},
+                             'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
+                             'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
+                             'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer}
+                             },
+                            nodes_with_edges_only=True)
+
+        start_node = 'concat'
+        self.assertRaises(Error, partial_infer, graph, start_node)
+
+    def test_add_mean_scale_values_with_data_name(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None, 'data_type': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder', 'name': 'data',
+                                        'data_type': None}
+                             },
+                            nodes_with_edges_only=True)
+        graph.graph['layout'] = 'NCHW'
+        mean_values = parse_tuple_pairs('(124,117,104)')
+        scale_values = parse_tuple_pairs('')
+
+        # input = 'data'
+        mean_scale = get_mean_scale_dictionary(mean_values, scale_values, None)
+        self.assertEqual(len(graph), 2)
+        add_mean_scale_values(graph, mean_scale)
+        self.assertEqual(len(graph), 5)
+
+    def test_add_mean_scale_values_without_data_name(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None, 'data_type': None},
+                             'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Placeholder', 'name': 'data',
+                                        'data_type': None}
+                             },
+                            nodes_with_edges_only=True)
+        graph.graph['layout'] = 'NCHW'
+        mean_values = parse_tuple_pairs('(124,117,104)')
+        scale_values = parse_tuple_pairs('')
+        # input = None
+        mean_scale = get_mean_scale_dictionary(mean_values, scale_values, None)
+        self.assertEqual(len(graph), 2)
+        add_mean_scale_values(graph, mean_scale)
+        self.assertEqual(len(graph), 5)
+
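+    # The tests below check how many Mul/Add nodes add_mean_scale_values inserts: one Add per non-zero mean and
+    # one Mul per non-identity scale; zero means and unit scales should not produce extra nodes.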
+    def test_add_mean_scale_values1(self):
+        graph = build_graph(nodes_attributes,
+                            [('pl_1', 'pl_1_data'), ('pl_2', 'pl_2_data')],
+                            {'pl_1_data': {'shape': np.array([1, 3, 38, 38]), 'infer': None},
+                             'pl_2_data': {'shape': np.array([1, 6]), 'infer': None},
+                             'pl_1': {'shape': np.array([1,3,38,38])},
+                             'pl_2': {'shape': np.array([1,6])},
+                             },
+                            nodes_with_edges_only=True)
+        graph.graph['layout'] = 'NCHW'
+        add_mean_scale_values(graph,
+                              {'pl_1': {'mean': np.array([1., 2., 3.])}, 'pl_2': {'mean': np.array([0., 0., 0.])}})
+        mul_op_cnt = 0
+        add_op_cnt = 0
+        for node in graph.nodes():
+            node = Node(graph, node)
+            if node.has_valid('op') and node.op == 'Mul':
+                mul_op_cnt += 1
+            if node.has_valid('op') and node.op == 'Add':
+                add_op_cnt += 1
+
+        self.assertEqual(add_op_cnt, 1, "Found more than one Add op in graph")
+        self.assertEqual(mul_op_cnt, 0, "Found Mul op in graph")
+
+    def test_optimize_scale_and_add_mean_values(self):
+        graph = build_graph(
+            nodes_attributes,
+            [
+                ('pl_1', 'pl_1_data')
+            ],
+            {
+                'pl_1_data': {
+                    'shape': np.array([1, 3, 38, 38]),
+                    'infer': None
+                },
+                'pl_1': {
+                    'shape': np.array([1,3,38,38])
+                }
+            },
+            nodes_with_edges_only=True
+        )
+        graph.graph['layout'] = 'NCHW'
+        add_mean_scale_values(graph,
+                              {
+                                  'pl_1': {
+                                      'scale': np.array([1.]),
+                                      'mean': np.array([1., 2., 3.])
+                                  }
+                              })
+        mul_op_cnt = 0
+        add_op_cnt = 0
+        for node in graph.nodes():
+            node = Node(graph, node)
+            if node.has_valid('op') and node.op == 'Mul':
+                mul_op_cnt += 1
+            if node.has_valid('op') and node.op == 'Add':
+                add_op_cnt += 1
+
+        self.assertEqual(add_op_cnt, 1, "Found more than one Add op in graph")
+        self.assertEqual(mul_op_cnt, 0, "Found Mul op in graph")
+
+    def test_optimize_mean_and_add_scale_values(self):
+        graph = build_graph(
+            nodes_attributes,
+            [
+                ('pl_1', 'pl_1_data')
+            ],
+            {
+                'pl_1_data': {
+                    'shape': np.array([1, 3, 38, 38]),
+                    'infer': None
+                },
+                'pl_1': {
+                    'shape': np.array([1,3,38,38])
+                }
+            },
+            nodes_with_edges_only=True
+        )
+        graph.graph['layout'] = 'NCHW'
+        add_mean_scale_values(graph,
+                              {
+                                  'pl_1': {
+                                      'scale': np.array([1.43]),
+                                      'mean': np.array([0., 0., 0.])
+                                  }
+                              })
+        mul_op_cnt = 0
+        add_op_cnt = 0
+        for node in graph.nodes():
+            node = Node(graph, node)
+            if node.has_valid('op') and node.op == 'Mul':
+                mul_op_cnt += 1
+            if node.has_valid('op') and node.op == 'Add':
+                add_op_cnt += 1
+
+        self.assertEqual(add_op_cnt, 0, "Found Add op in graph")
+        self.assertEqual(mul_op_cnt, 1, "Found more than one Mul op in graph")
+
+    def test_add_mean_scale_values3(self):
+        graph = build_graph(nodes_attributes,
+                            [('pl_1', 'pl_1_data')],
+                            {'pl_1_data': {'shape': np.array([1, 3, 38, 38]), 'infer': None},
+                             'pl_1': {'shape': np.array([1,3,38,38])},
+                             },
+                            nodes_with_edges_only=True)
+        graph.graph['layout'] = 'NCHW'
+        add_mean_scale_values(graph, [[np.array([1., 2., 3.]), np.array([1., 2., 3.])]])
+
+        mul_op_cnt = 0
+        add_op_cnt = 0
+        for node in graph.nodes():
+            node = Node(graph, node)
+            if node.has_valid('op') and node.op == 'Mul':
+                mul_op_cnt += 1
+            if node.has_valid('op') and node.op == 'Add':
+                add_op_cnt += 1
+
+        self.assertEqual(add_op_cnt, 1, "Found more than one Add op in graph")
+        self.assertEqual(mul_op_cnt, 1, "Found more than one Nul op in graph")
+
+    def test_add_mean_scale_values_cut_graph(self):
+        """
+        Test case when the user cut off the start of the network and specified mean/scale values for the new input node 'node_3'.
+        """
+        graph = build_graph(nodes_attributes,
+                            [('pl_1', 'pl_1_data'),
+                             ('pl_2', 'pl_2_data'),
+                             ('pl_2_data', 'node_3'),
+                             ('node_3', 'node_3_data'),
+                             ('pl_1_data', 'node_1'),
+                             ('node_3_data', 'node_1'),
+                             ],
+                            {'pl_1_data': {'shape': np.array([1, 3, 38, 38]), 'infer': None},
+                             'pl_2_data': {'shape': np.array([1, 3, 38, 38]), 'infer': None},
+                             'pl_2': {'initial_node_name': 'node_3', 'shape': np.array([1,3,38,38])},
+                             'pl_1': {'shape': np.array([1,3,38,38])},
+                             },
+                            nodes_with_edges_only=True)
+        graph.graph['layout'] = 'NCHW'
+        add_mean_scale_values(graph, {'pl_1': {'mean': np.array([1, 2, 3])}, 'node_3': {'scale': np.array([1, 2, 3])}})
+
+        mul_op_cnt = 0
+        add_op_cnt = 0
+        for node in graph.nodes():
+            node = Node(graph, node)
+            if node.has_valid('op') and node.op == 'Mul':
+                mul_op_cnt += 1
+            if node.has_valid('op') and node.op == 'Add':
+                add_op_cnt += 1
+
+        self.assertEqual(add_op_cnt, 1, "There should be exactly one Add op")
+        self.assertEqual(mul_op_cnt, 1, "There should be exactly one Mul op")
+        self.assertEqual(Node(graph, 'pl_2').out_node().out_node().op, 'Mul', "The Mul op should be added after pl_2")
+        self.assertEqual(Node(graph, 'pl_1').out_node().out_node().op, 'Add', "The Add op should be added after pl_1")
+
+
+class ScaleInputTests(unittest.TestCase):
+    def test_scale_input_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data')],
+                            {'placeholder_1_data': {'is_output': True},
+                             'placeholder_1': {'shape': np.array([1, 3, 224, 224])}
+                            },
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'mul_1_data'),
+                                 ('mul_1_data', 'mul_1'),
+                                 ('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'placeholder_1_data')],
+                                {'mul_1_w': {'shape': np.array([1, 1, 1]), 'value': np.array([1 / 255])},
+                                 'placeholder_1_data': {'is_output': True}},
+                                nodes_with_edges_only=True)
+        graph.graph['layout'] = 'NCHW'
+        scale_input(graph, 255)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data')
+        self.assertTrue(flag, resp)
+
+    def test_scale_input_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data')],
+                            {'placeholder_1_data': {'is_output': True}},
+                            nodes_with_edges_only=True)
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data')],
+                                {'placeholder_1_data': {'is_output': True}},
+                                nodes_with_edges_only=True)
+
+        scale_input(graph, 1)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data')
+        self.assertTrue(flag, resp)
+
+    def test_check_for_cycle1(self):
+        # cyclic case
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_1_data'),
+                             ('node_1_data', 'node_3'),
+                             ('node_3', 'node_3_data'),
+                             ('node_3_data', 'node_1')],
+                            nodes_with_edges_only=True)
+        with self.assertRaisesRegex(Error, 'Graph contains a cycle. Can not proceed.*'):
+            check_for_cycle(graph)
+
+    def test_check_for_cycle2(self):
+        # acyclic case
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_1_data'),
+                             ('node_1_data', 'node_3'),
+                             ('node_3', 'node_3_data'),
+                             ('node_3_data', 'mul_1'),
+                             ('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data')
+                             ],
+                            nodes_with_edges_only=True)
+        try:
+            check_for_cycle(graph)
+        except Error:
+            self.fail("Unexpected Error raised")
+
+    def test_is_not_fully_inferred_param(self):
+        # Node that has is_not_fully_inferred=True
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'concat'),
+                             ('node_2', 'concat'),
+                             ('concat', 'node_3')],
+                            {'node_3': {'kind': 'data', 'is_output': True, 'shape': None, 'infer': None},
+                             'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
+                             'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None},
+                             'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer, 'is_not_fully_inferred': True}
+                             },
+                            nodes_with_edges_only=True)
+
+        start_node = 'concat'
+        try:
+            partial_infer(graph, start_node)
+        except Error:
+            self.fail("Unexpected Error raised")
+        node = Node(graph, start_node)
+        self.assertTrue(node.is_partial_inferred)
+        self.assertTrue(node.out_node().is_partial_inferred)
+
+    def test_for_is_cyclic1(self):
+        # Test for the case of a cyclic graph without the is_cyclic attribute
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'node_1_data'),
+                             ('node_1_data', 'node_3'),
+                             ('node_3', 'node_3_data'),
+                             ('node_3_data', 'node_1')],
+                            nodes_with_edges_only=True)
+        with self.assertRaisesRegex(Error, 'Graph contains a cycle. Can not proceed.*'):
+            partial_infer(graph)
diff --git a/model-optimizer/mo/middle/passes/mean_scale_values_test.py b/model-optimizer/mo/middle/passes/mean_scale_values_test.py
new file mode 100644 (file)
index 0000000..9bc7b6b
--- /dev/null
@@ -0,0 +1,157 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.middle.passes.mean_scale_values import move_scaleshift_to_preprocess
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    'concat': {'type': 'Concat', 'value': None, 'kind': 'op'},
+                    'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},
+                    # Placeholders
+                    'placeholder_1': {'value': None, 'shape': None, 'type': 'Input', 'kind': 'op', 'op': 'Placeholder'},
+                    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+                    'placeholder_2': {'value': None, 'shape': None, 'type': 'Input', 'kind': 'op', 'op': 'Placeholder'},
+                    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+                    # ScaleShift layer
+                    'scaleshift_1': {'type': 'ScaleShift', 'value': None, 'kind': 'op', 'op': 'ScaleShift'},
+                    'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+                    'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
+                    'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+                    }
+
+
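+# move_scaleshift_to_preprocess is expected to drop a ScaleShift that directly follows a Placeholder
+# when its weights are all ones and, for non-zero biases, record the negated biases as graph-level
+# 'mean_values'; with non-trivial weights the graph is left untouched (see the individual cases below).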
+class TestScaleShift_To_Preprocess(unittest.TestCase):
+    def test_move_scaleshift_to_preprocess_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1_b', 'scaleshift_1')],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.ones(3)},
+                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([-1, -2, -3])},
+                             'scaleshift_1_data': {'is_output': True}
+                             })
+
+        del graph['placeholder_1']['placeholder_1_data'][0]['in']
+        del graph['scaleshift_1']['scaleshift_1_data'][0]['in']
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'scaleshift_1_data')],
+                                {'scaleshift_1_data': {'is_output': True}})
+
+        move_scaleshift_to_preprocess(graph)
+        self.assertTrue(graph.graph['mean_values'] is not None)
+        self.assertTrue(np.array_equal(graph.graph['mean_values']['placeholder_1'], np.array([1, 2, 3])))
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
+
+    def test_move_scaleshift_to_preprocess_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1_b', 'scaleshift_1')],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array((1, 2, 3))},
+                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([-1, -2, -3])},
+                             'scaleshift_1_data': {'is_output': True}
+                             })
+
+        del graph['placeholder_1']['placeholder_1_data'][0]['in']
+        del graph['scaleshift_1']['scaleshift_1_data'][0]['in']
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'scaleshift_1'),
+                                 ('scaleshift_1', 'scaleshift_1_data'),
+                                 ('scaleshift_1_w', 'scaleshift_1'),
+                                 ('scaleshift_1_b', 'scaleshift_1')],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3]), 'is_output': True},
+                                 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array((1, 2, 3))},
+                                 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([-1, -2, -3])},
+                                 'scaleshift_1_data': {'is_output': True}
+                                 })
+
+        move_scaleshift_to_preprocess(graph)
+        self.assertTrue(graph.graph.get('mean_values', None) is None)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
+
+    def test_move_scaleshift_to_preprocess_3(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_w', 'scaleshift_1'), ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array((1, 2, 3))},
+                             'scaleshift_1_data': {'is_output': True}
+                             })
+
+        del graph['placeholder_1']['placeholder_1_data'][0]['in']
+        del graph['scaleshift_1']['scaleshift_1_data'][0]['in']
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'scaleshift_1'),
+                                 ('scaleshift_1', 'scaleshift_1_data'),
+                                 ('scaleshift_1_w', 'scaleshift_1')],
+                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3]), 'is_output': True},
+                                 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array((1, 2, 3))},
+                                 'scaleshift_1_data': {'is_output': True}
+                                 })
+
+        move_scaleshift_to_preprocess(graph)
+        self.assertTrue(graph.graph.get('mean_values', None) is None)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
+
+    def test_move_scaleshift_to_preprocess_4(self):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'scaleshift_1'),
+                             ('scaleshift_1', 'scaleshift_1_data'),
+                             ('scaleshift_1_w', 'scaleshift_1'),
+                             ('scaleshift_1_b', 'scaleshift_1')],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.ones(3)},
+                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.zeros(3)},
+                             'scaleshift_1_data': {'is_output': True}
+                             })
+
+        del graph['placeholder_1']['placeholder_1_data'][0]['in']
+        del graph['scaleshift_1']['scaleshift_1_data'][0]['in']
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'scaleshift_1_data')],
+                                {'scaleshift_1_data': {'is_output': True}})
+
+        move_scaleshift_to_preprocess(graph)
+        self.assertTrue(graph.graph.get('mean_values', None) is None)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/mo/middle/passes/pool_test.py b/model-optimizer/mo/middle/passes/pool_test.py
new file mode 100644 (file)
index 0000000..1473f1e
--- /dev/null
@@ -0,0 +1,96 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.middle.passes.eliminate import graph_clean_up
+from mo.middle.passes.pool import mean_to_avgpool
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    'placeholder_1': {'shape': None, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'},
+    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
+    # Mean layer
+    'mean_1': {'type': 'Pooling', 'kind': 'op', 'op': 'Mean', 'keep_dims': True},
+    'mean_axis': {'value': None, 'shape': None, 'kind': 'data'},
+    'mean_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # AvgPool layer
+    'pool_1': {'type': 'Pooling', 'kind': 'op', 'op': 'AvgPool'},
+    'pool_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Reshape layer
+    'reshape_1': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
+    'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
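+# mean_to_avgpool is expected to rewrite a Mean reduction over spatial axes into an AvgPool;
+# when keep_dims is False a Reshape is appended after the pooling to squeeze the reduced
+# dimensions (see the second test case).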
+class MeanToAvgPoolTests(unittest.TestCase):
+    def _create_graph_with_mean(self, axis, keep_dims=True, mean_out_shape=np.array([1, 227, 227, 3])):
+        graph = build_graph(nodes_attributes,
+                            [('placeholder_1', 'placeholder_1_data'),
+                             ('placeholder_1_data', 'mean_1'),
+                             ('mean_1', 'mean_1_data'),
+                             ('mean_axis', 'mean_1'),
+                             ],
+                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
+                             'mean_1': {'shape': np.array([1, 227, 227, 3]), 'keep_dims': keep_dims},
+                             'mean_axis': {'shape': np.array(axis.shape) if axis is not None else None,
+                                           'value': np.array(axis) if axis is not None else None},
+                             'mean_1_data': {'shape': mean_out_shape, 'is_output': True},
+                             })
+        del graph['mean_1']['mean_1_data'][0]['in']
+        return graph
+
+    def test_mean_to_avg_1(self):
+        graph = self._create_graph_with_mean(axis=np.array([1, 2]))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'pool_1'),
+                                 ('pool_1', 'pool_1_data'),
+                                 ],
+                                {'pool_1': {'pool_method': 'avg', 'rounding_type': 'ceil', 'exclude_pad': 'true',
+                                            'op': 'AvgPool', 'shape': np.array([1, 227, 227, 3])},
+                                 'pool_1_data': {'is_output': True, 'shape': np.array([1, 227, 227, 3])}
+                                 })
+
+        mean_to_avgpool(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'mean_1_data', 'pool_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
+
+    def test_mean_to_avg_2(self):
+        graph = self._create_graph_with_mean(axis=np.array([0]), keep_dims=False,
+                                             mean_out_shape=np.array([227, 227, 3]))
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('placeholder_1', 'placeholder_1_data'),
+                                 ('placeholder_1_data', 'pool_1'),
+                                 ('pool_1', 'pool_1_data'),
+                                 ('pool_1_data', 'reshape_1'),
+                                 ('reshape_1', 'reshape_1_data')
+                                 ],
+                                {'pool_1': {'pool_method': 'avg', 'rounding_type': 'ceil', 'exclude_pad': 'true',
+                                            'op': 'AvgPool', 'shape': np.array([1, 227, 227, 3])},
+                                 'pool_1_data': {'shape': np.array([1, 227, 227, 3])},
+                                 'reshape_1_data': {'is_output': True, 'shape': np.array([227, 227, 3])},
+                                 })
+
+        mean_to_avgpool(graph)
+        graph_clean_up(graph)
+        (flag, resp) = compare_graphs(graph, graph_ref, 'mean_1_data', 'reshape_1_data', check_op_attrs=True)
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/mo/middle/passes/shared_weights_duplication_test.py b/model-optimizer/mo/middle/passes/shared_weights_duplication_test.py
new file mode 100644 (file)
index 0000000..ef48276
--- /dev/null
@@ -0,0 +1,77 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.middle.passes.shared_weights_duplication import duplicate_shared_weights
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+nodes_attributes = {
+    # Mul and Add operations
+    'mul_1': {'type': None, 'kind': 'op', 'op': 'Mul'},
+    'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'},
+    'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'},
+    'mul_3': {'type': None, 'kind': 'op', 'op': 'Mul'},
+    'mul_3_w': {'value': None, 'shape': None, 'kind': 'data'},
+    'mul_3_data': {'value': None, 'shape': None, 'kind': 'data'},
+    # Concat1 operation
+    'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
+    'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'},
+}
+
+
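+# duplicate_shared_weights should give every consumer of a shared constant its own copy of the
+# weights data node: the single mul_1_w feeding three Mul ops becomes mul_1_w, mul_2_w and mul_3_w
+# with identical values in the reference graph below.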
+class DuplicateSharedWeightsTests(unittest.TestCase):
+    def test_duplicate_shared_weights_1(self):
+        graph = build_graph(nodes_attributes,
+                            [('mul_1_w', 'mul_1'),
+                             ('mul_1', 'mul_1_data'),
+                             ('mul_1_w', 'mul_2'),
+                             ('mul_2', 'mul_2_data'),
+                             ('mul_1_w', 'mul_3'),
+                             ('mul_3', 'mul_3_data'),
+                             ('mul_1_data', 'concat_1'),
+                             ('mul_2_data', 'concat_1'),
+                             ('mul_3_data', 'concat_1'),
+                             ('concat_1', 'concat_1_data')
+                             ],
+                            {'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}})
+
+        graph_ref = build_graph(nodes_attributes,
+                                [('mul_1_w', 'mul_1'),
+                                 ('mul_1', 'mul_1_data'),
+                                 ('mul_2_w', 'mul_2'),
+                                 ('mul_2', 'mul_2_data'),
+                                 ('mul_3_w', 'mul_3'),
+                                 ('mul_3', 'mul_3_data'),
+                                 ('mul_1_data', 'concat_1'),
+                                 ('mul_2_data', 'concat_1'),
+                                 ('mul_3_data', 'concat_1'),
+                                 ('concat_1', 'concat_1_data')
+                                 ],
+                                {'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 'mul_3_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
+                                 })
+
+        duplicate_shared_weights(graph)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data')
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/mo/ops/activation_test.py b/model-optimizer/mo/ops/activation_test.py
new file mode 100644 (file)
index 0000000..b289b96
--- /dev/null
@@ -0,0 +1,111 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.graph.graph import Node
+from mo.ops.activation import Activation
+from mo.utils.unittest.graph import build_graph
+
+
+class TestActivationOp(unittest.TestCase):
+    nodes_attributes = {
+        'node_1': {
+            'shape': np.array([227, 227, 227, 227]),
+            'value': None
+        },
+        'activation_node': {
+            'op': 'Activation',
+            'kind': 'op'
+        },
+        'node_3': {
+            'shape': None
+        }
+    }
+
+    def test_assertion_activation_infer(self):
+        graph = build_graph(self.nodes_attributes,
+                            [
+                                ('node_1', 'activation_node'),
+                                ('activation_node', 'node_3')
+                            ],
+                            {
+                                'activation_node': {'operation': 'test'}
+                            })
+        activation_node = Node(graph, 'activation_node')
+        self.assertEqual(activation_node.op, 'Activation')
+        self.assertRaises(KeyError, Activation.infer, activation_node)
+
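+    # ReLU6 clips the input to [0, 6]: f(x) = min(max(x, 0), 6), so [0, 7, 3, -1] -> [0, 6, 3, 0].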
+    def test_activation_infer(self):
+        graph = build_graph(self.nodes_attributes,
+                            [
+                                ('node_1', 'activation_node'),
+                                ('activation_node', 'node_3')
+                            ],
+                            {
+                                'node_1': {
+                                    'value': np.array([0, 7, 3, -1])
+                                },
+                                'activation_node': {
+                                    'operation': 'relu6'
+                                },
+                                'node_3': {
+                                    'value': None
+                                }
+                            })
+        graph.graph['layout'] = 'NCHW'
+        activation_node = Node(graph, 'activation_node')
+        Activation.infer(activation_node)
+        exp_shape = np.array([227, 227, 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        res_value = graph.node['node_3']['value']
+        exp_value = np.array([0, 6, 3, 0])
+        for i, value in enumerate(exp_shape):
+            self.assertEqual(res_shape[i], value)
+        for i, value in enumerate(exp_value):
+            self.assertEqual(res_value[i], value)
+
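+    # ELU with alpha = 1: f(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise,
+    # e.g. exp(-1) - 1 ≈ -0.63212056 for the last input element.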
+    def test_activation_elu_infer(self):
+        graph = build_graph(self.nodes_attributes,
+                            [
+                                ('node_1', 'activation_node'),
+                                ('activation_node', 'node_3')
+                            ],
+                            {
+                                'node_1': {
+                                    'value': np.array([6, -4, -2, -1])
+                                },
+                                'activation_node': {
+                                    'operation': 'elu',
+                                    'alpha': 1.0,
+                                },
+                                'node_3': {
+                                    'value': None
+                                }
+                            })
+        graph.graph['layout'] = 'NCHW'
+        activation_node = Node(graph, 'activation_node')
+        Activation.infer(activation_node)
+        exp_shape = np.array([227, 227, 227, 227])
+        res_shape = graph.node['node_3']['shape']
+        res_value = graph.node['node_3']['value']
+        exp_value = np.array([6., -0.98168436, -0.86466472, -0.63212056])
+        for i, value in enumerate(exp_shape):
+            self.assertEqual(res_shape[i], value)
+        for i, value in enumerate(exp_value):
+            self.assertAlmostEqual(res_value[i], value)
diff --git a/model-optimizer/mo/ops/clamp_test.py b/model-optimizer/mo/ops/clamp_test.py
new file mode 100644 (file)
index 0000000..66e38e2
--- /dev/null
@@ -0,0 +1,47 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.elemental import copy_shape_infer
+from mo.ops.clamp import Clamp
+from mo.utils.unittest.graph import build_graph
+
+
+class TestClampOp(unittest.TestCase):
+    nodes_attributes = {
+        'node_1': {
+            'shape': np.array([227, 227, 227, 227])
+        },
+        'clamp_node': {
+        },
+        'node_3': {
+            'kind': 'data'
+        }
+    }
+
+    def test_clamp_op(self):
+        graph = build_graph(self.nodes_attributes,
+                            [
+                                ('node_1', 'clamp_node'),
+                                ('clamp_node', 'node_3')
+                            ])
+        clamp_node = Clamp(graph, self.nodes_attributes['clamp_node']).add_node()
+        self.assertEqual(clamp_node.type, 'Clamp')
+        self.assertEqual(clamp_node.op, 'Clamp')
+        self.assertEqual(clamp_node.infer, copy_shape_infer)
diff --git a/model-optimizer/mo/ops/concat_test.py b/model-optimizer/mo/ops/concat_test.py
new file mode 100644 (file)
index 0000000..7f39236
--- /dev/null
@@ -0,0 +1,47 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.concat import concat_infer
+from mo.ops.concat import Concat
+from mo.utils.unittest.graph import build_graph
+
+
+class TestConcatOp(unittest.TestCase):
+    nodes_attributes = {
+        'node_1': {
+            'shape': np.array([227, 227, 227, 227])
+        },
+        'concat_node': {
+        },
+        'node_3': {
+            'kind': 'data'
+        }
+    }
+
+    def test_concat_op(self):
+        graph = build_graph(self.nodes_attributes,
+                            [
+                                ('node_1', 'concat_node'),
+                                ('concat_node', 'node_3')
+                            ])
+        concat_node = Concat(graph, self.nodes_attributes['concat_node']).add_node()
+        self.assertEqual(concat_node.type, 'Concat')
+        self.assertEqual(concat_node.op, 'Concat')
+        self.assertEqual(concat_node.infer, concat_infer)
diff --git a/model-optimizer/mo/ops/convolution_test.py b/model-optimizer/mo/ops/convolution_test.py
new file mode 100644 (file)
index 0000000..6f009b5
--- /dev/null
@@ -0,0 +1,371 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.utils import int64_array
+from mo.graph.graph import Node
+from mo.ops.convolution import Convolution
+from mo.utils.unittest.extractors import FakeValue
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'conv_input': {'value': None, 'kind': 'data'},
+                    'conv_node': {'type': 'Convolution', 'kind': 'op'},
+                    'conv_weights': {'value': FakeValue(None), 'kind': 'data'},
+                    'conv_output': {'value': None, 'kind': 'data'}
+                    }
+
+
+class TestConvolutionPartialInfer(unittest.TestCase):
+    def test_caffe_conv2d_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('conv_input', 'conv_node'),
+                             ('conv_weights', 'conv_node'),
+                             ('conv_node', 'conv_output')],
+                            {'conv_output': {'is_output': True, 'shape': None},
+                             'conv_input': {'shape': np.array([1, 3, 227, 227])},
+                             'conv_weights': {'shape': np.array([64, 3, 3, 3]),
+                                              'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
+                             'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
+                                           'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
+                                           'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False,
+                                           'output_spatial_shape': None, 'output_shape': None,
+                                           'stride': np.array([1, 1, 1, 1]), 'group': 1,
+                                           'kernel_spatial_idx': np.array([2, 3]),
+                                           'input_feature_channel': 1,
+                                           'output_feature_channel': 0,
+                                           'output': 64, 'kernel_spatial': np.array([3, 3]),
+                                           'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]),
+                                           'batch_dims': np.array([0])}
+                             })
+
+        conv_node = Node(graph, 'conv_node')
+        Convolution.infer(conv_node)
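+        # 227 - 3 + 1 = 225 per spatial dim for a 3x3 kernel, stride 1 and zero padding.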
+        exp_shape = np.array([1, 64, 225, 225])
+        res_shape = graph.node['conv_output']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_caffe_conv2d_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('conv_input', 'conv_node'),
+                             ('conv_weights', 'conv_node'),
+                             ('conv_node', 'conv_output')],
+                            {'conv_output': {'is_output': True, 'shape': None},
+                             'conv_input': {'shape': None},
+                             'conv_weights': {'shape': None,
+                                              'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
+                             'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
+                                           'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
+                                           'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False,
+                                           'output_spatial_shape': None, 'output_shape': None,
+                                           'stride': np.array([1, 1, 1, 1]), 'group': 1,
+                                           'output': 64, 'kernel_spatial': np.array([3, 3]),
+                                           'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]),
+                                           'batch_dims': np.array([0])}
+                             })
+
+        conv_node = Node(graph, 'conv_node')
+        Convolution.infer(conv_node)
+        res_shape = graph.node['conv_output']['shape']
+        self.assertIsNone(res_shape)
+
+    def test_deconv_infer_ideal(self):
+        graph = build_graph(nodes_attributes,
+                            [('conv_input', 'conv_node'),
+                             ('conv_weights', 'conv_node'),
+                             ('conv_node', 'conv_output')],
+                            {'conv_output': {'is_output': True, 'shape': None},
+                             'conv_input': {'shape': np.array([1, 21, 16, 16])},
+                             'conv_weights': {'shape': np.array([1, 21, 4, 4]),
+                                              'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
+                             'conv_node': {'channel_dims': np.array([1]), 'bias_addable': True, 'bias_term': False,
+                                           'batch_dims': np.array([0]),
+                                           'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
+                                           'kernel_spatial': np.array([4, 4]), 'output_spatial_shape': None,
+                                           'kernel_spatial_idx': np.array([2, 3]),
+                                           'input_feature_channel': 1,
+                                           'output_feature_channel': 0,
+                                           'output_padding': np.array([0, 0, 1, 1]),
+                                           'type': 'Deconvolution', 'output': 21, 'dilation': np.array([1, 1, 1, 1]),
+                                           'group': 1, 'stride': np.array([1, 1, 2, 2]), 'output_shape': None}
+                             })
+
+        deconv_node = Node(graph, 'conv_node')
+
+        Convolution.infer(deconv_node)
+        res_shape = deconv_node['output_shape']
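+        # Assuming the usual transposed-convolution shape rule out = stride * (in - 1) + kernel + output_padding:
+        # 2 * (16 - 1) + 4 + 1 = 35 per spatial dim.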
+        exp_shape = np.array([1, 21, 35, 35])
+
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+        # Check that shape and pad attrs do not change after a second infer
+        Convolution.infer(deconv_node)
+
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+
+    def test_deconv_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('conv_input', 'conv_node'),
+                             ('conv_weights', 'conv_node'),
+                             ('conv_node', 'conv_output')],
+                            {'conv_output': {'is_output': True, 'shape': None},
+                             'conv_input': {'shape': None},
+                             'conv_weights': {'shape': np.array([1, 21, 16, 16]),
+                                              'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
+                             'conv_node': {'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]),
+                                           'channel_dims': np.array([1]),
+                                           'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
+                                           'kernel_spatial': np.array([4, 4]), 'output_spatial_shape': None,
+                                           'kernel_spatial_idx': np.array([2, 3]),
+                                           'input_feature_channel': 1,
+                                           'output_feature_channel': 0,
+                                           'type': 'Deconvolution', 'output': 21, 'dilation': np.array([1, 1, 1, 1]),
+                                           'group': 1, 'stride': np.array([1, 1, 2, 2]), 'output_shape': None}
+                             })
+
+        deconv_node = Node(graph, 'conv_node')
+        Convolution.infer(deconv_node)
+        res_shape = deconv_node['output_shape']
+        self.assertIsNone(res_shape)
+
+    def test_conv_infer_set_default_attrs_nchw(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('conv_input', 'conv_node'),
+                                ('conv_weights', 'conv_node'),
+                                ('conv_node', 'conv_output')
+                            ],
+                            {
+                                'conv_output': {
+                                    'is_output': True,
+                                    'shape': None
+                                },
+                                'conv_input': {
+                                    'shape': int64_array([1, 3, 224, 224])
+                                },
+                                'conv_weights': {
+                                    'shape': int64_array([3, 64, 7, 7]),
+                                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
+                                },
+                                'conv_node': {
+                                    'type': 'Convolution',
+                                    'bias_term': None,
+                                    'stride': None,
+                                    'dilation': None,
+
+                                    'batch_dims': int64_array([0]),
+                                    'channel_dims': int64_array([1]),
+
+                                    'output_spatial_shape': None,
+
+                                    'input_feature_channel': 0,
+                                    'output_feature_channel': 1,
+
+                                    'group': 1,
+                                    'output_shape': None,
+                                    'layout': 'NCHW'
+                                }
+                            })
+
+        conv_node = Node(graph, 'conv_node')
+        conv_output = Node(graph, 'conv_output')
+
+        Convolution.infer(conv_node)
+
+        # Check bias_term attribute
+        self.assertTrue(conv_node.has_valid('bias_term'))
+        self.assertTrue(not conv_node.bias_term)
+        # Check kernel_spatial_idx attr detection
+        self.assertTrue(conv_node.has_valid('kernel_spatial_idx'))
+        self.assertTrue(np.array_equal(int64_array([2, 3]), conv_node.kernel_spatial_idx))
+        # Check spatial_dims attr detection
+        self.assertTrue(conv_node.has_valid('spatial_dims'))
+        self.assertTrue(np.array_equal(int64_array([2, 3]), conv_node.spatial_dims))
+        # Check kernel_spatial attr detection
+        self.assertTrue(conv_node.has_valid('kernel_spatial'))
+        self.assertTrue(np.array_equal(int64_array([7, 7]), conv_node.kernel_spatial))
+        # Check output attribute
+        self.assertTrue(conv_node.has_valid('output'))
+        self.assertEqual(64, conv_node.output)
+        # Check dilation value. Should be set to default
+        self.assertTrue(conv_node.has_valid('dilation'))
+        self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.dilation))
+        # Check stride value. Should be set to default
+        self.assertTrue(conv_node.has_valid('stride'))
+        self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.stride))
+        # Check pad value. Should be set to default
+        self.assertTrue(conv_node.has_valid('pad'))
+        self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]), conv_node.pad))
+        # Check pad_spatial_shape
+        self.assertTrue(conv_node.has_valid('pad_spatial_shape'))
+        self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0]]), conv_node.pad_spatial_shape))
+        # Check resulting output shape
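+        # (224 - 7 + 1 = 218 per spatial dim for a 7x7 kernel, stride 1 and zero padding)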
+        self.assertTrue(np.array_equal(int64_array([1, 64, 218, 218]), conv_output.shape))
+
+    def test_conv_infer_set_default_attrs_nhwc(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('conv_input', 'conv_node'),
+                                ('conv_weights', 'conv_node'),
+                                ('conv_node', 'conv_output')
+                            ],
+                            {
+                                'conv_output': {
+                                    'is_output': True,
+                                    'shape': None
+                                },
+                                'conv_input': {
+                                    'shape': int64_array([1, 224, 224, 3])
+                                },
+                                'conv_weights': {
+                                    'shape': int64_array([3, 64, 7, 7]),
+                                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
+                                },
+                                'conv_node': {
+                                    'type': 'Convolution',
+                                    'bias_term': None,
+                                    'stride': None,
+                                    'dilation': None,
+
+                                    'batch_dims': int64_array([0]),
+                                    'channel_dims': int64_array([3]),
+
+                                    'output_spatial_shape': None,
+
+                                    'input_feature_channel': 0,
+                                    'output_feature_channel': 1,
+
+                                    'group': 1,
+                                    'output_shape': None,
+                                    'layout': 'NHWC'
+                                }
+                            })
+
+        conv_node = Node(graph, 'conv_node')
+        conv_output = Node(graph, 'conv_output')
+
+        Convolution.infer(conv_node)
+
+        # Check bias_term attribute
+        self.assertTrue(conv_node.has_valid('bias_term'))
+        self.assertTrue(not conv_node.bias_term)
+        # Check kernel_spatial_idx attr detection
+        self.assertTrue(conv_node.has_valid('kernel_spatial_idx'))
+        self.assertTrue(np.array_equal(int64_array([2, 3]), conv_node.kernel_spatial_idx))
+        # Check spatial_dims attr detection
+        self.assertTrue(conv_node.has_valid('spatial_dims'))
+        self.assertTrue(np.array_equal(int64_array([1, 2]), conv_node.spatial_dims))
+        # Check kernel_spatial attr detection
+        self.assertTrue(conv_node.has_valid('kernel_spatial'))
+        self.assertTrue(np.array_equal(int64_array([7, 7]), conv_node.kernel_spatial))
+        # Check output attribute
+        self.assertTrue(conv_node.has_valid('output'))
+        self.assertEqual(64, conv_node.output)
+        # Check dilation value. Should be set to default
+        self.assertTrue(conv_node.has_valid('dilation'))
+        self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.dilation))
+        # Check stride value. Should be set to default
+        self.assertTrue(conv_node.has_valid('stride'))
+        self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.stride))
+        # Check pad value. Should be set to default
+        self.assertTrue(conv_node.has_valid('pad'))
+        self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]), conv_node.pad))
+        # Check pad_spatial_shape
+        self.assertTrue(conv_node.has_valid('pad_spatial_shape'))
+        self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0]]), conv_node.pad_spatial_shape))
+        # Check resulting output shape
+        self.assertTrue(np.array_equal(int64_array([1, 218, 218, 64]), conv_output.shape))
+
+    def test_conv_infer_3D_convolution(self):
+        graph = build_graph(nodes_attributes,
+                            [
+                                ('conv_input', 'conv_node'),
+                                ('conv_weights', 'conv_node'),
+                                ('conv_node', 'conv_output')
+                            ],
+                            {
+                                'conv_output': {
+                                    'is_output': True,
+                                    'shape': None
+                                },
+                                'conv_input': {
+                                    'shape': int64_array([1, 3, 16, 224, 224])
+                                },
+                                'conv_weights': {
+                                    'shape': int64_array([3, 64, 1, 7, 7]),
+                                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
+                                },
+                                'conv_node': {
+                                    'type': 'Convolution',
+                                    'bias_term': None,
+                                    'stride': None,
+                                    'dilation': None,
+
+                                    'batch_dims': int64_array([0]),
+                                    'channel_dims': int64_array([1]),
+
+                                    'output_spatial_shape': None,
+
+                                    'input_feature_channel': 0,
+                                    'output_feature_channel': 1,
+
+                                    'group': 1,
+                                    'output_shape': None,
+                                    'layout': 'NCHW'
+                                }
+                            })
+
+        conv_node = Node(graph, 'conv_node')
+        conv_output = Node(graph, 'conv_output')
+
+        Convolution.infer(conv_node)
+
+        # Check bias_term attribute
+        self.assertTrue(conv_node.has_valid('bias_term'))
+        self.assertTrue(not conv_node.bias_term)
+        # Check kernel_spatial_idx attr detection
+        self.assertTrue(conv_node.has_valid('kernel_spatial_idx'))
+        self.assertTrue(np.array_equal(int64_array([2, 3, 4]), conv_node.kernel_spatial_idx))
+        # Check spatial_dims attr detection
+        self.assertTrue(conv_node.has_valid('spatial_dims'))
+        self.assertTrue(np.array_equal(int64_array([2, 3, 4]), conv_node.spatial_dims))
+        # Check kernel_spatial attr detection
+        self.assertTrue(conv_node.has_valid('kernel_spatial'))
+        self.assertTrue(np.array_equal(int64_array([1, 7, 7]), conv_node.kernel_spatial))
+        # Check output attribute
+        self.assertTrue(conv_node.has_valid('output'))
+        self.assertEqual(64, conv_node.output)
+        # Check dilation value. Should be set to default
+        self.assertTrue(conv_node.has_valid('dilation'))
+        self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1, 1]), conv_node.dilation))
+        # Check stride value. Should be set to default
+        self.assertTrue(conv_node.has_valid('stride'))
+        self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1, 1]), conv_node.stride))
+        # Check pad value. Should be set to default
+        self.assertTrue(conv_node.has_valid('pad'))
+        self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]), conv_node.pad))
+        # Check pad_spatial_shape
+        self.assertTrue(conv_node.has_valid('pad_spatial_shape'))
+        self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0]]), conv_node.pad_spatial_shape))
+        # Check resulting output shape
+        self.assertTrue(np.array_equal(int64_array([1, 64, 16, 218, 218]), conv_output.shape))
diff --git a/model-optimizer/mo/ops/crop_test.py b/model-optimizer/mo/ops/crop_test.py
new file mode 100644 (file)
index 0000000..9eb5412
--- /dev/null
@@ -0,0 +1,187 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.utils import int64_array
+from mo.graph.graph import Node
+from mo.ops.crop import Crop
+from mo.utils.unittest.graph import build_graph
+
+
+class TestCropPartialInfer(unittest.TestCase):
+    @staticmethod
+    def _create_graph_type1():
+        nodes_attributes = {'crop_input': {'shape': None, 'value': None, 'kind': 'data'},
+                            'crop_node': {'type': 'Crop', 'kind': 'op'},
+                            'crop_output': {'shape': None, 'value': None, 'kind': 'data'}
+                            }
+        return build_graph(nodes_attributes,
+                           [
+                               ('crop_input', 'crop_node'), ('crop_node', 'crop_output')
+                           ],
+                           {
+                               'crop_input': {'shape': int64_array([1, 3, 224, 224])},
+                               'crop_node': {'axis': int64_array([2, 3]),
+                                             'crop_begin': int64_array([10, 15]),
+                                             'crop_end': int64_array([10, 15])
+                                             },
+                           })
+
+    @staticmethod
+    def _create_graph_type2():
+        nodes_attributes = {'crop_input': {'shape': None, 'value': None, 'kind': 'data'},
+                            'crop_node': {'type': 'Crop', 'kind': 'op'},
+                            'crop_output': {'shape': None, 'value': None, 'kind': 'data'}
+                            }
+        return build_graph(nodes_attributes,
+                           [
+                               ('crop_input', 'crop_node'), ('crop_node', 'crop_output')
+                           ],
+                           {
+                               'crop_input': {'shape': int64_array([1, 3, 224, 224])},
+                               'crop_node': {'axis': int64_array([2, 3]), 'dim': int64_array([100, 150])},
+                           })
+
+    @staticmethod
+    def _create_graph_type3():
+        nodes_attributes = {'crop_input': {'shape': None, 'value': None, 'kind': 'data'},
+                            'crop_input2': {'shape': None, 'value': None, 'kind': 'data'},
+                            'crop_node': {'type': 'Crop', 'kind': 'op'},
+                            'crop_output': {'shape': None, 'value': None, 'kind': 'data'}
+                            }
+        return build_graph(nodes_attributes,
+                           [
+                               ('crop_input', 'crop_node'), ('crop_input2', 'crop_node'), ('crop_node', 'crop_output')
+                           ],
+                           {
+                               'crop_input': {'shape': int64_array([1, 3, 224, 224])},
+                               'crop_input2': {'shape': int64_array([1, 3, 100, 150])},
+                               'crop_node': {'axis': 2, 'offset': int64_array([10, 15])},
+                           })
+
+    def test_crop_type1_infer(self):
+        graph = self._create_graph_type1()
+
+        crop_node = Node(graph, 'crop_node')
+        Crop.infer(crop_node)
+
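+        # crop_begin/crop_end of [10, 15] trim both ends of axes 2 and 3:
+        # 224 - 2 * 10 = 204 and 224 - 2 * 15 = 194.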
+        exp_shape = int64_array([1, 3, 204, 194])
+        res_shape = graph.node['crop_output']['shape']
+
+        self.assertTrue(np.array_equal(exp_shape, res_shape),
+                        'shapes do not match expected: {} and given: {}'.format(exp_shape, res_shape))
+
+    def test_crop_type1_infer_neg1(self):
+        graph = self._create_graph_type1()
+
+        crop_node = Node(graph, 'crop_node')
+        crop_node['axis'] = None
+
+        Crop.infer(crop_node)
+        self.assertIsNone(crop_node.out_node().shape)
+
+    def test_crop_type1_infer_neg2(self):
+        graph = self._create_graph_type1()
+
+        crop_node = Node(graph, 'crop_node')
+        crop_node['crop_begin'] = int64_array([1, 2, 3])
+
+        Crop.infer(crop_node)
+        self.assertIsNone(crop_node.out_node().shape)
+
+    def test_crop_type2_infer(self):
+        graph = self._create_graph_type2()
+
+        crop_node = Node(graph, 'crop_node')
+        Crop.infer(crop_node)
+
+        exp_shape = int64_array([1, 3, 100, 150])
+        res_shape = graph.node['crop_output']['shape']
+
+        self.assertTrue(np.array_equal(exp_shape, res_shape),
+                        'shapes do not match expected: {} and given: {}'.format(exp_shape, res_shape))
+
+    def test_crop_type2_infer_neg1(self):
+        graph = self._create_graph_type2()
+
+        crop_node = Node(graph, 'crop_node')
+        crop_node['dim'] = int64_array([1, 2, 3])
+
+        Crop.infer(crop_node)
+        self.assertIsNone(crop_node.out_node().shape)
+
+    def test_crop_type2_infer_neg2(self):
+        graph = self._create_graph_type2()
+
+        crop_node = Node(graph, 'crop_node')
+        crop_node['dim'] = None
+        crop_node['crop_begin'] = None
+
+        Crop.infer(crop_node)
+        self.assertIsNone(crop_node.out_node().shape)
+
+    def test_crop_type3_infer(self):
+        graph = self._create_graph_type3()
+
+        crop_node = Node(graph, 'crop_node')
+        Crop.infer(crop_node)
+
+        exp_shape = int64_array([1, 3, 100, 150])
+        res_shape = graph.node['crop_output']['shape']
+
+        self.assertTrue(np.array_equal(exp_shape, res_shape),
+                        'shapes do not match expected: {} and given: {}'.format(exp_shape, res_shape))
+
+    def test_crop_type3_infer_neg1(self):
+        graph = self._create_graph_type3()
+
+        crop_node = Node(graph, 'crop_node')
+        crop_input2 = Node(graph, 'crop_input2')
+        crop_input2.shape = None
+
+        Crop.infer(crop_node)
+        self.assertIsNone(crop_node.out_node().shape)
+
+    def test_crop_type3_infer_neg2(self):
+        graph = self._create_graph_type3()
+
+        crop_node = Node(graph, 'crop_node')
+        crop_node['axis'] = None
+
+        Crop.infer(crop_node)
+        self.assertIsNone(crop_node.out_node().shape)
+
+    def test_crop_type3_infer_neg3(self):
+        graph = self._create_graph_type3()
+
+        crop_node = Node(graph, 'crop_node')
+        crop_node['offset'] = None
+
+        Crop.infer(crop_node)
+        self.assertIsNone(crop_node.out_node().shape)
+
+    def test_crop_type3_infer_neg4(self):
+        graph = self._create_graph_type3()
+
+        crop_node = Node(graph, 'crop_node')
+        crop_input2 = Node(graph, 'crop_input2')
+        crop_input2.shape = int64_array([1, 4, 423, 563])
+
+        Crop.infer(crop_node)
+        self.assertIsNone(crop_node.out_node().shape)
diff --git a/model-optimizer/mo/ops/flatten_onnx_test.py b/model-optimizer/mo/ops/flatten_onnx_test.py
new file mode 100644 (file)
index 0000000..1e68fbb
--- /dev/null
@@ -0,0 +1,65 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+from generator import generator, generate
+
+from mo.graph.graph import Node
+from mo.ops.flatten_onnx import FlattenONNX
+from mo.utils.unittest.graph import build_graph
+
+
+@generator
+class TestFlattenONNXOp(unittest.TestCase):
+    # Shape inference tests for the ONNX Flatten operation, which the Model Optimizer represents as a Reshape.
+    nodes_attributes = {
+        'data_1': {
+            'kind': 'data',
+            'shape': np.array([1, 3, 224, 224])
+        },
+        'flatten': {
+            'type': 'Reshape',
+            'axis': None,
+            'kind': 'op',
+        },
+        'data_2': {
+            'kind': 'data',
+            'shape': None,
+        }
+    }
+
+    def _create_graph_with_flatten(self, axis):
+        graph = build_graph(self.nodes_attributes,
+                            [('data_1', 'flatten'),
+                             ('flatten', 'data_2')],
+                            {'flatten': {'axis': axis}})
+        return graph
+
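+    # ONNX Flatten with axis=a turns [1, 3, 224, 224] into the 2D shape [prod(dims[:a]), prod(dims[a:])].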
+    @generate(*[(0, [1, 3 * 224 * 224]),
+                (1, [1, 3 * 224 * 224]),
+                (2, [3, 224 * 224]),
+                (3, [3 * 224, 224]),
+                (4, [3 * 224 * 224, 1]),
+                ])
+    def test_flatten_infer_1(self, axis, ref):
+        graph = self._create_graph_with_flatten(axis)
+        flatten_node = Node(graph, 'flatten')
+
+        FlattenONNX.infer(flatten_node)
+
+        self.assertTrue(np.array_equal(flatten_node.out_node().shape, np.array(ref)))
diff --git a/model-optimizer/mo/ops/flatten_test.py b/model-optimizer/mo/ops/flatten_test.py
new file mode 100644 (file)
index 0000000..9d58401
--- /dev/null
@@ -0,0 +1,62 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.graph.graph import Node
+from mo.ops.flatten import Flatten
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'value': None, 'kind': 'data'},
+                    'flatten_1': {'type': 'Flatten', 'value': None, 'kind': 'op'},
+                    'node_2': {'value': None, 'kind': 'data'}
+                    }
+
+
+class TestFlattenPartialInfer(unittest.TestCase):
+    def test_flatten_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'flatten_1'),
+                             ('flatten_1', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': np.array([1, 3 * 256 * 256])},
+                             'node_1': {'shape': np.array([1, 3, 256, 256])},
+                             'flatten_1': {'axis': 1, 'dim': []}
+                             })
+
+        flatten_node = Node(graph, 'flatten_1')
+
+        Flatten.infer(flatten_node)
+        exp_shape = np.array([1, 3 * 256 * 256])
+        res_shape = graph.node['node_2']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_flatten_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'flatten_1'),
+                             ('flatten_1', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': None},
+                             'flatten_1': {'axis': 1}
+                             })
+
+        flatten_node = Node(graph, 'flatten_1')
+
+        Flatten.infer(flatten_node)
+        res_shape = graph.node['node_2']['shape']
+        self.assertIsNone(res_shape)
diff --git a/model-optimizer/mo/ops/inner_product_test.py b/model-optimizer/mo/ops/inner_product_test.py
new file mode 100644 (file)
index 0000000..22d3c4a
--- /dev/null
@@ -0,0 +1,48 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.front.common.partial_infer.inner_product import caffe_inner_product
+from mo.ops.inner_product import InnerProduct
+from mo.utils.unittest.graph import build_graph
+
+
+class TestInnerProductOp(unittest.TestCase):
+    # There are tests for InnerProduct.infer in mo/front/common/partial_infer/inner_product_test.py
+    nodes_attributes = {
+        'node_1': {
+            'shape': np.array([227, 5, 2, 1])
+        },
+        'fc_node': {
+        },
+        'node_3': {
+            'kind': 'data'
+        }
+    }
+
+    def test_inner_product_op(self):
+        graph = build_graph(self.nodes_attributes,
+                            [
+                                ('node_1', 'fc_node'),
+                                ('fc_node', 'node_3')
+                            ])
+        fc_node = InnerProduct(graph, self.nodes_attributes['fc_node']).add_node()
+        self.assertEqual(fc_node.type, 'FullyConnected')
+        self.assertEqual(fc_node.op, 'FullyConnected')
+        self.assertEqual(fc_node.infer, caffe_inner_product)
diff --git a/model-optimizer/mo/ops/pad_test.py b/model-optimizer/mo/ops/pad_test.py
new file mode 100644 (file)
index 0000000..0013ed3
--- /dev/null
@@ -0,0 +1,95 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.graph.graph import Node
+from mo.ops.pad import Pad
+from mo.utils.unittest.graph import build_graph
+
+
+class TestPadONNXOp(unittest.TestCase):
+    # Pad comes in two flavors: 'pads' given as an attribute or taken from an optional second 'data_pads' input; both are covered below.
+    node_attrs = {
+        'data_in': {
+            'kind': 'data',
+            'shape': np.array([1, 3, 100, 200])
+        },
+        # optional input for one of the two flavors of pad op
+        'data_pads': {
+            'kind': 'data',
+            'value': np.array([[0, 0], [0, 0], [1, 3], [2, 4]], dtype=np.int64),
+            'shape': np.array([2, 4], dtype=np.int64)
+        },
+        'pad': {
+            'op': 'Pad',
+            'kind': 'op',
+            'pads': None,
+        },
+        'data_out': {
+            'kind': 'data',
+            'shape': None,
+        }
+    }
+
+    edge_attrs = [
+        ('data_in', 'pad'),
+        ('pad', 'data_out')
+    ]
+
+    def test_one_input(self):
+        graph = build_graph(
+            self.node_attrs,
+            self.edge_attrs,
+            {'pad': {'pads': np.array([[0, 0], [0, 0], [1, 3], [2, 4]], dtype=np.int64)}},
+            nodes_with_edges_only=True,
+        )
+        pad_node = Node(graph, 'pad')
+        Pad.infer(pad_node)
+        self.assertTrue(np.array_equal(Node(graph, 'data_out').shape, np.array([1, 3, 100 + 1 + 3, 200 + 2 + 4])))
+
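+    # Second flavor: the pad values come from the extra 'data_pads' input instead of the 'pads' attribute.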
+    def test_two_inputs(self):
+        graph = build_graph(
+            self.node_attrs,
+            self.edge_attrs + [('data_pads', 'pad')],
+            nodes_with_edges_only=True,
+        )
+        pad_node = Node(graph, 'pad')
+        Pad.infer(pad_node)
+        self.assertTrue(np.array_equal(Node(graph, 'data_out').shape, np.array([1, 3, 100 + 1 + 3, 200 + 2 + 4])))
+
+    def test_one_input_and_no_pads(self):
+        graph = build_graph(
+            self.node_attrs,
+            self.edge_attrs,
+            nodes_with_edges_only=True,
+        )
+        pad_node = Node(graph, 'pad')
+        with self.assertRaisesRegex(AssertionError, ".*pads attribute is missing.*"):
+            Pad.infer(pad_node)
+
+    def test_two_inputs_and_pads(self):
+        graph = build_graph(
+            self.node_attrs,
+            self.edge_attrs + [('data_pads', 'pad')],
+            {'pad': {'pads': np.array([[0, 0], [0, 0], [1, 3], [2, 4]], dtype=np.int64)}},
+            nodes_with_edges_only=True,
+        )
+        pad_node = Node(graph, 'pad')
+        with self.assertRaisesRegex(AssertionError, ".*unexpected additional input argument.*"):
+            Pad.infer(pad_node)
diff --git a/model-optimizer/mo/ops/permute_test.py b/model-optimizer/mo/ops/permute_test.py
new file mode 100644 (file)
index 0000000..cf26cc7
--- /dev/null
@@ -0,0 +1,96 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import itertools
+import unittest
+
+import numpy as np
+from generator import generator, generate
+
+from mo.graph.graph import Node
+from mo.ops.permute import Permute
+from mo.utils.unittest.graph import build_graph
+
+
+@generator
+class TestPermuteOp(unittest.TestCase):
+    nodes_attributes = {
+        'data_1': {
+            'kind': 'data',
+            'shape': np.array([1, 3, 224, 224])
+        },
+        'transpose': {
+            'type': 'Permute',
+            'order': None,
+            'reverse_order': False,
+            'kind': 'op',
+        },
+        'data_2': {
+            'kind': 'data',
+            'shape': None,
+        }
+    }
+
+    def _create_graph_with_transpose(self, order):
+        graph = build_graph(self.nodes_attributes,
+                            [('data_1', 'transpose'),
+                             ('transpose', 'data_2')],
+                            {'transpose': {'order': order}})
+        return graph
+
+    @generate(*[list(order) for order in list(itertools.permutations(np.arange(4)))])
+    def test_transpose_infer_1(self, order):
+        graph = self._create_graph_with_transpose(order)
+        transpose_node = Node(graph, 'transpose')
+
+        Permute.infer(transpose_node)
+
+        ref = [transpose_node.in_node().shape[i] for i in order]
+        self.assertTrue(np.array_equal(transpose_node.out_node().shape, np.array(ref)))
+
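+    # With reverse_order=True and no explicit order, the output shape is simply the reversed input shape.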
+    def test_transpose_infer_2(self):
+        order = None
+        graph = self._create_graph_with_transpose(order)
+        transpose_node = Node(graph, 'transpose')
+        transpose_node['reverse_order'] = True
+
+        Permute.infer(transpose_node)
+
+        ref = np.array([x for x in reversed(transpose_node.in_node().shape)])
+        self.assertTrue(np.array_equal(transpose_node.out_node().shape, ref),
+                        "Shapes are not the same: {} and {}".format(transpose_node.out_node().shape, ref))
+
+    def test_transpose_infer_neg_1(self):
+        order = np.array([0, 1, 2, 3])
+        graph = self._create_graph_with_transpose(order)
+        transpose_node = Node(graph, 'transpose')
+        transpose_node['reverse_order'] = True
+
+        Permute.infer(transpose_node)
+
+        self.assertIsNone(transpose_node.out_node().shape, "Output shape should be None")
+
+    def test_transpose_infer_neg_2(self):
+        order = None
+        graph = self._create_graph_with_transpose(order)
+        transpose_node = Node(graph, 'transpose')
+        transpose_node['reverse_order'] = False
+
+        Permute.infer(transpose_node)
+
+        self.assertIsNone(transpose_node.out_node().shape, "Output shape should be None")
diff --git a/model-optimizer/mo/ops/pooling_test.py b/model-optimizer/mo/ops/pooling_test.py
new file mode 100644 (file)
index 0000000..ea11b72
--- /dev/null
@@ -0,0 +1,122 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.graph.graph import Node
+from mo.ops.pooling import Pooling
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'node_1': {'value': None, 'kind': 'data'},
+                    'pool': {'type': 'Pooling', 'value': None, 'kind': 'op'},
+                    'node_2': {'value': None, 'kind': 'data'},
+                    }
+
+
+class TestPoolingPartialInfer(unittest.TestCase):
+    def test_pooling_infer(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'pool'),
+                             ('pool', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 256, 256])},
+                             'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 2, 2]),
+                                      'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]),
+                                      'pad_spatial_shape': np.array([[3, 3], [3, 3]]),
+                                      'pool_method': 'avg', 'exclude_pad': 'false', 'global_pool': 0,
+                                      'output_spatial_shape': None, 'output_shape': None,
+                                      'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]),
+                                      'channel_dims': np.array([1]), 'batch_dims': np.array([0]),
+                                      'pooling_convention': 'full'}
+                             })
+
+        pool_node = Node(graph, 'pool')
+
+        Pooling.infer(pool_node)
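+        # Expected spatial size under the 'full' (ceil) convention: ceil((256 + 3 + 3 - 3) / 2) + 1 = 131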
+        exp_shape = np.array([1, 3, 131, 131])
+        res_shape = graph.node['node_2']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_pooling_infer_decrement_input_spatial(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'pool'),
+                             ('pool', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 224, 224])},
+                             'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 3, 3]),
+                                      'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]),
+                                      'pad_spatial_shape': np.array([[1, 1], [1, 1]]),
+                                      'pool_method': 'avg', 'exclude_pad': 'false', 'global_pool': 0,
+                                      'output_spatial_shape': None, 'output_shape': None,
+                                      'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]),
+                                      'channel_dims': np.array([1]), 'batch_dims': np.array([0]),
+                                      'pooling_convention': 'full'}
+                             })
+
+        pool_node = Node(graph, 'pool')
+
+        Pooling.infer(pool_node)
+        exp_shape = np.array([1, 3, 75, 75])
+        res_shape = graph.node['node_2']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_pooling_infer_no_convention(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'pool'),
+                             ('pool', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': np.array([1, 3, 256, 256])},
+                             'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 2, 2]),
+                                      'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]),
+                                      'pad_spatial_shape': np.array([[3, 3], [3, 3]]),
+                                      'pool_method': 'avg', 'exclude_pad': 'false', 'global_pool': 0,
+                                      'output_spatial_shape': None, 'output_shape': None,
+                                      'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]),
+                                      'channel_dims': np.array([1]), 'batch_dims': np.array([0])}
+                             })
+
+        pool_node = Node(graph, 'pool')
+
+        Pooling.infer(pool_node)
+        exp_shape = np.array([1, 3, 130, 130])
+        res_shape = graph.node['node_2']['shape']
+        for i in range(0, len(exp_shape)):
+            self.assertEqual(exp_shape[i], res_shape[i])
+
+    def test_pooling_infer_no_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('node_1', 'pool'),
+                             ('pool', 'node_2')],
+                            {'node_2': {'is_output': True, 'shape': None},
+                             'node_1': {'shape': None},
+                             'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 2, 2]),
+                                      'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]),
+                                      'pad_spatial_shape': np.array([[3, 3], [3, 3]]),
+                                      'pool_method': 'avg', 'exclude_pad': 'false',
+                                      'output_spatial_shape': None, 'output_shape': None,
+                                      'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]),
+                                      'channel_dims': np.array([1]), 'batch_dims': np.array([0]),
+                                      'pooling_convention': 'full'}
+                             })
+
+        pool_node = Node(graph, 'pool')
+        Pooling.infer(pool_node)
+        res_shape = graph.node['node_2']['shape']
+        self.assertIsNone(res_shape)
diff --git a/model-optimizer/mo/ops/power_test.py b/model-optimizer/mo/ops/power_test.py
new file mode 100644 (file)
index 0000000..e0a3b97
--- /dev/null
@@ -0,0 +1,100 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.graph.graph import Node
+from mo.ops.power import Power
+from mo.utils.unittest.graph import build_graph
+
+
+class TestPowerOp(unittest.TestCase):
+    @staticmethod
+    def create_graph(single_input=True):
+        nodes_attributes = {
+            'input1': {
+                'kind': 'data',
+                'shape': np.array([1, 3, 224, 224]),
+                'value': None,
+            },
+            'input2': {
+                'kind': 'data',
+                'shape': np.array([]),
+                'value': np.array(1.0),
+            },
+            'power': {
+                'kind': 'op',
+                'shape': np.array([1, 3, 224, 224]),
+            },
+            'power_data': {
+                'kind': 'data',
+                'shape': None,
+            },
+        }
+        if single_input:
+            return build_graph(nodes_attributes,
+                               [
+                                   ('input1', 'power'),
+                                   ('power', 'power_data')
+                               ])
+        else:
+            return build_graph(nodes_attributes,
+                               [
+                                   ('input1', 'power'),
+                                   ('input2', 'power'),
+                                   ('power', 'power_data')
+                               ])
+
+    def test_power_single_input_infer1(self):
+        graph = self.create_graph(single_input=True)
+        graph.graph['layout'] = 'NCHW'
+        power_node = Node(graph, 'power')
+        power_node['power'] = 1.0
+
+        Power.infer(power_node)
+
+        self.assertTrue(np.array_equal(power_node.out_node().shape, power_node.in_node(0).shape))
+
+    def test_power_two_input_infer1(self):
+        graph = self.create_graph(single_input=False)
+        graph.graph['layout'] = 'NCHW'
+        power_node = Node(graph, 'power')
+
+        Power.infer(power_node)
+
+        self.assertTrue(np.array_equal(power_node.out_node().shape, power_node.in_node(0).shape))
+
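+    # A non-scalar second input makes the power value undefined, so the output shape must stay None.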
+    def test_power_two_input_infer2(self):
+        graph = self.create_graph(single_input=False)
+        power_node = Node(graph, 'power')
+        input2 = Node(graph, 'input2')
+        input2.value = np.ones((1, 2, 3))
+
+        Power.infer(power_node)
+
+        self.assertIsNone(power_node.out_node().shape)
+
+    def test_power_two_input_infer3(self):
+        graph = self.create_graph(single_input=False)
+        power_node = Node(graph, 'power')
+        input2 = Node(graph, 'input2')
+        input2.value = None
+
+        Power.infer(power_node)
+
+        self.assertIsNone(power_node.out_node().shape)
diff --git a/model-optimizer/mo/ops/slice_test.py b/model-optimizer/mo/ops/slice_test.py
new file mode 100644 (file)
index 0000000..2061e30
--- /dev/null
@@ -0,0 +1,117 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import unittest
+
+import numpy as np
+from generator import generator
+
+from mo.graph.graph import Node
+from mo.ops.slice import Slice
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {
+    'data_1': {
+        'kind': 'data',
+        'shape': None,
+        'value': None,
+    },
+    'begin': {
+        'kind': 'data',
+        'shape': None,
+        'value': None,
+    },
+    'size': {
+        'kind': 'data',
+        'shape': None,
+        'value': None,
+    },
+    'slice': {
+        'op': 'Slice',
+        'axis': None,
+        'start': None,
+        'end': None,
+        'kind': 'op',
+    },
+    'data_2': {
+        'kind': 'data',
+        'shape': None,
+        'value': None,
+    }
+}
+
+
+@generator
+class TestSliceOp(unittest.TestCase):
+    def test_slice_infer_constant(self):
+        # Testing constant path case
+        graph = build_graph(nodes_attributes,
+                            [('data_1', 'slice'),
+                             ('begin', 'slice'),
+                             ('size', 'slice'),
+                             ('slice', 'data_2')],
+                            {'data_1': {'shape': np.array([4]), 'value': np.array([1, 3, 224, 224])},
+                             'slice': {'start': np.array([1]), 'end': np.array([2])},
+                             'size': {'value': np.array([1])},
+                             'begin': {'value': np.array([1])}})
+
+        slice_node = Node(graph, 'slice')
+        Slice.infer(slice_node)
+
+        self.assertTrue(np.array_equal(slice_node.out_node().value, np.array([3])))
+        self.assertTrue(np.array_equal(slice_node.out_node().shape, np.array([1])))
+        self.assertTrue(np.array_equal(slice_node['slices'], np.array([slice(1, 2, 1)])))
+
+    def test_slice_infer_non_constant(self):
+        # Testing non-constant path case (when value in input is None)
+        # with multiple slicing params
+        graph = build_graph(nodes_attributes,
+                            [('data_1', 'slice'),
+                             ('begin', 'slice'),
+                             ('size', 'slice'),
+                             ('slice', 'data_2')],
+                            {'data_1': {'shape': np.array([4, 5, 6])},
+                             'slice': {'start': np.array([1, 2]),
+                                       'end': np.array([4, 3])},
+                             'size': {'value': np.array([3, 1])},
+                             'begin': {'value': np.array([1, 2])}})
+
+        slice_node = Node(graph, 'slice')
+
+        Slice.infer(slice_node)
+        self.assertTrue(np.array_equal(slice_node.out_node().value, None))
+        self.assertTrue(np.array_equal(slice_node.out_node().shape, np.array([3, 1, 6])))
+        self.assertTrue(np.array_equal(slice_node['slices'], np.array([slice(1, 4, 1), slice(2, 3, 1), slice(0, 6, 1)])))
+
+    def test_slice_infer_negative_size(self):
+        # Test case when size[i] == -1 (that means all
+        # remaining elements in dimension i are included in the slice)
+        graph = build_graph(nodes_attributes,
+                            [('data_1', 'slice'),
+                             ('begin', 'slice'),
+                             ('size', 'slice'),
+                             ('slice', 'data_2')],
+                            {'data_1': {'shape': np.array([4, 5, 6])},
+                             'slice': {'start': np.array([1, 2]),
+                                       'end': np.array([4, 1])},
+                             'size': {'value': np.array([3, -1])},
+                             'begin': {'value': np.array([1, 2])}})
+
+        slice_node = Node(graph, 'slice')
+
+        Slice.infer(slice_node)
+        self.assertTrue(np.array_equal(slice_node.out_node().value, None))
+        self.assertTrue(np.array_equal(slice_node.out_node().shape, np.array([3, 3, 6])))
+        self.assertTrue(np.array_equal(slice_node['slices'], np.array([slice(1, 4, 1), slice(2, 5, 1), slice(0, 6, 1)])))
diff --git a/model-optimizer/mo/ops/tile_test.py b/model-optimizer/mo/ops/tile_test.py
new file mode 100644 (file)
index 0000000..af0d189
--- /dev/null
@@ -0,0 +1,196 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.graph.graph import Node
+from mo.ops.tile import Tile
+from mo.utils.unittest.graph import build_graph
+
+nodes_attributes = {'data': {'value': None, 'shape': np.array([10, 20, 30, 40]), 'kind': 'data'},
+                    'tile_values': {'value': None, 'shape': np.array([4]), 'kind': 'data'},
+                    'tile': {'type': 'Tile', 'kind': 'op'},
+                    'tile_out': {'value': None, 'shape': None, 'kind': 'data'},
+                    }
+
+
+class TestTileInfer(unittest.TestCase):
+    def test_tile_infer_correct(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'tile_values': {'value': np.array([7, 1, 1, 1])}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertTrue(np.all(np.array([70, 20, 30, 40]) == graph.node['tile_out']['shape']))
+        self.assertEqual(tile_node.axis, 0)
+        self.assertEqual(tile_node.tiles, 7)
+
+    def test_tile_infer_correct_2(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'tile_values': {'value': np.array([1, 7, 1, 1])}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertTrue(np.all(np.array([10, 140, 30, 40]) == graph.node['tile_out']['shape']))
+        self.assertEqual(tile_node.axis, 1)
+        self.assertEqual(tile_node.tiles, 7)
+
+    def test_tile_infer_correct_2d_tensor(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'data': {'shape': np.array([3, 7])},
+                             'tile_values': {'value': np.array([5, 1])}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertTrue(np.all(np.array([15, 7]) == graph.node['tile_out']['shape']))
+        self.assertEqual(tile_node.axis, 0)
+        self.assertEqual(tile_node.tiles, 5)
+
+    def test_tile_infer_all_ones(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'tile_values': {'value': np.array([1, 1, 1, 1])}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertTrue(np.all(np.array([10, 20, 30, 40]) == graph.node['tile_out']['shape']))
+        self.assertEqual(tile_node.axis, 0)
+        self.assertEqual(tile_node.tiles, 1)
+
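+    # More than one non-unit repeat cannot be expressed as a single Tile layer: the node type is reset and axis/tiles stay unset.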
+    def test_tile_infer_two_non_one(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'tile_values': {'value': np.array([2, 1, 1, 2])}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertIsNone(graph.node['tile']['type'])
+        self.assertTrue(np.all(np.array([20, 20, 30, 80]) == graph.node['tile_out']['shape']))
+        self.assertFalse(tile_node.has_and_set('axis'))
+        self.assertFalse(tile_node.has_and_set('tiles'))
+
+    def test_tile_infer_three_non_one(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'tile_values': {'value': np.array([2, 1, 5, 2])}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertIsNone(graph.node['tile']['type'])
+        self.assertTrue(np.all(np.array([20, 20, 150, 80]) == graph.node['tile_out']['shape']))
+
+    def test_tile_infer_none_input_shape(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'data': {'shape': None},
+                             'tile_values': {'value': np.array([1, 7, 1, 1])}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertIsNone(graph.node['tile_out']['shape'])
+
+    def test_tile_infer_values(self):
+        input_data = np.arange(-30, 60, 0.25).reshape([2, 4, 3, -1])
+        tile_values = np.array([3, 1, 1, 1])
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'data': {'shape': input_data.shape, 'value': input_data},
+                             'tile_values': {'value': tile_values}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertTrue(np.all(np.tile(input_data, tile_values) == graph.node['tile_out']['value']))
+        self.assertEqual(tile_node.axis, 0)
+        self.assertEqual(tile_node.tiles, 3)
+
+    def test_tile_infer_values_const_propagation(self):
+        """
+        Tile with more than one non-unit repeat is not supported as a single layer, but constant propagation of the value must still work.
+        """
+        input_data = np.arange(-30, 60, 0.25).reshape([2, 4, 3, -1])
+        tile_values = np.array([4, 3, 2, 5])
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'data': {'shape': input_data.shape, 'value': input_data},
+                             'tile_values': {'value': tile_values}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertTrue(np.all(np.tile(input_data, tile_values) == graph.node['tile_out']['value']))
+        self.assertIsNone(tile_node.type)
+
+    def test_tile_infer_undefined_tile_values(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'tile_values': {'value': None}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertIsNone(graph.node['tile_out']['shape'])
+
+    def test_tile_infer_shapes_mismatch(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile_values', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'tile_values': {'value': np.array([1, 2, 1]), 'shape': np.array([3])}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertIsNone(graph.node['tile_out']['shape'])
+
+    def test_tile_infer_one_input_correct(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'tile': {'axis': 1, 'tiles': 7}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertTrue(np.all(np.array([10, 140, 30, 40]) == graph.node['tile_out']['shape']))
+        self.assertEqual(tile_node.axis, 1)
+        self.assertEqual(tile_node.tiles, 7)
+
+    def test_tile_infer_one_input_correct_missing_axis(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'tile': {'tiles': 7}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertIsNone(graph.node['tile_out']['shape'])
+
+    def test_tile_infer_one_input_correct_missing_tiles(self):
+        graph = build_graph(nodes_attributes,
+                            [('data', 'tile'),
+                             ('tile', 'tile_out')],
+                            {'tile': {'axis': 1}})
+        tile_node = Node(graph, 'tile')
+        Tile.infer(tile_node)
+        self.assertIsNone(graph.node['tile_out']['shape'])
diff --git a/model-optimizer/mo/ops/unsqueeze_test.py b/model-optimizer/mo/ops/unsqueeze_test.py
new file mode 100644 (file)
index 0000000..06d25b0
--- /dev/null
@@ -0,0 +1,67 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+from generator import generator
+
+from mo.graph.graph import Node
+from mo.ops.unsqueeze import Unsqueeze
+from mo.utils.unittest.graph import build_graph, compare_graphs
+
+
+@generator
+class TestUnsqueezeOp(unittest.TestCase):
+    nodes_attributes = {
+        'data_1': {
+            'kind': 'data',
+            'shape': None,
+            'value': None,
+        },
+        'unsq': {
+            'op': 'Unsqueeze',
+            'kind': 'op',
+            'unsqueeze_dims': None,
+        },
+        'data_2': {
+            'kind': 'data',
+            'shape': None,
+            'value': None,
+        }
+    }
+
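+    # unsqueeze_dims [0, 4] inserts size-1 axes at positions 0 and 4, turning [1, 3, 64, 64] into [1, 1, 3, 64, 1, 64].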
+    def test_unsqueeze_infer(self):
+        graph = build_graph(self.nodes_attributes,
+                            [('data_1', 'unsq'),
+                             ('unsq', 'data_2')],
+                            {'data_1': {'shape': np.array([1, 3, 64, 64])},
+                             'unsq': {'unsqueeze_dims': np.array([0, 4])}
+                             })
+
+        graph_ref = build_graph(self.nodes_attributes,
+                                [('data_1', 'unsq'),
+                                 ('unsq', 'data_2')],
+                                {'data_1': {'shape': np.array([1, 3, 64, 64])},
+                                 'unsq': {'unsqueeze_dims': np.array([0, 4])},
+                                 'data_2': {'shape': np.array([1, 1, 3, 64, 1, 64])}
+                                 })
+
+        unsqueeze_node = Node(graph, 'unsq')
+        Unsqueeze.infer(unsqueeze_node)
+
+        (flag, resp) = compare_graphs(graph, graph_ref, 'data_2')
+        self.assertTrue(flag, resp)
diff --git a/model-optimizer/mo/pipeline/common_test.py b/model-optimizer/mo/pipeline/common_test.py
new file mode 100644 (file)
index 0000000..a877700
--- /dev/null
@@ -0,0 +1,241 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from generator import generator, generate
+
+from mo.graph.graph import Node
+from mo.pipeline.common import determined_sort, get_fw_tensor_debug_info, get_sorted_outputs
+from mo.utils.unittest.graph import build_graph_with_edge_attrs
+
+
+@generator
+class TestTopologicalSort(unittest.TestCase):
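+    # Three variants of the same graph, each adding more skip connections; determined_sort must return the same deterministic order for all of them.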
+    @generate(
+        [('A', 'Ad', {'out': 0}),
+         ('Ad', 'B', {'in': 0}),
+         ('B', 'Bd', {'out': 0}),
+         ('Bd', 'C', {'in': 0}),
+         ('C', 'Cd', {'out': 0}),
+         ('Cd', 'D', {'in': 0}),
+         ('D', 'Dd', {'out': 0}),
+         ('Dd', 'E', {'in': 0}),
+         ('E', 'Ed', {'out': 0}),
+         ('Ed', 'I', {'in': 0}),
+         ('Cd', 'F', {'in': 0}),
+         ('F', 'Fd', {'out': 0}),
+         ('Fd', 'G', {'in': 0}),
+         ('G', 'Gd', {'out': 0}),
+         ('Gd', 'I', {'in': 1}),
+         ('Cd', 'H', {'in': 0}),
+         ('H', 'Hd', {'out': 0}),
+         ('Hd', 'I', {'in': 2}),
+         ('I', 'Id', {'out': 0}),
+         ('Id', 'J', {'in': 0}),
+         ('J', 'Jd', {'out': 0}),
+         ('Jd', 'K', {'in': 0}),
+         ('K', 'Kd', {'out': 0})],
+
+        [('A', 'Ad', {'out': 0}),
+         ('Ad', 'B', {'in': 0}),
+         ('B', 'Bd', {'out': 0}),
+         ('Bd', 'C', {'in': 0}),
+         ('C', 'Cd', {'out': 0}),
+         ('Cd', 'D', {'in': 0}),
+         ('D', 'Dd', {'out': 0}),
+         ('Dd', 'E', {'in': 0}),
+         ('E', 'Ed', {'out': 0}),
+         ('Ed', 'I', {'in': 0}),
+         ('Cd', 'F', {'in': 0}),
+         ('F', 'Fd', {'out': 0}),
+         ('Fd', 'G', {'in': 0}),
+         ('G', 'Gd', {'out': 0}),
+         ('Gd', 'I', {'in': 1}),
+         ('Cd', 'H', {'in': 0}),
+         ('H', 'Hd', {'out': 0}),
+         ('Hd', 'I', {'in': 2}),
+         ('I', 'Id', {'out': 0}),
+         ('Id', 'J', {'in': 0}),
+         ('J', 'Jd', {'out': 0}),
+         ('Jd', 'K', {'in': 0}),
+         ('K', 'Kd', {'out': 0}),
+         ('Ad', 'E', {'in': 1}),
+         ('Bd', 'K', {'in': 1}),
+         ('Hd', 'J', {'in': 1})],
+
+        [('A', 'Ad', {'out': 0}),
+         ('Ad', 'B', {'in': 0}),
+         ('B', 'Bd', {'out': 0}),
+         ('Bd', 'C', {'in': 0}),
+         ('C', 'Cd', {'out': 0}),
+         ('Cd', 'D', {'in': 0}),
+         ('D', 'Dd', {'out': 0}),
+         ('Dd', 'E', {'in': 0}),
+         ('E', 'Ed', {'out': 0}),
+         ('Ed', 'I', {'in': 0}),
+         ('Cd', 'F', {'in': 0}),
+         ('F', 'Fd', {'out': 0}),
+         ('Fd', 'G', {'in': 0}),
+         ('G', 'Gd', {'out': 0}),
+         ('Gd', 'I', {'in': 1}),
+         ('Cd', 'H', {'in': 0}),
+         ('H', 'Hd', {'out': 0}),
+         ('Hd', 'I', {'in': 2}),
+         ('I', 'Id', {'out': 0}),
+         ('Id', 'J', {'in': 0}),
+         ('J', 'Jd', {'out': 0}),
+         ('Jd', 'K', {'in': 0}),
+         ('K', 'Kd', {'out': 0}),
+         ('Ad', 'E', {'in': 1}),
+         ('Bd', 'K', {'in': 1}),
+         ('Hd', 'J', {'in': 1}),
+         ('Dd', 'F', {'in': 1}),
+         ('Fd', 'H', {'in': 1}),
+         ('Gd', 'H', {'in': 0})]
+    )
+    def test_determined_topological_sort(self, edges):
+        nodes = {'A': {'type': 'Identity', 'kind': 'op'},
+                 'B': {'type': 'Identity', 'kind': 'op'},
+                 'C': {'type': 'Identity', 'kind': 'op'},
+                 'D': {'type': 'Identity', 'kind': 'op'},
+                 'E': {'type': 'Identity', 'kind': 'op'},
+                 'F': {'type': 'Identity', 'kind': 'op'},
+                 'G': {'type': 'Identity', 'kind': 'op'},
+                 'H': {'type': 'Identity', 'kind': 'op'},
+                 'I': {'type': 'Identity', 'kind': 'op'},
+                 'J': {'type': 'Identity', 'kind': 'op'},
+                 'K': {'type': 'Identity', 'kind': 'op'},
+                 'Ad': {'value': None, 'kind': 'data'},
+                 'Bd': {'value': None, 'kind': 'data'},
+                 'Cd': {'value': None, 'kind': 'data'},
+                 'Dd': {'value': None, 'kind': 'data'},
+                 'Ed': {'value': None, 'kind': 'data'},
+                 'Fd': {'value': None, 'kind': 'data'},
+                 'Gd': {'value': None, 'kind': 'data'},
+                 'Hd': {'value': None, 'kind': 'data'},
+                 'Id': {'value': None, 'kind': 'data'},
+                 'Jd': {'value': None, 'kind': 'data'},
+                 'Kd': {'value': None, 'kind': 'data'},
+                 }
+
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        outputs = [Node(graph, 'Kd')]
+        for i in range(100):
+            op_order, data_order = determined_sort(outputs)
+            self.assertListEqual(op_order, ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])
+            self.assertListEqual(data_order, ['Ad', 'Bd', 'Cd', 'Dd', 'Ed', 'Fd', 'Gd', 'Hd', 'Id', 'Jd', 'Kd'])
+
+
+class TestGetFWTensorName(unittest.TestCase):
+    def test_get_fw_tensor_debug_info(self):
+        nodes = {
+            'A': {'type': 'Identity', 'kind': 'op'},
+            'B': {'type': 'Identity', 'kind': 'op'},
+            'C': {'type': 'Identity', 'kind': 'op'},
+            'Ad': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('A', 0)]},
+            'Bd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('B', 0)]},
+            'Cd': {'value': None, 'kind': 'data'},
+        }
+        edges = [
+            ('A', 'Ad', {'out': 0}),
+            ('Ad', 'B', {'in': 0}),
+            ('B', 'Bd', {'out': 0}),
+            ('Bd', 'C', {'in': 0}),
+            ('C', 'Cd', {'out': 0})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        fw_debug_info = get_fw_tensor_debug_info(Node(graph, 'Cd'))
+        self.assertEqual(len(fw_debug_info), 1)
+        self.assertEqual(fw_debug_info[0], ('B', 0))
+
+
+class TestOutputSort(unittest.TestCase):
+    def test_get_sorted_outputs(self):
+        nodes = {'A': {'type': 'Identity', 'kind': 'op'},
+                 'B': {'type': 'Identity', 'kind': 'op'},
+                 'C': {'type': 'Identity', 'kind': 'op'},
+                 'D': {'type': 'Identity', 'kind': 'op'},
+                 'E': {'type': 'Identity', 'kind': 'op'},
+                 'F': {'type': 'Identity', 'kind': 'op'},
+                 'G': {'type': 'Identity', 'kind': 'op'},
+                 'H': {'type': 'Identity', 'kind': 'op'},
+                 'Ad': {'value': None, 'kind': 'data'},
+                 'Bd': {'value': None, 'kind': 'data'},
+                 'Cd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('C', 0)]},
+                 'Dd': {'value': None, 'kind': 'data'},
+                 'Ed': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('E', 0)]},
+                 'Fd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('F', 0)]},
+                 'Gd': {'value': None, 'kind': 'data'},
+                 'Hd': {'value': None, 'kind': 'data'}
+                 }
+        edges = [
+            ('A', 'Ad', {'out': 0}),
+            ('Ad', 'B', {'in': 0}),
+            ('B', 'Bd', {'out': 0}),
+            ('Bd', 'C', {'in': 0}),
+            ('C', 'Cd', {'out': 0}),
+            ('Cd', 'D', {'in': 0}),
+            ('D', 'Dd', {'out': 0}),
+            ('Dd', 'E', {'in': 0}),
+            ('E', 'Ed', {'out': 0}),
+            ('Cd', 'F', {'in': 0}),
+            ('F', 'Fd', {'out': 0}),
+            ('Fd', 'G', {'in': 0}),
+            ('G', 'Gd', {'out': 0}),
+            ('Cd', 'H', {'in': 0}),
+            ('H', 'Hd', {'out': 0})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        self.assertListEqual([node.id for node in get_sorted_outputs(graph)], ['Hd', 'Ed', 'Gd'])
+
+    def test_get_sorted_outputs_fine_situation(self):
+        nodes = {'A': {'type': 'Identity', 'kind': 'op'},
+                 'B': {'type': 'Identity', 'kind': 'op'},
+                 'C': {'type': 'Identity', 'kind': 'op'},
+                 'D': {'type': 'Identity', 'kind': 'op'},
+                 'E': {'type': 'Identity', 'kind': 'op'},
+                 'F': {'type': 'Identity', 'kind': 'op'},
+                 'G': {'type': 'Identity', 'kind': 'op'},
+                 'H': {'type': 'Identity', 'kind': 'op'},
+                 'Ad': {'value': None, 'kind': 'data'},
+                 'Bd': {'value': None, 'kind': 'data'},
+                 'Cd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('C', 0)]},
+                 'Dd': {'value': None, 'kind': 'data'},
+                 'Ed': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('E', 0)]},
+                 'Fd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('F', 0)]},
+                 'Gd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('G', 0)]},
+                 'Hd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('H', 0)]}
+                 }
+        edges = [
+            ('A', 'Ad', {'out': 0}),
+            ('Ad', 'B', {'in': 0}),
+            ('B', 'Bd', {'out': 0}),
+            ('Bd', 'C', {'in': 0}),
+            ('C', 'Cd', {'out': 0}),
+            ('Cd', 'D', {'in': 0}),
+            ('D', 'Dd', {'out': 0}),
+            ('Dd', 'E', {'in': 0}),
+            ('E', 'Ed', {'out': 0}),
+            ('Cd', 'F', {'in': 0}),
+            ('F', 'Fd', {'out': 0}),
+            ('Fd', 'G', {'in': 0}),
+            ('G', 'Gd', {'out': 0}),
+            ('Cd', 'H', {'in': 0}),
+            ('H', 'Hd', {'out': 0})
+        ]
+        graph = build_graph_with_edge_attrs(nodes, edges)
+        self.assertListEqual([node.id for node in get_sorted_outputs(graph)], ['Ed', 'Gd', 'Hd'])
diff --git a/model-optimizer/mo/pipeline/kaldi_test.py b/model-optimizer/mo/pipeline/kaldi_test.py
new file mode 100644 (file)
index 0000000..2fe1833
--- /dev/null
@@ -0,0 +1,101 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.graph.graph import Node
+from mo.pipeline.kaldi import apply_biases_to_last_layer
+from mo.utils.unittest.graph import build_graph
+
+
+class TestKaldiPipeline(unittest.TestCase):
+    def test_apply_biases_to_ScaleShift(self):
+        nodes = {'input': {'kind': 'data'},
+                 'weights': {'value': None, 'kind': 'data'},
+                 'biases': {'value': np.zeros(10), 'kind': 'data'},
+                 'sc': {'op': 'ScaleShift', 'kind': 'op'},
+                 'output': {'kind': 'data'}
+                 }
+        graph = build_graph(nodes,
+                            [
+                                ('input', 'sc'),
+                                ('weights', 'sc'),
+                                ('biases', 'sc'),
+                                ('sc', 'output')
+                            ],
+                            {
+                                'output': {
+                                    'is_output': True
+                                }
+                            })
+        counts = -0.5 * np.ones(10)
+        apply_biases_to_last_layer(graph, counts)
+        sc_node = Node(graph, 'sc')
+        self.assertTrue(np.array_equal(sc_node.in_node(2).value, -counts))
+
+    def test_apply_biases_to_FullyConnected(self):
+        nodes = {'input': {'kind': 'data'},
+                 'weights': {'kind': 'data'},
+                 'biases': {'value': None, 'shape': None, 'kind': 'data'},
+                 'fc': {'op': 'FullyConnected', 'kind': 'op'},
+                 'output': {'kind': 'data'}
+                 }
+        graph = build_graph(nodes,
+                            [
+                                ('input', 'fc'),
+                                ('weights', 'fc'),
+                                ('biases', 'fc'),
+                                ('fc', 'output')
+                            ],
+                            {
+                                'output': {
+                                    'is_output': True
+                                }
+                            })
+        counts = -0.5 * np.ones(10)
+        apply_biases_to_last_layer(graph, counts)
+        fc_node = Node(graph, 'fc')
+        self.assertTrue(np.array_equal(fc_node.in_node(2).value, -counts))
+
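+    # When the last layer is a SoftMax, the biases must be applied to the FullyConnected node that precedes it.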
+    def test_apply_biases_to_graph_with_SoftMax(self):
+        nodes = {'input': {'kind': 'data'},
+                 'weights': {'value': None, 'kind': 'data'},
+                 'biases': {'value': None, 'shape': None, 'kind': 'data'},
+                 'fc': {'op': 'FullyConnected', 'kind': 'op'},
+                 'data': {'kind': 'data'},
+                 'softmax': {'op': 'SoftMax', 'kind': 'op'},
+                 'output': {'kind': 'data'}
+                 }
+        graph = build_graph(nodes,
+                            [
+                                ('input', 'fc'),
+                                ('weights', 'fc'),
+                                ('biases', 'fc'),
+                                ('fc', 'data'),
+                                ('data', 'softmax'),
+                                ('softmax', 'output')
+                            ],
+                            {
+                                'output': {
+                                    'is_output': True
+                                }
+                            })
+        counts = -0.5 * np.ones(10)
+        apply_biases_to_last_layer(graph, counts)
+        fc_node = Node(graph, 'fc')
+        self.assertTrue(np.array_equal(fc_node.in_node(2).value, -counts))
diff --git a/model-optimizer/mo/pipeline/mx_test.py b/model-optimizer/mo/pipeline/mx_test.py
new file mode 100644 (file)
index 0000000..66e49bb
--- /dev/null
@@ -0,0 +1,67 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import numpy as np
+
+from mo.graph.graph import Node
+from mo.pipeline.mx import add_input_data_to_prior_boxes
+from mo.utils.unittest.graph import build_graph
+
+
+class TestMxnetPipeline(unittest.TestCase):
+    def test_mxnet_pipeline_1(self):
+        graph = build_graph(
+            {'data': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'node_multi_box': {'type': '_contrib_MultiBoxPrior', 'kind': 'op', 'op': '_contrib_MultiBoxPrior'},
+             },
+            [('data', 'node_2'),
+             ('node_2', 'node_multi_box')],
+            {
+                'data': {'shape': np.array([1, 3, 227, 227])},
+                'node_2': {'shape': np.array([1, 3, 10, 10])},
+            })
+
+        add_input_data_to_prior_boxes(graph)
+        node_multi_box = Node(graph, 'node_multi_box')
+
+        node_input1 = node_multi_box.in_node(0)
+        node_input2 = node_multi_box.in_node(1)
+        self.assertEqual(node_input1.name, 'node_2')
+        self.assertEqual(node_input2.name, 'data')
+
+    def test_mxnet_pipeline_2(self):
+        graph = build_graph(
+            {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Placeholder'},
+             'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},
+             'node_multi_box': {'type': '_contrib_MultiBoxPrior', 'kind': 'op', 'op': '_contrib_MultiBoxPrior'},
+             },
+            [('node_1', 'node_2'),
+             ('node_2', 'node_multi_box')],
+            {
+                'node_1': {'shape': np.array([1, 3, 227, 227])},
+                'node_2': {'shape': np.array([1, 3, 10, 10])},
+            })
+
+        add_input_data_to_prior_boxes(graph, 'node_1')
+        node_multi_box = Node(graph, 'node_multi_box')
+
+        node_input1 = node_multi_box.in_node(0)
+        node_input2 = node_multi_box.in_node(1)
+        self.assertEqual(node_input1.name, 'node_2')
+        self.assertEqual(node_input2.name, 'node_1')
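Both cases above fix the same contract: after the pass runs, every _contrib_MultiBoxPrior node has the feature map as input 0 and the network input (the node named 'data' by default, or the name passed explicitly) as input 1. A hedged sketch of a pass satisfying that contract (the real mo/pipeline/mx.py code may differ; the 'in' edge attribute is assumed to be what in_node() resolves):

    def add_input_data_to_prior_boxes_sketch(graph, input_names: str = ''):
        input_name = input_names if input_names else 'data'
        for node_name, attrs in list(graph.nodes(data=True)):
            if attrs.get('op') == '_contrib_MultiBoxPrior':
                # attach the network input as the second input of the prior box node
                graph.add_edge(input_name, node_name, **{'in': 1})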
diff --git a/model-optimizer/mo/utils/cli_parser_test.py b/model-optimizer/mo/utils/cli_parser_test.py
new file mode 100644 (file)
index 0000000..1646273
--- /dev/null
@@ -0,0 +1,672 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import argparse
+import imp
+import os
+import tempfile
+import unittest
+from unittest.mock import patch
+
+import numpy as np
+
+from mo.utils.cli_parser import get_placeholder_shapes, get_tuple_values, get_mean_scale_dictionary, get_model_name, \
+    get_absolute_path, parse_tuple_pairs, check_positive, writable_dir, readable_dirs, \
+    readable_file
+from mo.utils.error import Error
+
+
+class TestingMeanScaleGetter(unittest.TestCase):
+    def test_tuple_parser(self):
+        tuple_values = "data(1.1,22.22,333.333),info[2.2,33.33,444.444]"
+        result = parse_tuple_pairs(tuple_values)
+        exp_res = {
+            'data': np.array([1.1, 22.22, 333.333]),
+            'info': np.array([2.2, 33.33, 444.444])
+        }
+        for el in exp_res.keys():
+            np.testing.assert_array_equal(result[el], exp_res[el])
+
+    def test_tuple_parser_same_values(self):
+        tuple_values = "data(1.1,22.22,333.333),info[1.1,22.22,333.333]"
+        result = parse_tuple_pairs(tuple_values)
+        exp_res = {
+            'data': np.array([1.1, 22.22, 333.333]),
+            'info': np.array([1.1, 22.22, 333.333])
+        }
+        for el in exp_res.keys():
+            np.testing.assert_array_equal(result[el], exp_res[el])
+
+    def test_tuple_parser_no_inputs(self):
+        tuple_values = "(1.1,22.22,333.333),[2.2,33.33,444.444]"
+        result = parse_tuple_pairs(tuple_values)
+        exp_res = [np.array([1.1, 22.22, 333.333]),
+                   np.array([2.2, 33.33, 444.444])]
+        for i in range(0, len(exp_res)):
+            np.testing.assert_array_equal(result[i], exp_res[i])
+
+    def test_tuple_parser_error(self):
+        tuple_values = "(1.1,22.22,333.333),data[2.2,33.33,444.444]"
+        self.assertRaises(Error, parse_tuple_pairs, tuple_values)
+
+    def test_mean_scale_no_input(self):
+        mean_values = "data(1.1,22.22,333.333)"
+        scale_values = "info[1.1,22.22,333.333]"
+        result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None)
+        exp_res = {
+            'data': {
+                'mean': np.array([1.1, 22.22, 333.333]),
+                'scale': None
+            },
+            'info': {
+                'mean': None,
+                'scale': np.array([1.1, 22.22, 333.333])
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_mean_scale_no_input_diff_len(self):
+        mean_values = "data(1.1,22.22,333.333),info(2.1,33.22,333.333)"
+        scale_values = "info[1.1,22.22,333.333]"
+        result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None)
+        exp_res = {
+            'data': {
+                'mean': np.array([1.1, 22.22, 333.333]),
+                'scale': None
+            },
+            'info': {
+                'mean': np.array([2.1, 33.22, 333.333]),
+                'scale': np.array([1.1, 22.22, 333.333])
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_mean_only_input(self):
+        mean_values = "data(1.1,22.22,333.333)"
+        result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(''), None)
+        exp_res = {
+            'data': {
+                'mean': np.array([1.1, 22.22, 333.333]),
+                'scale': None
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_scale_only_input(self):
+        scale_values = "data(1.1,22.22,333.333)"
+        result = get_mean_scale_dictionary(parse_tuple_pairs(''), parse_tuple_pairs(scale_values), None)
+        exp_res = {
+            'data': {
+                'mean': None,
+                'scale': np.array([1.1, 22.22, 333.333])
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_scale_only_no_input(self):
+        scale_values = "(1.1,22.22,333.333)"
+        mean_values = ""
+        mean = parse_tuple_pairs(mean_values)
+        scale = parse_tuple_pairs(scale_values)
+        result = get_mean_scale_dictionary(mean, scale, None)
+        exp_res = [
+            [
+                None,
+                np.array([1.1, 22.22, 333.333])
+            ]
+        ]
+        for i in range(len(exp_res)):
+            for j in range(len(exp_res[i])):
+                if type(exp_res[i][j]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[i][j], result[i][j])
+                else:
+                    self.assertEqual(exp_res[i][j], result[i][j])
+
+    def test_scale_only_with_input(self):
+        scale_values = "(1.1,22.22,333.333)"
+        mean_values = ""
+        mean = parse_tuple_pairs(mean_values)
+        scale = parse_tuple_pairs(scale_values)
+        result = get_mean_scale_dictionary(mean, scale, 'data')
+        exp_res = {
+            'data': {
+                'mean': None,
+                'scale': np.array([1.1, 22.22, 333.333])
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_2_scale_only_with_input(self):
+        scale_values = "(1.1,22.22,333.333),(1.2,22.33,333.444)"
+        mean_values = ""
+        mean = parse_tuple_pairs(mean_values)
+        scale = parse_tuple_pairs(scale_values)
+        result = get_mean_scale_dictionary(mean, scale, 'data,info')
+        exp_res = {
+            'data': {
+                'mean': None,
+                'scale': np.array([1.1, 22.22, 333.333])
+            },
+            'info': {
+                'mean': None,
+                'scale': np.array([1.2, 22.33, 333.444])
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_2_mean_only_with_input(self):
+        scale_values = ""
+        mean_values = "(1.1,22.22,333.333),(1.2,22.33,333.444)"
+        mean = parse_tuple_pairs(mean_values)
+        scale = parse_tuple_pairs(scale_values)
+        result = get_mean_scale_dictionary(mean, scale, 'data,info')
+        exp_res = {
+            'data': {
+                'mean': np.array([1.1, 22.22, 333.333]),
+                'scale': None,
+            },
+            'info': {
+                'mean': np.array([1.2, 22.33, 333.444]),
+                'scale': None,
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_mean_only_with_input(self):
+        scale_values = ""
+        mean_values = "(1.1,22.22,333.333)"
+        mean = parse_tuple_pairs(mean_values)
+        scale = parse_tuple_pairs(scale_values)
+        result = get_mean_scale_dictionary(mean, scale, 'data')
+        exp_res = {
+            'data': {
+                'mean': np.array([1.1, 22.22, 333.333]),
+                'scale': None
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_mean_scale_diff_no_input(self):
+        scale_values = "(1.1,22.22,333.333),(1.1,22.22,333.333)"
+        mean_values = "(2.1,11.22,444.333)"
+        mean = parse_tuple_pairs(mean_values)
+        scale = parse_tuple_pairs(scale_values)
+        result = get_mean_scale_dictionary(mean, scale, None)
+        exp_res = [
+            [
+                np.array([2.1, 11.22, 444.333]),  # mean
+                np.array([1.1, 22.22, 333.333])  # scale
+            ],
+            [
+                None,  # mean
+                np.array([1.1, 22.22, 333.333])  # scale
+            ]
+        ]
+        for i in range(len(exp_res)):
+            for j in range(len(exp_res[i])):
+                if type(exp_res[i][j]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[i][j], result[i][j])
+                else:
+                    self.assertEqual(exp_res[i][j], result[i][j])
+
+    def test_multi_mean_scale_no_input(self):
+        mean_values = "data(1.1,22.22,333.333),info(2.1,33.22,444.333)"
+        scale_values = "data[1.1,22.22,333.333],info[2.1,33.22,444.333]"
+        result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None)
+        exp_res = {
+            'data': {
+                'mean': np.array([1.1, 22.22, 333.333]),
+                'scale': np.array([1.1, 22.22, 333.333])
+            },
+            'info': {
+                'mean': np.array([2.1, 33.22, 444.333]),
+                'scale': np.array([2.1, 33.22, 444.333])
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_multi_mean_scale_input(self):
+        mean_values = "data(1.1,22.22,333.333),info(2.1,33.22,444.333)"
+        scale_values = "data[1.1,22.22,333.333],info[2.1,33.22,444.333]"
+        input_names = 'data,info'
+        result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), input_names)
+        exp_res = {
+            'data': {
+                'mean': np.array([1.1, 22.22, 333.333]),
+                'scale': np.array([1.1, 22.22, 333.333])
+            },
+            'info': {
+                'mean': np.array([2.1, 33.22, 444.333]),
+                'scale': np.array([2.1, 33.22, 444.333])
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_multi_mean_scale_input_arrays(self):
+        mean_values = "(1.1,22.22,333.333),(2.1,33.22,444.333)"
+        scale_values = "[1.1,22.22,333.333],[2.1,33.22,444.333]"
+        input_names = 'data,info'
+        result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), input_names)
+        exp_res = {
+            'data': {
+                'mean': np.array([1.1, 22.22, 333.333]),
+                'scale': np.array([1.1, 22.22, 333.333])
+            },
+            'info': {
+                'mean': np.array([2.1, 33.22, 444.333]),
+                'scale': np.array([2.1, 33.22, 444.333])
+            }
+        }
+        for input in exp_res.keys():
+            for key in exp_res[input].keys():
+                if type(exp_res[input][key]) is np.ndarray:
+                    np.testing.assert_array_equal(exp_res[input][key], result[input][key])
+                else:
+                    self.assertEqual(exp_res[input][key], result[input][key])
+
+    def test_multi_mean_scale_arrays_no_input(self):
+        mean_values = "(1.1,22.22,333.333),(2.1,33.22,444.333)"
+        scale_values = "[1.1,22.22,333.333],[2.1,33.22,444.333]"
+        result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None)
+        exp_res = [
+            [
+                np.array([1.1, 22.22, 333.333]),  # mean
+                np.array([1.1, 22.22, 333.333])  # scale
+            ],
+            [
+                np.array([2.1, 33.22, 444.333]),  # mean
+                np.array([2.1, 33.22, 444.333])  # scale
+            ]
+        ]
+        for i in range(0, len(exp_res)):
+            for j in range(0, len(exp_res[i])):
+                np.testing.assert_array_equal(exp_res[i][j], result[i][j])
+
+
+class TestSingleTupleParsing(unittest.TestCase):
+    def test_get_values_ideal(self):
+        values = "(1.11, 22.22, 333.333)"
+        result = get_tuple_values(values)
+        exp_res = ['1.11, 22.22, 333.333']
+        self.assertEqual(exp_res, result)
+
+    def test_get_values_ideal_spaces(self):
+        values = "(1    , 22 ,333)"
+        result = get_tuple_values(values)
+        exp_res = ['1    , 22 ,333']
+        self.assertEqual(exp_res, result)
+
+    def test_get_values_ideal_square(self):
+        values = "[1,22,333]"
+        result = get_tuple_values(values)
+        exp_res = ['1,22,333']
+        self.assertEqual(exp_res, result)
+
+    def test_get_values_ideal_square_spaces(self):
+        values = "[1    , 22 ,333]"
+        result = get_tuple_values(values)
+        exp_res = ['1    , 22 ,333']
+        self.assertEqual(exp_res, result)
+
+    def test_get_neg_values_ideal(self):
+        values = "(-1,-22,-333)"
+        result = get_tuple_values(values)
+        exp_res = ['-1,-22,-333']
+        self.assertEqual(exp_res, result)
+
+    def test_get_neg_values_minus(self):
+        values = "(-1,--22,-3-33)"
+        self.assertRaises(Error, get_tuple_values, values)
+
+    def test_get_values_unbalanced(self):
+        values = "(1,22,333]"
+        self.assertRaises(Error, get_tuple_values, values)
+
+    def test_get_values_unbalanced2(self):
+        values = "[1,22,333)"
+        self.assertRaises(Error, get_tuple_values, values)
+
+    def test_get_values_exactly_3(self):
+        values = "[1,22,333,22]"
+        self.assertRaises(Error, get_tuple_values, values)
+
+    def test_get_values_exactly_3_1(self):
+        values = "[1,22]"
+        self.assertRaises(Error, get_tuple_values, values)
+
+    def test_get_values_empty(self):
+        values = ""
+        self.assertRaises(Error, get_tuple_values, values)
+
+    def test_get_values_empty_tuple(self):
+        values = ()
+        result = get_tuple_values(values)
+        exp_res = ()
+        self.assertEqual(exp_res, result)
+
+
+class TestShapesParsing(unittest.TestCase):
+    def test_get_shapes_several_inputs_several_shapes(self):
+        argv_input = "inp1,inp2"
+        input_shapes = "(1,22,333,123), (-1,45,7,1)"
+        result = get_placeholder_shapes(argv_input, input_shapes)
+        exp_res = {'inp1': np.array([1, 22, 333, 123]), 'inp2': np.array([-1, 45, 7, 1])}
+        self.assertEqual(list(exp_res.keys()), list(result.keys()))
+        for i in exp_res.keys():
+            np.testing.assert_array_equal(result[i], exp_res[i])
+
+    def test_get_shapes_several_inputs_several_shapes_not_equal(self):
+        argv_input = "inp1,inp2,inp3"
+        input_shapes = "(1,22,333,123), (-1,45,7,1)"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_several_shapes_one_input(self):
+        argv_input = "inp1"
+        input_shapes = "(1,22,333,123), (-1,45,7,1), (-1,456,7,1)"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_several_shapes_no_input(self):
+        argv_input = ""
+        input_shapes = "(1,22,333,123), (-1,45,7,1), (-1,456,7,1)"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_one_shape(self):
+        argv_input = "inp1"
+        input_shapes = "(1,22,333,123)"
+        result = get_placeholder_shapes(argv_input, input_shapes)
+        exp_res = {'inp1': np.array([1, 22, 333, 123])}
+        self.assertEqual(list(exp_res.keys()), list(result.keys()))
+        for i in exp_res.keys():
+            np.testing.assert_array_equal(result[i], exp_res[i])
+
+    def test_get_shapes_no_input_no_shape(self):
+        argv_input = ""
+        input_shapes = ""
+        result = get_placeholder_shapes(argv_input, input_shapes)
+        exp_res = np.array([None])
+        np.testing.assert_array_equal(result, exp_res)
+
+    def test_get_shapes_no_input_one_shape(self):
+        argv_input = ""
+        input_shapes = "(12,4,1)"
+        result = get_placeholder_shapes(argv_input, input_shapes)
+        exp_res = np.array([12, 4, 1])
+        np.testing.assert_array_equal(result, exp_res)
+
+    def test_get_shapes_no_input_one_shape2(self):
+        argv_input = ""
+        input_shapes = "[12,4,1]"
+        result = get_placeholder_shapes(argv_input, input_shapes)
+        exp_res = np.array([12, 4, 1])
+        np.testing.assert_array_equal(result, exp_res)
+
+    def test_get_shapes_no_input_two_shapes(self):
+        argv_input = ""
+        input_shapes = "(12,4,1),(5,4,3)"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_no_shape(self):
+        argv_input = "inp1"
+        input_shapes = ""
+        result = get_placeholder_shapes(argv_input, input_shapes)
+        exp_res = {'inp1': np.array([None])}
+        self.assertEqual(list(exp_res.keys()), list(result.keys()))
+        for i in exp_res.keys():
+            np.testing.assert_array_equal(result[i], exp_res[i])
+
+    def test_get_shapes_one_input_wrong_shape8(self):
+        argv_input = "inp1"
+        input_shapes = "[2,4,1)"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_wrong_shape9(self):
+        argv_input = "inp1"
+        input_shapes = "(2,4,1]"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_wrong_shape10(self):
+        argv_input = "inp1"
+        input_shapes = "(2,,,4,1]"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_wrong_shape2(self):
+        argv_input = "inp1"
+        input_shapes = "(2,4,1"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_wrong_shape3(self):
+        argv_input = "inp1"
+        input_shapes = "2,4,1"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_wrong_shape4(self):
+        argv_input = "inp1"
+        input_shapes = "2;4;1"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_wrong_shape5(self):
+        argv_input = "inp1"
+        input_shapes = "2,         4,1"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_wrong_shape6(self):
+        argv_input = "inp1"
+        input_shapes = "(2,         4,1),[4,6,8]"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_wrong_shape7(self):
+        argv_input = "inp1"
+        input_shapes = "[2,4,1],(4,6,8)"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_several_shapes(self):
+        argv_input = "inp1"
+        input_shapes = "(2,4,1),(4,6,8)"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_first_neg_shape1(self):
+        argv_input = "inp1,inp2"
+        input_shapes = "(-1,4,1),(4,6,8)"
+        result = get_placeholder_shapes(argv_input, input_shapes)
+        exp_res = {'inp1': np.array([-1, 4, 1]), 'inp2': np.array([4, 6, 8])}
+        self.assertEqual(list(exp_res.keys()), list(result.keys()))
+        for i in exp_res.keys():
+            np.testing.assert_array_equal(result[i], exp_res[i])
+
+    def test_get_shapes_one_input_first_neg_shape_not_one(self):
+        argv_input = "inp1"
+        input_shapes = "(-12,4,1),(4,6,8)"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+    def test_get_shapes_one_input_any_neg_shape(self):
+        argv_input = "inp1, inp2"
+        input_shapes = "(12,4,1),(4,-6,8)"
+        self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes)
+
+
+class TestModelNameParsing(unittest.TestCase):
+    def test_model_name_ideal(self):
+        model_name = '/home/models/mymodel.caffemodel'
+        res = get_model_name(model_name)
+        exp_res = 'mymodel'
+        self.assertEqual(exp_res, res)
+
+    def test_model_name_no_name(self):
+        model_name = '/home/models/.caffemodel'
+        res = get_model_name(model_name)
+        exp_res = 'model'
+        self.assertEqual(exp_res, res)
+
+    def test_model_name_no_ext(self):
+        model_name = '/home/models/caffemodel'
+        res = get_model_name(model_name)
+        exp_res = 'caffemodel'
+        self.assertEqual(exp_res, res)
+
+    def test_model_name_no_name_no_path(self):
+        model_name = '.caffemodel'
+        res = get_model_name(model_name)
+        exp_res = 'model'
+        self.assertEqual(exp_res, res)
+
+    @patch("mo.utils.cli_parser.os")
+    def test_model_name_win(self, old_os):
+        old_os.path.basename.return_value = "caffemodel"
+        old_os.path.splitext.return_value = ("caffemodel", "")
+        model_name = r'\home\models\caffemodel'
+        res = get_model_name(model_name)
+
+        exp_res = 'caffemodel'
+        self.assertEqual(exp_res, res)
+
+    def test_model_name_dots(self):
+        model_name = r'/home/models/squeezenet_v1.1.caffemodel'
+        res = get_model_name(model_name)
+        exp_res = 'squeezenet_v1.1'
+        self.assertEqual(exp_res, res)
+
+
+class PositiveChecker(unittest.TestCase):
+    def test_positive_checker_batch(self):
+        res = check_positive('1')
+        self.assertEqual(res, 1)
+
+    def test_positive_checker_batch_negative(self):
+        self.assertRaises(argparse.ArgumentTypeError, check_positive, '-1')
+
+    def test_positive_checker_batch_not_int(self):
+        self.assertRaises(argparse.ArgumentTypeError, check_positive, 'qwe')
+
+
+class PathCheckerFunctions(unittest.TestCase):
+    READABLE_DIR = tempfile.gettempdir()
+    WRITABLE_DIR = os.path.join(tempfile.gettempdir(), 'writable_dir')
+    WRITABLE_NON_EXISTING_DIR = os.path.join(WRITABLE_DIR, 'non_existing_dir')
+    NOT_WRITABLE_DIR = os.path.join(tempfile.gettempdir(), 'not_writable_dir')
+    NOT_WRITABLE_SUB_DIR = os.path.join(tempfile.gettempdir(), 'another_not_writable_dir', 'not_existing_dir')
+    EXISTING_FILE = tempfile.NamedTemporaryFile(mode='r+', delete=False).name
+    NOT_EXISTING_FILE = '/abcd/efgh/ijkl'
+
+    @classmethod
+    def setUpClass(cls):
+        if not os.path.exists(__class__.WRITABLE_DIR):
+            os.makedirs(__class__.WRITABLE_DIR)
+        if os.path.exists(__class__.WRITABLE_NON_EXISTING_DIR):
+            os.removedirs(__class__.WRITABLE_NON_EXISTING_DIR)
+
+        if not os.path.exists(__class__.NOT_WRITABLE_DIR):
+            os.makedirs(__class__.NOT_WRITABLE_DIR)
+        os.chmod(__class__.NOT_WRITABLE_DIR, 0)
+
+        if not os.path.exists(os.path.dirname(__class__.NOT_WRITABLE_SUB_DIR)):
+            os.makedirs(os.path.dirname(__class__.NOT_WRITABLE_SUB_DIR))
+        os.chmod(os.path.dirname(__class__.NOT_WRITABLE_SUB_DIR), 0)
+        if os.path.exists(__class__.NOT_EXISTING_FILE):
+            os.remove(__class__.NOT_EXISTING_FILE)
+
+    @classmethod
+    def tearDownClass(cls):
+        if os.path.exists(__class__.WRITABLE_DIR):
+            os.removedirs(__class__.WRITABLE_DIR)
+        if os.path.exists(__class__.NOT_WRITABLE_DIR):
+            os.removedirs(__class__.NOT_WRITABLE_DIR)
+        if os.path.exists(os.path.dirname(__class__.NOT_WRITABLE_SUB_DIR)):
+            os.removedirs(os.path.dirname(__class__.NOT_WRITABLE_SUB_DIR))
+        if os.path.exists(__class__.EXISTING_FILE):
+            os.remove(__class__.EXISTING_FILE)
+
+    def test_single_writable_dir(self):
+        self.assertEqual(__class__.WRITABLE_DIR, writable_dir(__class__.WRITABLE_DIR))
+
+    def test_single_non_writable_dir(self):
+        with self.assertRaises(Error) as cm:
+            writable_dir(__class__.NOT_WRITABLE_DIR)
+
+    def test_single_non_writable_sub_dir(self):
+        with self.assertRaises(Error) as cm:
+            writable_dir(__class__.NOT_WRITABLE_SUB_DIR)
+
+    def test_multiple_writable_dirs(self):
+        dirs_str = ','.join([__class__.WRITABLE_DIR, __class__.WRITABLE_NON_EXISTING_DIR])
+        self.assertEqual(dirs_str, writable_dir(dirs_str))
+
+    def test_single_writable_non_existing_dir(self):
+        self.assertEqual(__class__.WRITABLE_NON_EXISTING_DIR, writable_dir(__class__.WRITABLE_NON_EXISTING_DIR))
+
+    def test_readable_dirs(self):
+        dirs_str = ','.join([__class__.WRITABLE_DIR, __class__.READABLE_DIR])
+        self.assertEqual(dirs_str, readable_dirs(dirs_str))
+
+    def test_not_readable_dirs(self):
+        dirs_str = ','.join([__class__.WRITABLE_DIR, __class__.WRITABLE_NON_EXISTING_DIR])
+        with self.assertRaises(Error) as cm:
+            readable_dirs(dirs_str)
+
+    def test_readable_file(self):
+        self.assertEqual(__class__.EXISTING_FILE, readable_file(__class__.EXISTING_FILE))
+
+    def test_non_readable_file(self):
+        with self.assertRaises(Error) as cm:
+            readable_file(__class__.NOT_EXISTING_FILE)
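Taken together, these tests document the command-line syntax the parser accepts for mean/scale values and input shapes. An illustrative usage sketch (input names and numbers are made up; the functions are the ones exercised above):

    from mo.utils.cli_parser import parse_tuple_pairs, get_mean_scale_dictionary, get_placeholder_shapes

    mean = parse_tuple_pairs("data(104,117,123)")          # per-input triple in (...) or [...]
    scale = parse_tuple_pairs("data[59.0,59.0,59.0]")
    mean_scale = get_mean_scale_dictionary(mean, scale, None)
    # expected, based on the tests above:
    # {'data': {'mean': array([104., 117., 123.]), 'scale': array([59., 59., 59.])}}

    shapes = get_placeholder_shapes("inp1,inp2", "(1,3,227,227), (1,3,10,10)")
    # {'inp1': array([1, 3, 227, 227]), 'inp2': array([1, 3, 10, 10])}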
diff --git a/model-optimizer/mo/utils/convert.py b/model-optimizer/mo/utils/convert.py
new file mode 100644 (file)
index 0000000..edec06f
--- /dev/null
@@ -0,0 +1,55 @@
+#!/usr/bin/env python3
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+import argparse
+import os
+import sys
+
+import tensorflow as tf
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
+from mo.front.tf.loader import load_tf_graph_def
+
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
+
+
+def argparser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--to_pbtxt", dest='pb', type=str, help="Path to TensorFlow binary model")
+    parser.add_argument('--to_pb', dest='pbtxt', type=str, help="Path to TensorFlow text model")
+    return parser.parse_args()
+
+
+def convert(filename: str, is_text: bool):
+    if not os.path.isfile(filename):
+        raise FileNotFoundError("File doesn't exist: {}".format(filename))
+    new_ext = ".pbtxt" if is_text else ".pb"
+    head, tail = os.path.split(os.path.abspath(filename))
+    print("Convert: {} \n     to: {}".format(filename, os.path.join(head, tail + new_ext)))
+    graph_def, _ = load_tf_graph_def(graph_file_name=filename, is_binary=is_text)
+    tf.import_graph_def(graph_def, name='')
+    tf.train.write_graph(graph_def, head, tail + new_ext, as_text=is_text)
+
+
+if __name__ == '__main__':
+    argv = argparser()
+    if argv.pb is None and argv.pbtxt is None:
+        print("Please provide model to convert --to_pb or --to_pbtxt")
+        sys.exit(1)
+    if argv.pb is not None:
+        convert(argv.pb, True)
+    if argv.pbtxt is not None:
+        convert(argv.pbtxt, False)
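A usage note for the converter (the path below is hypothetical): --to_pbtxt takes a binary .pb model and writes a text copy named <file>.pbtxt next to it; --to_pb does the reverse for a .pbtxt model. The same conversion can be done programmatically:

    # equivalent to: python3 mo/utils/convert.py --to_pbtxt /path/to/frozen_model.pb
    from mo.utils.convert import convert

    convert('/path/to/frozen_model.pb', True)   # writes /path/to/frozen_model.pb.pbtxt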
diff --git a/model-optimizer/mo/utils/graph_test.py b/model-optimizer/mo/utils/graph_test.py
new file mode 100644 (file)
index 0000000..5d4ed57
--- /dev/null
@@ -0,0 +1,214 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+import networkx as nx
+
+from mo.utils.error import Error
+from mo.utils.graph import dfs, bfs_search, is_connected_component, sub_graph_between_nodes
+
+
+class TestGraphUtils(unittest.TestCase):
+    def test_simple_dfs(self):
+        graph = nx.MultiDiGraph()
+        graph.add_nodes_from(list(range(1, 5)))
+        graph.add_edges_from([(1, 2), (1, 3), (3, 4)])
+
+        visited = set()
+        order = dfs(graph, 1, visited)
+        self.assertTrue(order == [4, 3, 2, 1] or order == [2, 4, 3, 1])
+
+    def test_bfs_search_default_start_nodes(self):
+        """
+        Check that BFS automatically determines the input nodes and starts searching from them.
+        """
+        graph = nx.MultiDiGraph()
+        graph.add_nodes_from(list(range(1, 6)))
+        graph.add_edges_from([(1, 3), (2, 3), (3, 4), (4, 5)])
+
+        order = bfs_search(graph)
+        self.assertTrue(order == [1, 2, 3, 4, 5] or order == [2, 1, 3, 4, 5])
+
+    def test_bfs_search_specific_start_nodes(self):
+        """
+        Check that BFS starts from the user-defined nodes and does not traverse edges in the backward direction.
+        """
+        graph = nx.MultiDiGraph()
+        graph.add_nodes_from(list(range(1, 7)))
+        graph.add_edges_from([(1, 3), (2, 3), (3, 4), (4, 5), (6, 1)])
+
+        order = bfs_search(graph, [1])
+        self.assertTrue(order == [1, 3, 4, 5])
+
+    def test_is_connected_component_two_separate_sub_graphs(self):
+        """
+        Check that if there are two separate sub-graphs the function returns False.
+        """
+        graph = nx.MultiDiGraph()
+        graph.add_nodes_from(list(range(1, 7)))
+        graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)])
+        self.assertFalse(is_connected_component(graph, list(range(1, 7))))
+        self.assertFalse(is_connected_component(graph, [1, 3]))
+        self.assertFalse(is_connected_component(graph, [6, 4]))
+        self.assertFalse(is_connected_component(graph, [2, 5]))
+
+    def test_is_connected_component_two_separate_sub_graphs_divided_by_ignored_node(self):
+        """
+        Check that if two separate sub-graphs are connected only through a node that is not in the checked list,
+        the function returns False.
+        """
+        graph = nx.MultiDiGraph()
+        node_names = list(range(1, 8))
+        graph.add_nodes_from(node_names)
+        graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6), (1, 7), (7, 4)])
+        self.assertFalse(is_connected_component(graph, list(range(1, 7))))
+
+    def test_is_connected_component_connected(self):
+        """
+        Check that the function returns True when the sub-graph is connected.
+        """
+        graph = nx.MultiDiGraph()
+        node_names = list(range(1, 8))
+        graph.add_nodes_from(node_names)
+        graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6), (1, 7), (7, 4)])
+        self.assertTrue(is_connected_component(graph, list(range(1, 8))))
+
+    def test_is_connected_component_edges_direction_is_ignored(self):
+        """
+        Check that edges direction is ignored when checking for the connectivity.
+        """
+        graph = nx.MultiDiGraph()
+        node_names = list(range(1, 5))
+        graph.add_nodes_from(node_names)
+        graph.add_edges_from([(2, 1), (2, 3), (4, 3)])
+        self.assertTrue(is_connected_component(graph, node_names))
+        self.assertTrue(is_connected_component(graph, [2, 1]))
+        self.assertTrue(is_connected_component(graph, [4, 2, 3]))
+
+    def test_is_connected_component_edges_direction_is_ignored_not_connected(self):
+        """
+        Check that edges direction is ignored when checking for the connectivity. In this case the graph is not
+        connected.
+        """
+        graph = nx.MultiDiGraph()
+        graph.add_nodes_from(list(range(1, 5)))
+        graph.add_edges_from([(2, 1), (2, 3), (4, 3)])
+        self.assertFalse(is_connected_component(graph, [1, 2, 4]))
+        self.assertFalse(is_connected_component(graph, [1, 4]))
+        self.assertFalse(is_connected_component(graph, [2, 4]))
+        self.assertFalse(is_connected_component(graph, [3, 4, 1]))
+
+    def test_sub_graph_between_nodes_include_incoming_edges_for_internal_nodes(self):
+        """
+        Check that the function adds input nodes for the internal nodes of the sub-graph. For example, nodes 5 and 6
+        below must be added when the match spans from node 1 to node 4.
+        6 -> 5 ->
+                 \
+            1 -> 2 -> 3 -> 4
+        :return:
+        """
+        graph = nx.MultiDiGraph()
+        graph.add_nodes_from(list(range(1, 7)))
+        graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2), (6, 5)])
+        sub_graph_nodes = sub_graph_between_nodes(graph, [1], [4])
+        self.assertIsNotNone(sub_graph_nodes)
+        self.assertListEqual(sorted(sub_graph_nodes), list(range(1, 7)))
+
+        sub_graph_nodes = sub_graph_between_nodes(graph, [1], [2])
+        self.assertIsNotNone(sub_graph_nodes)
+        self.assertListEqual(sorted(sub_graph_nodes), [1, 2, 5, 6])
+
+    def test_sub_graph_between_nodes_do_not_include_incoming_edges_for_input_nodes(self):
+        """
+        Check that the function doesn't add input nodes for the start nodes of the sub-graph. For example, node 5
+        below must not be added when the match spans from node 2 to node 4.
+          5->
+             \
+        1 -> 2 -> 3 -> 4
+        """
+        graph = nx.MultiDiGraph()
+        graph.add_nodes_from(list(range(1, 6)))
+        graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2)])
+        sub_graph_nodes = sub_graph_between_nodes(graph, [2], [4])
+        self.assertIsNotNone(sub_graph_nodes)
+        self.assertListEqual(sorted(sub_graph_nodes), [2, 3, 4])
+
+    def test_sub_graph_between_nodes_placeholder_included(self):
+        """
+        Check that the function doesn't allow Placeholder nodes to be added to the sub-graph. Node 5 is a Placeholder.
+          5->
+             \
+        1 -> 2 -> 3 -> 4
+        """
+        graph = nx.MultiDiGraph()
+        graph.add_nodes_from(list(range(1, 6)))
+        graph.node[5]['op'] = 'Placeholder'
+        graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2)])
+        self.assertRaises(Error, sub_graph_between_nodes, graph, [1], [4])
+
+    def test_sub_graph_between_nodes_placeholder_excluded(self):
+        """
+        Check that the function does not apply the Placeholder check to nodes that are not included in the sub-graph.
+        For example, node 5 is a Placeholder, but it is outside the sub-graph, so this attribute is ignored.
+          5->
+             \
+        1 -> 2 -> 3 -> 4
+        """
+        graph = nx.MultiDiGraph()
+        graph.add_nodes_from(list(range(1, 6)))
+        graph.node[5]['op'] = 'Placeholder'
+        graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2)])
+        sub_graph_nodes = sub_graph_between_nodes(graph, [2], [4])
+        self.assertIsNotNone(sub_graph_nodes)
+        self.assertListEqual(sorted(sub_graph_nodes), [2, 3, 4])
+
+    def test_sub_graph_between_nodes_multiple_inputs(self):
+        """
+        Check that the function works correctly when multiple inputs are specified.
+          5->
+             \
+        1 -> 2 -> 3 -> 4
+        """
+        graph = nx.MultiDiGraph()
+        graph.add_nodes_from(list(range(1, 6)))
+        graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2)])
+        sub_graph_nodes = sub_graph_between_nodes(graph, [2, 5], [4])
+        self.assertIsNotNone(sub_graph_nodes)
+        self.assertListEqual(sorted(sub_graph_nodes), sorted([2, 3, 4, 5]))
+
+    def test_sub_graph_between_nodes_branches_included(self):
+        """
+        Check that the function works correctly for tree like structures.
+        1 -> 2 -> 3 -> 4
+             \
+             5 -> 6
+            / \
+        9 ->   -> 7 -> 8
+        """
+        graph = nx.MultiDiGraph()
+        node_names = list(range(1, 10))
+        graph.add_nodes_from(node_names)
+        graph.add_edges_from([(1, 2), (2, 3), (3, 4), (2, 5), (5, 6), (5, 7), (7, 8), (9, 5)])
+        self.assertListEqual(sorted(sub_graph_between_nodes(graph, [1], [4])), node_names)
+        self.assertListEqual(sorted(sub_graph_between_nodes(graph, [1], [6])), node_names)
+        self.assertListEqual(sorted(sub_graph_between_nodes(graph, [1], [8])), node_names)
+        # all nodes except 4 because it is a child of the end node
+        self.assertListEqual(sorted(sub_graph_between_nodes(graph, [1], [3])), [n for n in node_names if n != 4])
+        # all nodes except 1 because it is a parent of the start node. Nodes 3 and 4 must be added because after
+        # merging node 2 into the sub-graph, node 2 will be removed and it would not be known how to calculate the
+        # tensor between nodes 2 and 3.
+        self.assertListEqual(sorted(sub_graph_between_nodes(graph, [2], [8])), [n for n in node_names if n != 1])
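The sub_graph_between_nodes tests above describe how sub-graphs are carved out when replacing matched patterns. A hedged example of how these helpers combine on a plain chain (node ids are illustrative):

    import networkx as nx

    from mo.utils.graph import is_connected_component, sub_graph_between_nodes

    graph = nx.MultiDiGraph()
    graph.add_nodes_from(range(1, 5))
    graph.add_edges_from([(1, 2), (2, 3), (3, 4)])

    nodes = sub_graph_between_nodes(graph, [1], [4])   # [1, 2, 3, 4] for a simple chain
    assert is_connected_component(graph, nodes)        # the carved-out region is connected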
diff --git a/model-optimizer/mo/utils/pipeline_config_test.py b/model-optimizer/mo/utils/pipeline_config_test.py
new file mode 100644 (file)
index 0000000..596a714
--- /dev/null
@@ -0,0 +1,150 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import os
+import tempfile
+import unittest
+
+from mo.utils.error import Error
+from mo.utils.pipeline_config import PipelineConfig
+
+file_content = """model {
+  faster_rcnn {
+    num_classes: 90
+    image_resizer {
+      keep_aspect_ratio_resizer {
+        min_dimension: 600
+        max_dimension: 1024
+      }
+    }
+    feature_extractor {
+      type: "faster_rcnn_inception_v2"
+      first_stage_features_stride: 16
+    }
+    first_stage_anchor_generator {
+      grid_anchor_generator {
+        height_stride: 16
+        width_stride: 16
+        scales: 0.25
+        scales: 0.5
+        scales: 1.0
+        scales: 2.0
+        aspect_ratios: 0.5
+        aspect_ratios: 1.0
+        aspect_ratios: 2.0
+      }
+    }
+    first_stage_box_predictor_conv_hyperparams {
+      op: CONV
+      regularizer {
+        l2_regularizer {
+          weight: 0.0
+        }
+      }
+      initializer {
+        truncated_normal_initializer {
+          stddev: 0.00999999977648
+        }
+      }
+    }
+    first_stage_nms_score_threshold: 0.0
+    first_stage_nms_iou_threshold: 0.699999988079
+    first_stage_max_proposals: 100
+    first_stage_localization_loss_weight: 2.0
+    first_stage_objectness_loss_weight: 1.0
+    initial_crop_size: 14
+    maxpool_kernel_size: 2
+    maxpool_stride: 2
+    second_stage_box_predictor {
+      mask_rcnn_box_predictor {
+        fc_hyperparams {
+          op: FC
+          regularizer {
+            l2_regularizer {
+              weight: 0.0
+            }
+          }
+          initializer {
+            variance_scaling_initializer {
+              factor: 1.0
+              uniform: true
+              mode: FAN_AVG
+            }
+          }
+            }
+          }
+        }
+        use_dropout: false
+        dropout_keep_probability: 1.0
+      }
+    }
+    second_stage_post_processing {
+      batch_non_max_suppression {
+        score_threshold: 0.300000011921
+        iou_threshold: 0.600000023842
+        max_detections_per_class: 100
+        max_total_detections: 100
+      }
+      score_converter: SOFTMAX
+    }
+    second_stage_localization_loss_weight: 2.0
+    second_stage_classification_loss_weight: 1.0
+  }
+}
+"""
+
+
+class TestingSimpleProtoParser(unittest.TestCase):
+    def test_pipeline_config_not_existing_file(self):
+        self.assertRaises(Error, PipelineConfig, "/abc/def")
+
+    def test_pipeline_config_non_model_file(self):
+        file = tempfile.NamedTemporaryFile('wt', delete=False)
+        file.write("non_model {}")
+        file_name = file.name
+        file.close()
+
+        self.assertRaises(Error, PipelineConfig, file_name)
+
+    def test_pipeline_config_existing_file(self):
+        file = tempfile.NamedTemporaryFile('wt', delete=False)
+        file.write(file_content)
+        file_name = file.name
+        file.close()
+
+        pipeline_config = PipelineConfig(file_name)
+        expected_result = {'resizer_min_dimension': 600,
+                           'first_stage_nms_score_threshold': 0.0,
+                           'anchor_generator_aspect_ratios': [0.5, 1.0, 2.0],
+                           'num_classes': 90,
+                           'anchor_generator_scales': [0.25, 0.5, 1.0, 2.0],
+                           'first_stage_max_proposals': 100,
+                           'first_stage_nms_iou_threshold': 0.699999988079,
+                           'resizer_max_dimension': 1024,
+                           'initial_crop_size': 14,
+                           'frcnn_variance_height': 5.0,
+                           'frcnn_variance_width': 5.0,
+                           'frcnn_variance_x': 10.0,
+                           'frcnn_variance_y': 10.0,
+                           'ssd_anchor_generator_base_anchor_width': 1.0,
+                           'ssd_anchor_generator_base_anchor_height': 1.0,
+                           'anchor_generator_height': 256,
+                           'anchor_generator_width': 256,
+                           'anchor_generator_height_stride': 16,
+                           'anchor_generator_width_stride': 16,
+                           }
+        os.unlink(file_name)
+        self.assertDictEqual(pipeline_config._model_params, expected_result)
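Only the constructor and the _model_params dictionary are exercised by this test; a hedged sketch of how the class is expected to be used when converting a TF Object Detection API model (the path is hypothetical):

    from mo.utils.pipeline_config import PipelineConfig

    config = PipelineConfig('/path/to/faster_rcnn/pipeline.config')
    params = config._model_params
    num_classes = params['num_classes']                  # e.g. 90 for the config above
    max_proposals = params['first_stage_max_proposals']  # e.g. 100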
diff --git a/model-optimizer/mo/utils/simple_proto_parser_test.py b/model-optimizer/mo/utils/simple_proto_parser_test.py
new file mode 100644 (file)
index 0000000..2f601ce
--- /dev/null
@@ -0,0 +1,200 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import os
+import tempfile
+import unittest
+
+from mo.utils.simple_proto_parser import SimpleProtoParser
+
+correct_proto_message_1 = 'model { faster_rcnn { num_classes: 90 image_resizer { keep_aspect_ratio_resizer {' \
+                          ' min_dimension: 600  max_dimension: 1024 }}}}'
+
+correct_proto_message_2 = '    first_stage_anchor_generator {grid_anchor_generator {height_stride: 16 width_stride:' \
+                          ' 16 scales: 0.25 scales: 0.5 scales: 1.0 scales: 2.0  aspect_ratios: 0.5 aspect_ratios:' \
+                          ' 1.0 aspect_ratios: 2.0}}'
+
+correct_proto_message_3 = '  initializer \n{variance_scaling_initializer \n{\nfactor: 1.0 uniform: true bla: false ' \
+                          'mode: FAN_AVG}}'
+
+correct_proto_message_4 = 'train_input_reader {label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"' \
+                          ' tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/  mscoco_train.record" }}'
+
+correct_proto_message_5 = '  initializer \n  # abc \n{variance_scaling_initializer \n{\nfactor: 1.0 \n  # sd ' \
+                          '\nuniform: true bla: false mode: FAN_AVG}}'
+
+correct_proto_message_6 = '    first_stage_anchor_generator {grid_anchor_generator {height_stride: 16 width_stride:' \
+                          ' 16 scales: [ 0.25, 0.5, 1.0, 2.0] aspect_ratios: 0.5 aspect_ratios:' \
+                          ' 1.0 aspect_ratios: 2.0}}'
+
+correct_proto_message_7 = '    first_stage_anchor_generator {grid_anchor_generator {height_stride: 16 width_stride:' \
+                          ' 16 scales: [ 0.25, 0.5, 1.0, 2.0] aspect_ratios: [] }}'
+
+correct_proto_message_8 = 'model {good_list: [3.0, 5.0, ]}'
+
+correct_proto_message_9 = '    first_stage_anchor_generator {grid_anchor_generator {height_stride: 16, width_stride:' \
+                          ' 16 scales: [ 0.25, 0.5, 1.0, 2.0], aspect_ratios: [] }}'
+
+correct_proto_message_10 = 'train_input_reader {label_map_path: "C:\mscoco_label_map.pbtxt"' \
+                           ' tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/  mscoco_train.record" }}'
+
+correct_proto_message_11 = 'model {path: "C:\[{],}" other_value: [1, 2, 3, 4]}'
+
+incorrect_proto_message_1 = 'model { bad_no_value }'
+
+incorrect_proto_message_2 = 'model { abc: 3 { }'
+
+incorrect_proto_message_3 = 'model { too_many_values: 3 4 }'
+
+incorrect_proto_message_4 = 'model { missing_values: '
+
+incorrect_proto_message_5 = 'model { missing_values: aa bb : }'
+
+incorrect_proto_message_6 = 'model : '
+
+incorrect_proto_message_7 = 'model : {bad_list: [3.0, 4, , 4.0]}'
+
+
+class TestingSimpleProtoParser(unittest.TestCase):
+    def test_correct_proto_reader_from_string_1(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_1)
+        expected_result = {'model': {'faster_rcnn': {'num_classes': 90, 'image_resizer': {
+            'keep_aspect_ratio_resizer': {'min_dimension': 600, 'max_dimension': 1024}}}}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_correct_proto_reader_from_string_2(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_2)
+        expected_result = {'first_stage_anchor_generator': {
+            'grid_anchor_generator': {'height_stride': 16, 'width_stride': 16, 'scales': [0.25, 0.5, 1.0, 2.0],
+                                      'aspect_ratios': [0.5, 1.0, 2.0]}}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_correct_proto_reader_from_string_3(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_3)
+        expected_result = {
+            'initializer': {
+                'variance_scaling_initializer': {'factor': 1.0, 'uniform': True, 'bla': False, 'mode': 'FAN_AVG'}}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_correct_proto_reader_from_string_4(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_4)
+        expected_result = {
+            'train_input_reader': {'label_map_path': "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt",
+                                   'tf_record_input_reader': {
+                                       'input_path': "PATH_TO_BE_CONFIGURED/  mscoco_train.record"}}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_correct_proto_reader_from_string_with_comments(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_5)
+        expected_result = {
+            'initializer': {
+                'variance_scaling_initializer': {'factor': 1.0, 'uniform': True, 'bla': False, 'mode': 'FAN_AVG'}}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_correct_proto_reader_from_string_with_lists(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_6)
+        expected_result = {'first_stage_anchor_generator': {
+            'grid_anchor_generator': {'height_stride': 16, 'width_stride': 16, 'scales': [0.25, 0.5, 1.0, 2.0],
+                                      'aspect_ratios': [0.5, 1.0, 2.0]}}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_correct_proto_reader_from_string_with_empty_list(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_7)
+        expected_result = {'first_stage_anchor_generator': {
+            'grid_anchor_generator': {'height_stride': 16, 'width_stride': 16, 'scales': [0.25, 0.5, 1.0, 2.0],
+                                      'aspect_ratios': []}}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_correct_proto_reader_from_string_with_comma_trailing_list(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_8)
+        expected_result = {'model': {'good_list': [3.0, 5.0]}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_correct_proto_reader_from_string_with_redundant_commas(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_9)
+        expected_result = {'first_stage_anchor_generator': {
+            'grid_anchor_generator': {'height_stride': 16, 'width_stride': 16, 'scales': [0.25, 0.5, 1.0, 2.0],
+                                      'aspect_ratios': []}}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_correct_proto_reader_from_string_with_windows_path(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_10)
+        expected_result = {
+            'train_input_reader': {'label_map_path': "C:\mscoco_label_map.pbtxt",
+                                   'tf_record_input_reader': {
+                                       'input_path': "PATH_TO_BE_CONFIGURED/  mscoco_train.record"}}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_correct_proto_reader_from_string_with_special_characters_in_string(self):
+        result = SimpleProtoParser().parse_from_string(correct_proto_message_11)
+        expected_result = {'model': {'path': "C:\[{],}",
+                                     'other_value': [1, 2, 3, 4]}}
+        self.assertDictEqual(result, expected_result)
+
+    def test_incorrect_proto_reader_from_string_1(self):
+        result = SimpleProtoParser().parse_from_string(incorrect_proto_message_1)
+        self.assertIsNone(result)
+
+    def test_incorrect_proto_reader_from_string_2(self):
+        result = SimpleProtoParser().parse_from_string(incorrect_proto_message_2)
+        self.assertIsNone(result)
+
+    def test_incorrect_proto_reader_from_string_3(self):
+        result = SimpleProtoParser().parse_from_string(incorrect_proto_message_3)
+        self.assertIsNone(result)
+
+    def test_incorrect_proto_reader_from_string_4(self):
+        result = SimpleProtoParser().parse_from_string(incorrect_proto_message_4)
+        self.assertIsNone(result)
+
+    def test_incorrect_proto_reader_from_string_5(self):
+        result = SimpleProtoParser().parse_from_string(incorrect_proto_message_5)
+        self.assertIsNone(result)
+
+    def test_incorrect_proto_reader_from_string_6(self):
+        result = SimpleProtoParser().parse_from_string(incorrect_proto_message_6)
+        self.assertIsNone(result)
+
+    def test_incorrect_proto_reader_from_string_7(self):
+        result = SimpleProtoParser().parse_from_string(incorrect_proto_message_7)
+        self.assertIsNone(result)
+
+    def test_correct_proto_reader_from_file(self):
+        file = tempfile.NamedTemporaryFile('wt', delete=False)
+        file.write(correct_proto_message_1)
+        file_name = file.name
+        file.close()
+
+        result = SimpleProtoParser().parse_file(file_name)
+        expected_result = {'model': {'faster_rcnn': {'num_classes': 90, 'image_resizer': {
+            'keep_aspect_ratio_resizer': {'min_dimension': 600, 'max_dimension': 1024}}}}}
+        self.assertDictEqual(result, expected_result)
+        os.unlink(file_name)
+
+    def test_proto_reader_from_non_readable_file(self):
+        file = tempfile.NamedTemporaryFile('wt', delete=False)
+        file.write(correct_proto_message_1)
+        file_name = file.name
+        file.close()
+        os.chmod(file_name, 0000)
+
+        result = SimpleProtoParser().parse_file(file_name)
+        self.assertIsNone(result)
+        os.unlink(file_name)
+
+    def test_proto_reader_from_non_existing_file(self):
+        result = SimpleProtoParser().parse_file('/non/existing/file')
+        self.assertIsNone(result)
diff --git a/model-optimizer/mo/utils/summarize_graph_test.py b/model-optimizer/mo/utils/summarize_graph_test.py
new file mode 100644 (file)
index 0000000..fbed0eb
--- /dev/null
@@ -0,0 +1,38 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch, mock_open
+
+from mo.front.tf.loader import load_tf_graph_def
+from mo.utils.summarize_graph import summarize_graph
+
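+# Minimal TensorFlow GraphDef in text format: a single float32 Placeholder of
+# shape 1x227x227x3 feeding an Identity node named 'Output/Identity'.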
+pbtxt = 'node{name:"Placeholder"op:"Placeholder"attr{key:"dtype"value{type:DT_FLOAT}}attr{key:"shape"value{shape{dim' + \
+        '{size:1}dim{size:227}dim{size:227}dim{size:3}}}}}node{name:"Output/Identity"op:"Identity"input:"Placeholder' + \
+        '"attr{key:"T"value{type:DT_FLOAT}}}'
+
+
+class TestingSummarizeGraph(unittest.TestCase):
+    def test_summarize_graph(self):
+        with patch('mo.front.tf.loader.open', mock_open(read_data=pbtxt)) as m:
+            graph_def, _ = load_tf_graph_def('path', False)
+            summary = summarize_graph(graph_def)
+            self.assertEqual(len(summary['outputs']), 1)
+            self.assertEqual(summary['outputs'][0], 'Output/Identity')
+            self.assertEqual(len(summary['inputs']), 1)
+            self.assertIn('Placeholder', summary['inputs'])
+            self.assertEqual(str(summary['inputs']['Placeholder']['shape']), '(1,227,227,3)')
+            self.assertEqual(str(summary['inputs']['Placeholder']['type']), 'float32')
diff --git a/model-optimizer/mo/utils/unittest/extractors.py b/model-optimizer/mo/utils/unittest/extractors.py
new file mode 100644 (file)
index 0000000..e58534c
--- /dev/null
@@ -0,0 +1,82 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+from unittest.mock import patch
+
+import numpy as np
+
+
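+# Dictionary with attribute-style access to its keys (missing keys resolve to
+# None); used in tests as a lightweight stand-in for protobuf-like objects.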
+class PB(dict):
+    __getattr__ = dict.get
+
+
+class BaseExtractorsTestingClass(unittest.TestCase):
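+    """
+    Base class for extractor unit tests: subclasses fill `expected` and `res` (and, when an infer function is
+    patched, `patcher`, `call_args` and `expected_call_args`) and then call compare() to verify the result.
+    """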
+    expected = None
+    res = None
+    call_args = None
+    expected_call_args = None
+
+    def setUp(self):
+        if hasattr(self, 'patcher') and self.patcher:  # pylint: disable=no-member
+            patcher = patch(self.patcher)  # pylint: disable=no-member
+            self.addCleanup(patcher.stop)
+            self.infer_mock = patcher.start()
+
+    def compare(self):
+        if hasattr(self, 'infer_mock'):
+            self.assertTrue(self.infer_mock.called)
+        for key, val in self.expected.items():
+            if key == "infer":
+                self.assertEqual(self.call_args, self.expected_call_args)
+            if type(val) is np.ndarray:
+                np.testing.assert_equal(val, self.res[key])
+            elif type(val) is list:
+                self.assertTrue(np.all([val == self.res[key]]))
+            else:
+                self.assertEqual(val, self.res[key],
+                                 "{} attribute comparison failed! Expected {} but {} given.".format(key, val,
+                                                                                                    self.res[key]))
+
+
+class FakeParam:
+    def __init__(self, param_key, param_val):
+        setattr(self, param_key, param_val)
+
+
+class FakeMultiParam:
+    def __init__(self, dict_values):
+        self.dict_values = dict_values
+        for (key, value) in dict_values.items():
+            # if type(value) != dict:
+            setattr(self, key, value)
+            # else:
+            #     setattr(self, key, FakeMultiParam(value))
+
+
+class FakeBlob:
+    def __init__(self, param_key, param_val):
+        setattr(self, param_key, param_val)
+
+
+class FakeModelLayer:
+    def __init__(self, blobs_val):
+        self.blobs = [FakeBlob('data', val) for val in blobs_val]
+
+
+class FakeValue:
+    def __init__(self, val):
+        self.shape = val
\ No newline at end of file
diff --git a/model-optimizer/mo/utils/unittest/graph.py b/model-optimizer/mo/utils/unittest/graph.py
new file mode 100644 (file)
index 0000000..64a0f30
--- /dev/null
@@ -0,0 +1,321 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+from collections import deque
+from copy import deepcopy
+
+import networkx as nx
+import numpy as np
+
+from mo.graph.graph import Node
+from mo.middle.pattern_match import all_edges_in_nodes
+from mo.utils.error import Error
+
+
+def not_all_new(old_elements: list, new_elements: list):
+    """
+    Check whether at least one element from new_elements is already present in old_elements.
+    """
+    return any([element in old_elements for element in new_elements])
+
+
+def check_and_update_ports(edges_data: list, in_port: bool = True):
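+    """
+    Check that either every edge of a node carries an explicit 'in'/'out' port or none of them does; in the
+    latter case ports are assigned sequentially. Duplicate or partially specified ports raise an Error.
+    """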
+    key = 'in' if in_port else 'out'
+    key_in_edges = [key in edge_data for edge_data in edges_data]
+    if all(key_in_edges):
+        ports = [edge_data[key] for edge_data in edges_data]
+        if len(ports) != len(set(ports)):
+            raise Error("Please, provide unique {} ports for nodes".format(key))
+    elif not any(key_in_edges):
+        for i, edge_data in enumerate(edges_data):
+            edge_data[key] = i
+    else:
+        raise Error("Please, provide all {} ports for nodes".format(key))
+
+
+def build_graph_with_attrs(nodes_with_attrs: list, edges_with_attrs: list, new_nodes_with_attrs: list = [],
+                           new_edges_with_attrs: list = [], update_edge_attrs: dict = None,
+                           update_nodes_attributes: list = None, nodes_with_edges_only: bool = False,
+                           add_nodes_from_edges: bool = False):
+    """
+    Build the nx.MultiDiGraph with specific nodes and edges. Updating edge and node attributes is also supported.
+    :param nodes_with_attrs: list of tuples ('node_name', {node_attrs}).
+    :param edges_with_attrs: list of tuples like (start node, end node, (optional) {attrs of the edge}).
+    :param new_nodes_with_attrs: same format as nodes_with_attrs; these nodes must not already be present in
+    nodes_with_attrs.
+    :param new_edges_with_attrs: same format as edges_with_attrs; these edges must not already be present in
+    edges_with_attrs.
+    :param update_edge_attrs: optional dictionary like {('from_node', 'to_node', key): {edge_attrs}}.
+    :param update_nodes_attributes: optional list of tuples (node name, attrs dict) specifying nodes whose
+    attributes should be updated.
+    :param nodes_with_edges_only: add only nodes that have at least one incoming or outgoing edge.
+    :param add_nodes_from_edges: whether nodes that are not listed in the node lists but appear in the edge lists
+    are allowed.
+    :return: generated graph.
+    """
+    if not_all_new([node[0] for node in nodes_with_attrs], [node[0] for node in new_nodes_with_attrs]):
+        raise Error('Some nodes from new_nodes_with_attrs are already in nodes.'
+                    ' Please, add to new_nodes_with_attrs only NEW nodes.')
+
+    if not_all_new([(edge[0], edge[1]) for edge in edges_with_attrs], [(edge[0], edge[1]) for edge in new_edges_with_attrs]):
+        raise Error('Some edges from new_edges_with_attrs are already in edges.'
+                    ' Please, add to new_edges_with_attrs only NEW edges.')
+
+    # Check that all nodes from list of edges are in nodes
+    all_nodes = nodes_with_attrs + new_nodes_with_attrs
+    all_edges = edges_with_attrs + new_edges_with_attrs
+    all_nodes_names = [node[0] for node in all_nodes]
+    if not add_nodes_from_edges and not all_edges_in_nodes(nodes=all_nodes_names, edges=all_edges):
+        raise Error("Some nodes from list of edges is not in nodes. Please, add all necessary nodes.")
+
+    graph = nx.MultiDiGraph()
+
+    # Create dict for nodes with attrs
+    nodes_attrs = {}
+    for node_name, attrs in all_nodes:
+        nodes_attrs[node_name] = attrs
+        if 'name' not in attrs:
+            attrs['name'] = node_name
+
+    if nodes_with_edges_only:
+        # filter nodes to keep only ones with edges connected
+        filtered_nodes = {}
+        for edge in all_edges:
+            node_1, node_2 = edge[0], edge[1]
+            filtered_nodes[node_1] = nodes_attrs[node_1]
+            filtered_nodes[node_2] = nodes_attrs[node_2]
+        nodes_attrs = filtered_nodes
+
+    # Create all nodes
+    for node, attrs in nodes_attrs.items():
+        graph.add_node(node, **deepcopy(attrs))
+
+    # Connect nodes with edges (also unpack edge params)
+    for edge in all_edges:
+        node_1, node_2 = edge[0], edge[1]
+        edge_attrs = edge[2] if len(edge) == 3 else {}
+        graph.add_edge(node_1, node_2, **edge_attrs)
+
+    # Update attributes of edges
+    if update_edge_attrs:
+        # this works with networkx 2.x only
+        for edge, attr in update_edge_attrs.items():
+            for k, v in attr.items():
+                nx.set_edge_attributes(G=graph, name=k, values={edge: v})
+
+    # Update attributes of nodes
+    if update_nodes_attributes is not None:
+        for node_name, new_attrs in update_nodes_attributes:
+            assert (node_name in graph.nodes())
+            for attr, value in new_attrs.items():
+                graph.node[node_name][attr] = value
+
+    for node in graph.nodes():
+        check_and_update_ports([graph.get_edge_data(edge[0], node)[0] for edge in graph.in_edges(node)], True)
+        check_and_update_ports([graph.get_edge_data(node, edge[1])[0] for edge in graph.out_edges(node)], False)
+
+    return graph
+
+
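+# Illustrative sketch only (not part of the original helpers): a hypothetical minimal call of
+# build_graph_with_attrs; the node names and attribute values below are made up for demonstration.
+def _example_build_graph_with_attrs():
+    nodes = [('placeholder', {'kind': 'op', 'op': 'Placeholder'}),
+             ('relu', {'kind': 'op', 'op': 'ReLU'})]
+    edges = [('placeholder', 'relu', {'out': 0, 'in': 0})]
+    return build_graph_with_attrs(nodes_with_attrs=nodes, edges_with_attrs=edges)
+
+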
+def build_graph(nodes_attrs: dict, edges: list, update_attributes: dict = None, nodes_with_edges_only: bool = False):
+    """
+    Build the nx.MultiDiGraph with specific nodes and edges.
+    :param nodes_attrs: dictionary where key is the node name and the value is the dictionary with node attributes.
+    :param edges: list of pairs (start node name, end node name) or triples (start node, end node, {edge attrs}).
+    :param update_attributes: optional dictionary which specifies node names and their attributes to be updated. The
+    key is a node name whose attributes should be updated and the value is a dictionary with attribute names and values.
+    :param nodes_with_edges_only: add only nodes that have at least one incoming or outgoing edge.
+    :return: generated graph.
+    """
+    graph = nx.MultiDiGraph()
+
+    for node_name, attrs in nodes_attrs.items():
+        if 'name' not in attrs:
+            attrs['name'] = node_name
+
+    if nodes_with_edges_only:
+        # filter nodes to keep only ones with edges connected
+        filtered_nodes = {}
+        for item in edges:
+            if len(item) == 2: # TODO: is there any better way in python to do that?
+                node1, node2 = item
+            else:
+                node1, node2, _ = item
+            filtered_nodes[node1] = nodes_attrs[node1]
+            filtered_nodes[node2] = nodes_attrs[node2]
+        nodes_attrs = filtered_nodes
+
+    # create all nodes first
+    for node, attrs in nodes_attrs.items():
+        assert node not in graph.nodes()
+        graph.add_node(node, **deepcopy(attrs))
+
+    # connect nodes with edges
+    for item in edges:
+        if len(item) == 2:  # TODO: is there any better way in python to do that?
+            node_1, node_2 = item
+            edge_attrs = {}
+        else:
+            node_1, node_2, edge_attrs = item
+
+        common_attrs = {'in': len(graph.in_edges(node_2)),
+                        'out': len(graph.out_edges(node_1)),
+                        'name': nodes_attrs[node_1]['name']}
+        common_attrs.update(edge_attrs)
+        graph.add_edge(node_1, node_2, **common_attrs)
+
+    if update_attributes is not None:
+        for node_name, new_attrs in update_attributes.items():
+            assert (node_name in graph.nodes())
+            for attr, value in new_attrs.items():
+                graph.node[node_name][attr] = value
+
+    return graph
+
+
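+# Illustrative sketch only (not part of the original helpers): a hypothetical two-node graph built
+# with the dictionary-based API above; node names and attributes are made up for demonstration.
+def _example_build_graph():
+    nodes = {
+        'placeholder': {'kind': 'op', 'op': 'Placeholder'},
+        'relu': {'kind': 'op', 'op': 'ReLU'},
+    }
+    return build_graph(nodes, [('placeholder', 'relu')])
+
+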
+def build_graph_with_edge_attrs(nodes_attrs: dict, edges: list, update_attributes: dict = None):
+    """
+    Build the nx.MultiDiGraph with specific nodes and edges.
+    :param nodes_attrs: dictionary where key is the node name and the value is the dictionary with node attributes.
+    :param edges: list of triples (start node name, end node name, {edge attrs}).
+    :param update_attributes: optional dictionary which specifies node names and their attributes to be updated. The
+    key is a node name whose attributes should be updated and the value is a dictionary with attribute names and values.
+    :return: generated graph.
+    """
+    graph = nx.MultiDiGraph()
+    for node_1, node_2, attr in edges:
+        if node_1 not in graph.nodes():
+            graph.add_node(node_1, **deepcopy(nodes_attrs[node_1]))
+        if node_2 not in graph.nodes():
+            graph.add_node(node_2, **deepcopy(nodes_attrs[node_2]))
+        graph.add_edge(node_1, node_2, **attr)
+    if update_attributes is not None:
+        for node_name, new_attrs in update_attributes.items():
+            assert (node_name in graph.nodes())
+            for attr, value in new_attrs.items():
+                graph.node[node_name][attr] = value
+    return graph
+
+
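+# Illustrative sketch only (not part of the original helpers): the same hypothetical graph built with
+# explicit per-edge attributes, as build_graph_with_edge_attrs expects (node and port values are made up).
+def _example_build_graph_with_edge_attrs():
+    nodes = {
+        'placeholder': {'kind': 'op', 'op': 'Placeholder'},
+        'relu': {'kind': 'op', 'op': 'ReLU'},
+    }
+    return build_graph_with_edge_attrs(nodes, [('placeholder', 'relu', {'out': 0, 'in': 0})])
+
+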
+def compare_graphs(graph: nx.MultiDiGraph, graph_ref: nx.MultiDiGraph, last_node: str, last_node_ref=None,
+                   check_op_attrs=False):
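+    """
+    Compare `graph` against `graph_ref` by traversing both starting from `last_node` / `last_node_ref` and
+    checking that the visited nodes match in connectivity, kind, shape and value (and, when check_op_attrs
+    is True, in operation attributes).
+    :return: tuple (status, message); status is True when the graphs match, otherwise message describes the
+    first detected mismatch.
+    """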
+    if last_node_ref is None:
+        last_node_ref = last_node
+
+    q = deque([last_node])
+    q_ref = deque([last_node_ref])
+
+    checked_nodes = []
+    checked_nodes_ref = []
+
+    while len(q_ref) != 0:
+        if len(q) == 0:
+            return False, 'Graphs have different number of nodes'
+        node = Node(graph, q.popleft())
+        node_ref = Node(graph_ref, q_ref.popleft())
+
+        checked_nodes.append(node.id)
+        checked_nodes_ref.append(node_ref.id)
+
+        # Check that nodes have the same number of output nodes
+        if len(node_ref.out_nodes()) != len(node.out_nodes()):
+            return False, '{} and {} have different numbers of output nodes'.format(node.id, node_ref.id)
+
+        # Check that nodes have the same number of input nodes
+        if len(node_ref.in_nodes()) != len(node.in_nodes()):
+            return False, '{} and {} have different numbers of input nodes'.format(node.id, node_ref.id)
+
+        # Check that nodes have the same 'kind'
+        if node_ref.kind != node.kind:
+            return False, '{} and {} have different kind parameters'.format(node.id, node_ref.id)
+
+        # Check can_be_fused attr
+        if node_ref.has_valid('can_be_fused'):
+            if node_ref.soft_get('can_be_fused') != node.soft_get('can_be_fused'):
+                return False, '{} and {} have different can_be_fused parameters {} and {}'.format(
+                    node.id, node_ref.id, node.soft_get('can_be_fused'), node_ref.soft_get('can_be_fused'))
+
+        if node_ref.kind == 'op':
+            # Check that nodes have the same operation attributes
+            if check_op_attrs:
+                for attr in graph_ref.node[node_ref.id]:
+                    if graph_ref.node[node_ref.id][attr] is None or attr in ['name', 'id']:
+                        continue
+                    if attr not in graph.node[node.id]:
+                        return False, 'Node {} has missing attribute {}'.format(node.id, attr)
+
+                    if type(graph_ref.node[node_ref.id][attr]) in [np.ndarray, list]:
+                        if not np.array_equal(graph.node[node.id][attr], graph_ref.node[node_ref.id][attr]):
+                            return False, '{} and {} have different attr {}: {} and {}'.format(
+                                node.id, node_ref.id, attr, graph.node[node.id][attr],
+                                graph_ref.node[node_ref.id][attr])
+                    else:
+                        if graph.node[node.id][attr] != graph_ref.node[node_ref.id][attr]:
+                            return False, '{} and {} have different attr {}: {} and {}'.format(
+                                node.id, node_ref.id, attr, graph.node[node.id][attr],
+                                graph_ref.node[node_ref.id][attr])
+        else:
+            if node_ref.has_valid('shape') and not node.has_valid('shape'):
+                return False, '{} has None shape'.format(node.id)
+            if node_ref.has_valid('value') and not node.has_valid('value'):
+                return False, '{} has None value'.format(node.id)
+
+            # Check that nodes have the same shape and value
+            if node_ref.has_valid('shape') and node_ref.shape is not None and not np.array_equal(node_ref.shape,
+                                                                                                 node.shape):
+                return False, '{} and {} have different shapes {} and {}'.format(node.id, node_ref.id, node.shape,
+                                                                                node_ref.shape)
+            if node_ref.has_valid('value') and node_ref.value is not None and not np.allclose(node_ref.value,
+                                                                                              node.value, rtol=1e-05,
+                                                                                              atol=1e-08):
+                return False, '{} and {} have different values \n{} \nand \n{}'.format(node.id, node_ref.id, node.value,
+                                                                                      node_ref.value)
+        ports = sorted(node.in_nodes().keys()) if node.kind == 'op' else None
+        in_nodes = [node.in_node(k) for k in ports] if node.kind == 'op' else node.in_nodes()
+        for in_node in in_nodes:
+            if in_node.id not in checked_nodes and in_node.id not in q:
+                q.append(in_node.id)
+
+        ports_ref = sorted(node_ref.in_nodes().keys()) if node_ref.kind == 'op' else None
+        if ports != ports_ref:
+            return False, '{} and {} have different ports'.format(node.id, node_ref.id)
+
+        in_nodes = [node_ref.in_node(k) for k in ports] if node_ref.kind == 'op' else node_ref.in_nodes()
+        for in_node in in_nodes:
+            if in_node.id not in checked_nodes_ref and in_node.id not in q_ref:
+                q_ref.append(in_node.id)
+
+        out_nodes = node.out_nodes().values() if node.kind == 'op' else node.out_nodes()
+        for out_node in out_nodes:
+            if out_node.id not in checked_nodes and out_node.id not in q:
+                q.append(out_node.id)
+
+        out_nodes = node_ref.out_nodes().values() if node_ref.kind == 'op' else node_ref.out_nodes()
+        for out_node in out_nodes:
+            if out_node.id not in checked_nodes_ref and out_node.id not in q_ref:
+                q_ref.append(out_node.id)
+
+    return True, ''
+
+
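+# Illustrative sketch only (not part of the original helpers): a graph built with the utilities above is
+# expected to match a deep copy of itself when compared starting from the hypothetical 'relu' node.
+def _example_compare_graphs():
+    nodes = {
+        'placeholder': {'kind': 'op', 'op': 'Placeholder'},
+        'relu': {'kind': 'op', 'op': 'ReLU'},
+    }
+    graph = build_graph(nodes, [('placeholder', 'relu')])
+    return compare_graphs(graph, deepcopy(graph), last_node='relu', check_op_attrs=True)
+
+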
+class FakeNode:
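+    """
+    Minimal stand-in for a graph node in tests: stores the given layer and model protobufs as `pb` and
+    `model_pb` and exposes dict-style access to its attributes.
+    """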
+    def __init__(self, pl, ml):
+        self.pb = pl
+        self.model_pb = ml
+        self.graph = None
+
+    def __setitem__(self, key, value):
+        setattr(self, key, value)
+
+    def __getitem__(self, item):
+        return getattr(self, item)
diff --git a/model-optimizer/mo/utils/utils_test.py b/model-optimizer/mo/utils/utils_test.py
new file mode 100644 (file)
index 0000000..7ebae7f
--- /dev/null
@@ -0,0 +1,41 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+import numpy as np
+from mo.utils.utils import match_shapes
+
+
+class TestMatchShapes(unittest.TestCase):
+
+    def run_match_shapes(self, pattern: list, shape: list):
+        return match_shapes(np.array(pattern, dtype=np.int64), np.array(shape, dtype=np.int64))
+
+    def test_positive(self):
+        self.assertTrue(self.run_match_shapes([], []))
+        self.assertTrue(self.run_match_shapes([1,2,3], [1,2,3]))
+        self.assertTrue(self.run_match_shapes([-1,2,3], [1,2,3]))
+        self.assertTrue(self.run_match_shapes([1,-1,3], [1,2,3]))
+        self.assertTrue(self.run_match_shapes([-1,-1,-1], [1,2,3]))
+        self.assertTrue(self.run_match_shapes([-1], [2]))
+
+    def test_negative(self):
+        self.assertFalse(self.run_match_shapes([-1], []))
+        self.assertFalse(self.run_match_shapes([-1], [1,2,3]))
+        self.assertFalse(self.run_match_shapes([-1,2,3], [1,3,3]))
+        self.assertFalse(self.run_match_shapes([1,-1,3], [2,2]))
+        self.assertFalse(self.run_match_shapes([-1, -1, -1], [2, 3, 4, 5]))
diff --git a/model-optimizer/mo/utils/version_test.py b/model-optimizer/mo/utils/version_test.py
new file mode 100644 (file)
index 0000000..909e742
--- /dev/null
@@ -0,0 +1,24 @@
+"""
+ Copyright (c) 2018 Intel Corporation
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+"""
+
+import unittest
+
+from mo.utils.version import get_version
+
+
+class TestingVersion(unittest.TestCase):
+    def test_unknown_version(self):
+        self.assertEqual(get_version(), "unknown version")