Template documentation update (#1519)
author    Ilya Lavrenov <ilya.lavrenov@intel.com>
Wed, 29 Jul 2020 16:56:24 +0000 (19:56 +0300)
committer GitHub <noreply@github.com>
Wed, 29 Jul 2020 16:56:24 +0000 (19:56 +0300)
* Updated Inference Engine Plugin developer guide after inference using the ngraph reference backend was added

* Documentation fixes

* Fixed review comments

43 files changed:
docs/IE_PLUGIN_DG/Building.md
docs/IE_PLUGIN_DG/Doxyfile
docs/IE_PLUGIN_DG/ExecutableNetwork.md
docs/IE_PLUGIN_DG/InferRequest.md
docs/IE_PLUGIN_DG/Intro.md
docs/IE_PLUGIN_DG/Plugin.md
docs/IE_PLUGIN_DG/PluginTesting.md
docs/template_plugin/CMakeLists.txt
docs/template_plugin/README.md
docs/template_plugin/src/CMakeLists.txt
docs/template_plugin/src/template_async_infer_request.cpp
docs/template_plugin/src/template_config.cpp
docs/template_plugin/src/template_config.hpp
docs/template_plugin/src/template_executable_network.cpp
docs/template_plugin/src/template_executable_network.hpp
docs/template_plugin/src/template_function_transformation.cpp
docs/template_plugin/src/template_function_transformation.hpp
docs/template_plugin/src/template_infer_request.cpp
docs/template_plugin/src/template_infer_request.hpp
docs/template_plugin/src/template_pattern_transformation.cpp
docs/template_plugin/src/template_pattern_transformation.hpp
docs/template_plugin/src/template_plugin.cpp
docs/template_plugin/src/template_plugin.hpp
docs/template_plugin/tests/functional/CMakeLists.txt
docs/template_plugin/tests/functional/shared_tests_instances/behavior/config.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/cpp_holders.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/exec_graph_info.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_callback.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_config.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_input.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/infer_request_output.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/layout.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/set_preprocess.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/test_plugin.cpp
docs/template_plugin/tests/functional/shared_tests_instances/behavior/version.cpp
docs/template_plugin/tests/functional/shared_tests_instances/hetero/query_network.cpp [new file with mode: 0644]
docs/template_plugin/tests/functional/shared_tests_instances/hetero/synthetic.cpp [new file with mode: 0644]
docs/template_plugin/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp
inference-engine/src/plugin_api/cpp_interfaces/impl/ie_plugin_internal.hpp
inference-engine/src/plugin_api/exec_graph_info.hpp
inference-engine/src/plugin_api/precision_utils.h
inference-engine/tests/functional/plugin/shared/include/single_layer_tests/convolution.hpp

index f30d67fec226d0cb0c9463c40b301f4da9a3f257..93d6c934a0359d4ca7e6dc013b5db9cbb5b343a8 100644 (file)
@@ -16,22 +16,23 @@ $ cmake -DCMAKE_BUILD_TYPE=Release ../dldt
 Once the commands above are executed, the Inference Engine Developer Package is generated in the `dldt-release-build` folder. It consists of several files:
  - `InferenceEngineDeveloperPackageConfig.cmake` - the main CMake script which imports targets and provides compilation flags and CMake options.
  - `InferenceEngineDeveloperPackageConfig-version.cmake` - a file with a package version.
- - `targets_developer.cmake` - an automatically generated file which contains all targets exported from the Deep Learning Deployment Toolkit (DLDT) build tree. This file is included by `InferenceEngineDeveloperPackageConfig.cmake` to import the following targets:
+ - `targets_developer.cmake` - an automatically generated file which contains all targets exported from the OpenVINO build tree. This file is included by `InferenceEngineDeveloperPackageConfig.cmake` to import the following targets:
    - Libraries for plugin development:
-          * `IE::ngraph` - shared nGraph library
-          * `IE::inference_engine` - shared Inference Engine library
-          * `IE::inference_engine_preproc` - shared library with Inference Engine preprocessing plugin
-          * `IE::inference_engine_plugin_api` - interface library with Inference Engine Plugin API headers
-          * `IE::inference_engine_lp_transformations` - shared library with low-precision transformations
-          * `IE::pugixml` - static Pugixml library
-          * `IE::xbyak` - interface library with Xbyak headers
+       * `IE::ngraph` - shared nGraph library
+       * `IE::inference_engine` - shared Inference Engine library
+       * `IE::inference_engine_transformations` - shared library with Inference Engine nGraph-based transformations
+       * `IE::inference_engine_preproc` - shared library with Inference Engine preprocessing plugin
+       * `IE::inference_engine_plugin_api` - interface library with Inference Engine Plugin API headers
+       * `IE::inference_engine_lp_transformations` - shared library with low-precision transformations
+       * `IE::pugixml` - static Pugixml library
+       * `IE::xbyak` - interface library with Xbyak headers
    - Libraries for tests development:
-          * `IE::gtest`, `IE::gtest_main`, `IE::gmock` - Google Tests framework libraries
-          * `IE::commonTestUtils` - static library with common tests utilities 
-          * `IE::funcTestUtils` - static library with functional tests utilities 
-          * `IE::unitTestUtils` - static library with unit tests utilities 
-          * `IE::ngraphFunctions` - static library with the set of Ngraph Functions builders
-          * `IE::funcSharedTests` - static library with common functional tests
+       * `IE::gtest`, `IE::gtest_main`, `IE::gmock` - Google Test framework libraries
+       * `IE::commonTestUtils` - static library with common test utilities
+       * `IE::funcTestUtils` - static library with functional test utilities
+       * `IE::unitTestUtils` - static library with unit test utilities
+       * `IE::ngraphFunctions` - static library with the set of `ngraph::Function` builders
+       * `IE::funcSharedTests` - static library with common functional tests
 
 > **Note:** it's enough just to run `cmake --build . --target ie_dev_targets` command to build only targets from the
 > Inference Engine Developer package.
@@ -68,24 +69,20 @@ find_package(InferenceEngineDeveloperPackage REQUIRED)
 add_subdirectory(src)
 
 if(ENABLE_TESTS)
-       include(CTest)
-       enable_testing()
+    include(CTest)
+    enable_testing()
 
-       if(ENABLE_FUNCTIONAL_TESTS)
-           add_subdirectory(tests/functional)
-       endif()
-
-       if(ENABLE_BEH_TESTS)
-           add_subdirectory(tests/behavior)
-       endif()
+    if(ENABLE_FUNCTIONAL_TESTS)
+        add_subdirectory(tests/functional)
+    endif()
 endif()
 ```
 
-> **NOTE**: The default values of the `ENABLE_TESTS`, `ENABLE_FUNCTIONAL_TESTS`, `ENABLE_BEH_TESTS` options are shared via the Inference Engine Developer Package and they are the same as for the main DLDT build tree. You can override them during plugin build using the command below:
+> **NOTE**: The default values of the `ENABLE_TESTS` and `ENABLE_FUNCTIONAL_TESTS` options are shared via the Inference Engine Developer Package and they are the same as for the main OpenVINO build tree. You can override them during plugin build using the command below:
 
-       ```bash
-       $ cmake -DENABLE_FUNCTIONAL_TESTS=OFF -DInferenceEngineDeveloperPackage_DIR=../dldt-release-build ../template-plugin
-       ``` 
+    ```bash
+    $ cmake -DENABLE_FUNCTIONAL_TESTS=OFF -DInferenceEngineDeveloperPackage_DIR=../dldt-release-build ../template-plugin
+    ``` 
 
 - `src/CMakeLists.txt` to build a plugin shared library from sources:
 
index 1f1407ea16588a515b62504dc0b1999c7fc80efb..96ef887e7cbe78013860eb197fd5593ef4fbf39f 100644 (file)
@@ -869,6 +869,8 @@ EXAMPLE_PATH           = ../template_plugin/src \
                          ../template_plugin/src/CMakeLists.txt \
                          ../template_plugin/tests/functional/CMakeLists.txt \
                          ../template_plugin/tests/functional/transformations \
+                         ../template_plugin/tests/functional/shared_tests_instances/ \
+                         ../../inference-engine/tests/functional/plugin/shared/include \
                          ../examples
 
 # If the value of the EXAMPLE_PATH tag contains directories, you can use the
@@ -877,7 +879,8 @@ EXAMPLE_PATH           = ../template_plugin/src \
 # files are included.
 
 EXAMPLE_PATTERNS       = *.cpp \
-                                                *.hpp
+                         *.hpp \
+                         *.txt
 
 # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
 # searched for input files to be used with the \include or \dontinclude commands
@@ -2044,7 +2047,8 @@ INCLUDE_FILE_PATTERNS  =
 # recursively expanded use the := operator instead of the = operator.
 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
-PREDEFINED             = INFERENCE_ENGINE_API \
+PREDEFINED             = INFERENCE_PLUGIN_API \
+                         INFERENCE_ENGINE_API \
                          INFERENCE_ENGINE_API_CPP \
                          INFERENCE_ENGINE_API_CLASS \
                          INFERENCE_ENGINE_DEPRECATED \
index eecdcb6d0443a03495f73855ce88a075e29c6935..a52872946c2bf37b9f02e7167fcac3fa8fc59d85 100644 (file)
@@ -1,7 +1,7 @@
 # Executable Network {#executable_network}
 
 `ExecutableNetwork` class functionality:
-- Compile an InferenceEngine::ICNNNetwork instance to a hardware-specific graph representation
+- Compile an InferenceEngine::ICNNNetwork instance to a backend-specific graph representation
 - Create an arbitrary number of `InferRequest` objects
 - Hold some common resources shared between different instances of `InferRequest`. For example:
        - InferenceEngine::ExecutableNetworkInternal::_taskExecutor task executor to implement asynchronous execution
@@ -19,38 +19,37 @@ Inference Engine Plugin API provides the helper InferenceEngine::ExecutableNetwo
 The example class has several fields:
 
 - `_requestId` - Tracks a number of created inference requests, which is used to distinguish different inference requests during profiling via the Intel® Instrumentation and Tracing Technology (ITT) library.
-- `_name` - Provides a network name.
 - `_cfg` - Defines a configuration an executable network was compiled with.
 - `_plugin` - Refers to a plugin instance.
+- `_function` - Keeps a reference to the transformed `ngraph::Function`, which is used in ngraph reference backend computations. Note that in case of other backends with a backend-specific graph representation, `_function` has a different type and represents a backend-specific graph or just a set of computational kernels used to perform inference.
+- `_inputIndex` - Maps the name of an input to its index among all network inputs.
+- `_outputIndex` - Maps the name of an output to its index among all network outputs.
 
 ### `ExecutableNetwork` Constructor with `ICNNNetwork`
 
-This constructor accepts a generic representation of a neural network as an InferenceEngine::ICNNNetwork reference and is compiled into a hardware-specific device graph:
+This constructor accepts a generic representation of a neural network as an InferenceEngine::ICNNNetwork reference, which is compiled into a backend-specific device graph:
 
 @snippet src/template_executable_network.cpp executable_network:ctor_cnnnetwork
 
-The implementation `CompileGraph` is fully device-specific.
+The `CompileNetwork` implementation is fully device-specific.
 
-### `CompileGraph()`
+### `CompileNetwork()`
 
-The function accepts a const shared pointer to `const ngraph::Function` object and performs the following steps:
+The function accepts a const shared pointer to a `const ngraph::Function` object and performs the following steps:
 
-1. Deep copies a const object to a local object, which can later be modified.
-2. Applies common and plugin-specific transformations on a copied graph to make the graph more friendly to hardware operations. For details how to write custom plugin-specific transformation, please, refer to [Writing ngraph transformations](@ref new_ngraph_transformation) guide.
-3. Maps the transformed graph to a plugin-specific graph representation (for example, to MKLDNN graph for CPU). See details topics about network representation:
-    * [Intermediate Representation and Operation Sets](../_docs_MO_DG_IR_and_opsets.html)
-    * [Quantized networks](@ref quantized_networks).
-4. Allocates and fills memory for graph weights.
+1. Applies ngraph passes using the `TransformNetwork` function, which defines a plugin-specific conversion pipeline.
+2. Maps the transformed graph to a backend-specific graph representation (for example, to an MKLDNN graph for Intel CPU).
+3. Allocates and fills memory for graph weights, backend-specific memory handles, and so on.
 
-@snippet src/template_executable_network.cpp executable_network:compile_graph
+@snippet src/template_executable_network.cpp executable_network:map_graph
 
-> **NOTE**: After all these steps, the hardware-specific graph is ready to create inference requests and perform inference.
+> **NOTE**: After all these steps, the backend-specific graph is ready to create inference requests and perform inference.
 
 ### `ExecutableNetwork` Constructor Importing from Stream
 
-This constructor creates a hardware-specific graph by importing from a stream object:
+This constructor creates a backend-specific graph by importing from a stream object:
 
-> **NOTE**: The export of hardware-specific graph is done in the `ExportImpl` method, and data formats must be the same for both import and export.
+> **NOTE**: The export of the backend-specific graph is done in the `ExportImpl` method, and data formats must be the same for both import and export.
 
 @snippet src/template_executable_network.cpp executable_network:ctor_import_stream
 
@@ -59,9 +58,9 @@ This constructor creates a hardware-specific graph by importing from a stream ob
 **Implementation details:**   
 Base InferenceEngine::ExecutableNetworkThreadSafeDefault class implements the public InferenceEngine::ExecutableNetworkThreadSafeDefault::Export method as following:
 - Writes `_plugin->GetName()` to the `model` stream.
-- Calls the `ExportImpl` method defined in a derived class to dump a hardware-specific graph.
+- Calls the `ExportImpl` method defined in a derived class to dump a backend-specific graph.
 
-The implementation of the method should write all data to the `model` stream, which is required to import a hardware-specific graph later in the `Plugin::Import` method:
+The implementation of the method should write to the `model` stream all data that is required to import the backend-specific graph later in the `Plugin::Import` method:
 
 @snippet src/template_executable_network.cpp executable_network:export_impl
 
@@ -73,7 +72,6 @@ The method creates an asynchronous inference request and returns it. While the p
 - [Asynchronous inference request](@ref async_infer_request), which is a wrapper for a synchronous inference request and can run a pipeline asynchronously. Depending on a device pipeline structure, it can has one or several stages:
    - For single-stage pipelines, there is no need to define this method and create a class derived from InferenceEngine::AsyncInferRequestThreadSafeDefault. For single stage pipelines, a default implementation of this method creates InferenceEngine::AsyncInferRequestThreadSafeDefault wrapping a synchronous inference request and runs it asynchronously in the `_taskExecutor` executor.
    - For pipelines with multiple stages, such as performing some preprocessing on host, uploading input data to a device, running inference on a device, or downloading and postprocessing output data, schedule stages on several task executors to achieve better device use and performance. You can do it by creating a sufficient number of inference requests running in parallel. In this case, device stages of different inference requests are overlapped with preprocessing and postprocessing stage giving better performance.
-
    > **IMPORTANT**: It is up to you to decide how many task executors you need to optimally execute a device pipeline.
 
 @snippet src/template_executable_network.cpp executable_network:create_infer_request
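+
+For illustration, below is a minimal sketch of a two-stage `_pipeline` (the stage list consumed by InferenceEngine::AsyncInferRequestThreadSafeDefault). It assumes a hypothetical remote device and reuses the `cpuTaskExecutor`, `_waitExecutor`, and `_inferRequest` members of `TemplateAsyncInferRequest`; see `template_async_infer_request.cpp` for the actual single- and multi-stage variants:
+
+```cpp
+// Sketch only: two pipeline stages running on different task executors.
+_pipeline = {
+    // Stage 1 (cpuTaskExecutor): preprocess inputs and start execution on the device.
+    {cpuTaskExecutor, [this] {
+        _inferRequest->inferPreprocess();
+        _inferRequest->startPipeline();
+    }},
+    // Stage 2 (_waitExecutor): wait for the device to finish and postprocess outputs.
+    {_waitExecutor, [this] {
+        _inferRequest->waitPipeline();
+        _inferRequest->inferPostprocess();
+    }}
+};
+```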
index c7ea3794744368e0ade267c9000c662786800673..d57989889807240d6071c4fa16938e2faa66ddcd 100644 (file)
@@ -1,7 +1,7 @@
 # Synchronous Inference Request {#infer_request}
 
 `InferRequest` class functionality:
-- Allocate input and output blobs needed for a hardware-dependent network inference.
+- Allocate input and output blobs needed for a backend-dependent network inference.
 - Define functions for inference process stages (for example, `preprocess`, `upload`, `infer`, `download`, `postprocess`). These functions can later be used to define an execution pipeline during [Asynchronous Inference Request](@ref async_infer_request) implementation.
 - Call inference stages one by one synchronously.
 
@@ -20,9 +20,15 @@ The example class has several fields:
 
 - `_executableNetwork` - reference to an executable network instance. From this reference, an inference request instance can take a task executor, use counter for a number of created inference requests, and so on.
 - `_profilingTask` - array of the `std::array<InferenceEngine::ProfilingTask, numOfStages>` type. Defines names for pipeline stages. Used to profile an inference pipeline execution with the Intel® instrumentation and tracing technology (ITT).
-- `_inputsNCHW` - input blob map
-- `_outputsNCHW` - output blob map
-- Several double values to hold an execution time for pipeline stages.
+- `_durations` - array of durations of each pipeline stage.
+- `_networkInputBlobs` - input blob map.
+- `_networkOutputBlobs` - output blob map.
+- `_parameters` - `ngraph::Function` parameter operations.
+- `_results` - `ngraph::Function` result operations.
+- Backend-specific fields:
+       - `_inputTensors` - input tensors that wrap the `_networkInputBlobs` blobs. They are used as inputs to the backend `_executable` computational graph.
+       - `_outputTensors` - output tensors that wrap the `_networkOutputBlobs` blobs. They are used as outputs of the backend `_executable` computational graph.
+       - `_executable` - an executable object / backend computational graph.
 
 ### `InferRequest` Constructor
 
@@ -30,11 +36,6 @@ The constructor initializes helper fields and calls methods which allocate blobs
 
 @snippet src/template_infer_request.cpp infer_request:ctor
 
-The implementation of function allocating device buffers is fully device-specific and not provided in the guide. 
-The implementation of function allocating host buffers assumes that the `Template` device works 
-natively only with the InferenceEngine::NCHW input and output layout, while the user can specify the InferenceEngine::NHWC as a layout 
-of InferenceEngine::CNNNetwork inputs and outputs and set InferenceEngine::NHWC blobs via the InferenceEngine::InferRequest::SetBlob method.
-
 > **NOTE**: Call InferenceEngine::CNNNetwork::getInputsInfo and InferenceEngine::CNNNetwork::getOutputsInfo to specify both layout and precision of blobs, which you can set with InferenceEngine::InferRequest::SetBlob and get with InferenceEngine::InferRequest::GetBlob. A plugin uses these hints to determine its internal layouts and precisions for input and output blobs if needed. 
 
 ### `~InferRequest` Destructor
@@ -51,14 +52,27 @@ Decrements a number of created inference requests:
 
 @snippet src/template_infer_request.cpp infer_request:infer_impl
 
+#### 1. `inferPreprocess`
+
 Below is the code of the `inferPreprocess` method to demonstrate Inference Engine common preprocessing step handling:
 
 @snippet src/template_infer_request.cpp infer_request:infer_preprocess
 
 **Details:**
 * `InferImpl` must call the InferenceEngine::InferRequestInternal::execDataPreprocessing function, which executes the common Inference Engine preprocessing step (for example, applies resize or color conversion operations) if it is set by the user. The output dimensions, layout and precision match the input information set via InferenceEngine::CNNNetwork::getInputsInfo.
-* To handle both InferenceEngine::NCHW and InferenceEngine::NHWC input layouts, the `TemplateInferRequest` class has the `_inputsNCHW` field, which holds blobs in the InferenceEngine::NCHW layout. During Inference Request execution, `InferImpl` copies from the input InferenceEngine::NHWC layout to `_inputsNCHW` if needed.
-* The next logic of `InferImpl` works with `_inputsNCHW`.
+* If the `inputBlob` passed by the user differs in precision from the precision expected by the plugin, `blobCopy` is performed to do the actual precision conversion (see the sketch below).
+
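+A minimal sketch of such a conversion for one particular case: the user provides an FP16 input blob while the backend expects FP32. The helper name `convertF16toF32Blob` is made up for illustration; the real code uses a generic `blobCopy` helper:
+
+```cpp
+#include <ie_blob.h>
+#include <precision_utils.h>
+
+// Sketch only: copy a user-provided FP16 blob into the FP32 blob expected by the backend.
+void convertF16toF32Blob(const InferenceEngine::Blob::Ptr& src, const InferenceEngine::Blob::Ptr& dst) {
+    auto srcData = src->cbuffer().as<const InferenceEngine::ie_fp16*>();
+    auto dstData = dst->buffer().as<float*>();
+    for (size_t i = 0; i < src->size(); ++i) {
+        dstData[i] = InferenceEngine::PrecisionUtils::f16tof32(srcData[i]);
+    }
+}
+```
+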
+#### 2. `startPipeline`
+
+Executes a pipeline synchronously using the `_executable` object:
+
+@snippet src/template_infer_request.cpp infer_request:start_pipeline
+
+#### 3. `inferPostprocess`
+
+Converts output blobs if the precision of the backend output blobs differs from the precision of the blobs passed by the user:
+
+@snippet src/template_infer_request.cpp infer_request:infer_postprocess
 
 ### `GetPerformanceCounts()`
 
index 038f9e167b49431232ebd359fbab6e9e7c94163f..f8638c768b8880c3b325f5d0211c89f620d78b14 100644 (file)
@@ -12,15 +12,15 @@ Inference Engine plugin dynamic library consists of several main components:
 1. [Plugin class](@ref plugin):
        - Provides information about devices of a specific type.
        - Can create an [executable network](@ref executable_network) instance which represents a Neural 
-       Network hardware-specific graph structure for a particular device in opposite to the InferenceEngine::ICNNNetwork 
-       interface which is hardware-independent.
+       Network backend-specific graph structure for a particular device, as opposed to the InferenceEngine::ICNNNetwork 
+       interface, which is backend-independent.
        - Can import an already compiled graph structure from an input stream to an 
        [executable network](@ref executable_network) object.
 2. [Executable Network class](@ref executable_network):
        - Is an execution configuration compiled for a particular device and takes into account its capabilities.
        - Holds a reference to a particular device and a task executor for this device.
        - Can create several instances of [Inference Request](@ref infer_request).
-       - Can export an internal hardware-specific graph structure to an output stream.
+       - Can export an internal backend-specific graph structure to an output stream.
 3. [Inference Request class](@ref infer_request):
     - Runs an inference pipeline serially.
     - Can extract performance counters for an inference pipeline execution profiling.
@@ -30,7 +30,7 @@ Inference Engine plugin dynamic library consists of several main components:
 
 > **NOTE**: This documentation is written based on the `Template` plugin, which demonstrates plugin 
 development details. Find the complete code of the `Template`, which is fully compilable and up-to-date,
-at `<dldt source dir>/docs_developer/template_plugin`.
+at `<dldt source dir>/docs/template_plugin`.
 
 Detailed guides
 -----------------------
index 93f80f189d0e39e14367b522330b806269e690e0..3c51085916adc48cf2dc1e05110f29355ab0cb99 100644 (file)
@@ -1,5 +1,15 @@
 # Plugin {#plugin}
 
+An Inference Engine plugin usually represents a wrapper around a backend. Backends can be:
+- OpenCL-like backend (e.g. clDNN library) for GPU devices.
+- MKLDNN backend for Intel CPU devices.
+- NVIDIA cuDNN for NVIDIA GPUs.
+
+The responsibilities of an Inference Engine plugin:
+- Initializes a backend and throws an exception in the `Engine` constructor if the backend cannot be initialized.
+- Provides information about devices enabled by a particular backend, e.g. how many devices are available, their properties, and so on.
+- Loads or imports [executable network](@ref executable_network) objects.
+
 In addition to the Inference Engine Public API, the Inference Engine provides the Plugin API, which is a set of functions and helper classes that simplify new plugin development:
 
 - header files in the `inference_engine/src/plugin_api` directory
@@ -18,8 +28,10 @@ Based on that, declaration of a plugin class can look as follows:
 
 #### Class Fields
 
-The provided plugin class also has a single field:
+The provided plugin class also has several fields:
 
+* `_backend` - a backend engine that is used to perform actual computations for network inference. The `Template` plugin uses `ngraph::runtime::Backend`, which performs computations using ngraph reference implementations.
+* `_waitExecutor` - a task executor that waits for a response from a device about completion of device tasks.
 * `_cfg` of type `Configuration`:
 
 @snippet src/template_config.hpp configuration:header
@@ -28,6 +40,7 @@ As an example, a plugin configuration has three value parameters:
 
 - `deviceId` - particular device ID to work with. Applicable if a plugin supports more than one `Template` device. In this case, some plugin methods, like `SetConfig`, `QueryNetwork`, and `LoadNetwork`, must support the CONFIG_KEY(KEY_DEVICE_ID) parameter. 
 - `perfCounts` - boolean value to identify whether to collect performance counters during [Inference Request](@ref infer_request) execution.
+- `_streamsExecutorConfig` - configuration of `InferenceEngine::IStreamsExecutor` that handles settings of the multi-threaded context.
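+
+A possible shape of the parsing code for these keys is sketched below. The constructor signature is an assumption, and the complete parsing, including the streams executor settings, lives in `template_config.cpp`:
+
+```cpp
+// Sketch only: parse the configuration map into Configuration fields.
+Configuration::Configuration(const ConfigMap& config, const Configuration& defaultCfg) {
+    *this = defaultCfg;
+    for (auto&& item : config) {
+        if (item.first == CONFIG_KEY(DEVICE_ID)) {
+            deviceId = std::stoi(item.second);  // assumption: device ID is stored as an integer index
+        } else if (item.first == CONFIG_KEY(PERF_COUNT)) {
+            perfCounts = (item.second == CONFIG_VALUE(YES));
+        } else {
+            // streams executor related keys could be forwarded to _streamsExecutorConfig here;
+            // any really unknown key must cause an exception
+            THROW_IE_EXCEPTION << "Unsupported config key: " << item.first;
+        }
+    }
+}
+```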
 
 ### Engine Constructor
 
@@ -47,25 +60,45 @@ A plugin must define a device name enabled via the `_pluginName` field of a base
 of the public InferenceEngine::InferencePluginInternal::LoadNetwork method that calls plugin-specific `LoadExeNetworkImpl`, which is defined in a derived class.
 
 This is the most important function of the `Plugin` class and creates an instance of compiled `ExecutableNetwork`,
-which holds a hardware-dependent compiled graph in an internal representation:
+which holds a backend-dependent compiled graph in an internal representation:
 
 @snippet src/template_plugin.cpp plugin:load_exe_network_impl
 
 Before a creation of an `ExecutableNetwork` instance via a constructor, a plugin may check if a provided 
 InferenceEngine::ICNNNetwork object is supported by a device. In the example above, the plugin checks precision information.
 
+An important step before the creation of an `ExecutableNetwork` instance is to call the `TransformNetwork` method, which applies ngraph transformation passes.
+
 Actual graph compilation is done in the `ExecutableNetwork` constructor. Refer to the [ExecutableNetwork Implementation Guide](@ref executable_network) for details.
 
 > **NOTE**: Actual configuration map used in `ExecutableNetwork` is constructed as a base plugin 
 > configuration set via `Plugin::SetConfig`, where some values are overwritten with `config` passed to `Plugin::LoadExeNetworkImpl`. 
 > Therefore, the config of  `Plugin::LoadExeNetworkImpl` has a higher priority.
 
+### `TransformNetwork()`
+
+The function accepts a const shared pointer to a `const ngraph::Function` object and performs the following steps:
+
+1. Deep copies a const object to a local object, which can later be modified.
+2. Applies common and plugin-specific transformations on the copied graph to make it more friendly to hardware operations. For details on how to write custom plugin-specific transformations, refer to the [Writing ngraph transformations](@ref new_ngraph_transformation) guide. See detailed topics about network representation:
+    * [Intermediate Representation and Operation Sets](../_docs_MO_DG_IR_and_opsets.html)
+    * [Quantized networks](@ref quantized_networks).
+
+@snippet src/template_plugin.cpp plugin:transform_network
+
+> **NOTE**: After all these transformations, the `ngraph::Function` object contains operations that can be directly mapped to backend kernels. For example, if the backend has a kernel computing the `A + B` operations at once, the `TransformNetwork` function should contain a pass that fuses operations `A` and `B` into a single custom operation `A + B` matching the backend kernel set (see the sketch below).
+
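+A minimal sketch of such a fusing pass written with the ngraph `MatcherPass` API is shown below; the fused backend operation is hypothetical and its creation is only outlined in comments, and the `Template` plugin itself does not register this pass:
+
+```cpp
+#include <memory>
+
+#include <ngraph/opsets/opset3.hpp>
+#include <ngraph/pass/graph_rewrite.hpp>
+#include <ngraph/pattern/op/wrap_type.hpp>
+#include <ngraph/rt_info.hpp>
+
+// Sketch only: fuse Multiply -> Add into a single backend operation.
+class FuseMultiplyAdd : public ngraph::pass::MatcherPass {
+public:
+    FuseMultiplyAdd() {
+        auto add = ngraph::pattern::wrap_type<ngraph::opset3::Add>();
+        ngraph::matcher_pass_callback callback = [](ngraph::pattern::Matcher& m) {
+            auto addNode = m.get_match_root();
+            auto mulNode = std::dynamic_pointer_cast<ngraph::opset3::Multiply>(
+                addNode->input_value(0).get_node_shared_ptr());
+            if (!mulNode)
+                return false;  // the pattern does not match, keep the graph as is
+            // Create the hypothetical fused backend operation and replace the matched sub-graph, e.g.:
+            // auto fused = std::make_shared<FusedMultiplyAdd>(mulNode->input_value(0),
+            //     mulNode->input_value(1), addNode->input_value(1));
+            // fused->set_friendly_name(addNode->get_friendly_name());
+            // ngraph::copy_runtime_info({mulNode, addNode}, fused);
+            // ngraph::replace_node(addNode, fused);
+            return true;
+        };
+        register_matcher(std::make_shared<ngraph::pattern::Matcher>(add, "FuseMultiplyAdd"), callback);
+    }
+};
+```
+
+Such a pass would then be registered in the `TransformNetwork` pipeline next to the common optimizations, for example via `passManager.register_pass<FuseMultiplyAdd>()` or by adding it to a `ngraph::pass::GraphRewrite` container.
+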
 ### `QueryNetwork()`
 
 Use the method with the `HETERO` mode, which allows to distribute network execution between different 
 devices based on the `ngraph::Node::get_rt_info()` map, which can contain the `"affinity"` key.
 The `QueryNetwork` method analyzes operations of provided `network` and returns a list of supported
-operations via the InferenceEngine::QueryNetworkResult structure:
+operations via the InferenceEngine::QueryNetworkResult structure. `QueryNetwork` first applies the `TransformNetwork` passes to the input `ngraph::Function` argument. After this, in the ideal case, the transformed network contains only operations that are 1:1 mapped to kernels in the computational backend. In this case, it is easy to analyze which operations are supported (`_backend` has a kernel for such an operation or an extension for the operation is provided) and which are not supported (the kernel is missing in `_backend`):
+
+1. Store the original names of all operations in the input `ngraph::Function`.
+2. Apply the `TransformNetwork` passes. Note that the names of operations in the transformed network can differ, so the mapping is restored in the steps below.
+3. Construct `supported` and `unsupported` maps that contain the names of the original operations. Note that since inference is performed using the ngraph reference backend, the decision whether an operation is supported depends on whether the latest OpenVINO opset contains such an operation.
+4. `QueryNetworkResult.supportedLayersMap` contains only the operations that are fully supported by `_backend` (a condensed sketch follows the snippet below).
 
 @snippet src/template_plugin.cpp plugin:query_network
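+
+A condensed sketch of steps 1-4, assuming `function` is the input `ngraph::Function` and using the fused-names helper from `transformations/rt_info/fused_names_attribute.hpp`; the opset choice and the surrounding method signature are simplifications, and the snippet above points to the actual implementation:
+
+```cpp
+// Sketch only: decide which original operations are supported by the ngraph reference backend.
+InferenceEngine::QueryNetworkResult res;
+
+// 1. Remember the friendly names of the original operations.
+std::unordered_set<std::string> originalOps;
+for (auto&& node : function->get_ops())
+    originalOps.insert(node->get_friendly_name());
+
+// 2. Apply the same transformations as LoadNetwork does.
+auto transformedFunction = TransformNetwork(function);
+
+// 3. An original operation is supported if the transformed operations it was fused into
+//    are present in the opset supported by the backend.
+std::unordered_set<std::string> supported;
+const auto& opset = ngraph::get_opset4();  // assumption: the latest opset available at that time
+for (auto&& node : transformedFunction->get_ops()) {
+    for (auto&& fusedLayerName : ngraph::getFusedNamesVector(node)) {
+        if (originalOps.count(fusedLayerName) && opset.contains_op_type(node.get()))
+            supported.insert(fusedLayerName);
+    }
+}
+
+// 4. Report the supported operations for this device.
+for (auto&& layerName : supported)
+    res.supportedLayersMap.emplace(layerName, GetName());
+```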
 
@@ -83,7 +116,7 @@ Sets new values for plugin configuration keys:
 @snippet src/template_plugin.cpp plugin:set_config
 
 In the snippet above, the `Configuration` class overrides previous configuration values with the new 
-ones. All these values are used during hardware-specific graph compilation and execution of inference requests.
+ones. All these values are used during backend-specific graph compilation and execution of inference requests.
 
 > **NOTE**: The function must throw an exception if it receives an unsupported configuration key.
 
@@ -111,7 +144,7 @@ all devices of the same `Template` type with automatic logic of the `MULTI` devi
 in the `option` parameter as `{ CONFIG_KEY(KEY_DEVICE_ID), "deviceID" }`.
 - METRIC_KEY(SUPPORTED_METRICS) - list of metrics supported by a plugin
 - METRIC_KEY(SUPPORTED_CONFIG_KEYS) - list of configuration keys supported by a plugin that
-affects their behavior during a hardware-specific graph compilation or an inference requests execution
+affect the plugin behavior during backend-specific graph compilation or inference request execution
 - METRIC_KEY(OPTIMIZATION_CAPABILITIES) - list of optimization capabilities of a device.
 For example, supported data types and special optimizations for them.
 - Any other device-specific metrics. In this case, place metrics declaration and possible values to 
@@ -128,9 +161,9 @@ The snippet below provides an example of the implementation for `GetMetric`:
 
 ### `ImportNetworkImpl()`
 
-The importing network mechanism allows to import a previously exported hardware-specific graph and wrap it 
+The network import mechanism allows importing a previously exported backend-specific graph and wrapping it 
 using an [ExecutableNetwork](@ref executable_network) object. This functionality is useful if 
-hardware-specific graph compilation takes significant time and/or cannot be done on a target host 
+backend-specific graph compilation takes significant time and/or cannot be done on a target host 
 device due to other reasons.
 
 **Implementation details:** The base plugin class InferenceEngine::InferencePluginInternal implements InferenceEngine::InferencePluginInternal::ImportNetwork 
@@ -141,7 +174,7 @@ implementation and define an output blob structure up to its needs. This
 can be useful if a plugin exports a blob in a special format for integration with other frameworks 
 where a common Inference Engine header from a base class implementation is not appropriate. 
 
-During export of hardware-specific graph using `ExecutableNetwork::Export`, a plugin may export any 
+During export of a backend-specific graph using `ExecutableNetwork::Export`, a plugin may export any 
 type of information it needs to import a compiled graph properly and check its correctness. 
 For example, the export information may include:
 
@@ -150,7 +183,7 @@ For example, the export information may include:
 throw an exception if the `model` stream contains wrong data. For example, if devices have different 
 capabilities and a graph compiled for a particular device cannot be used for another, such type of 
 information must be stored and checked during the import. 
-- Compiled hardware-specific graph itself
+- The compiled backend-specific graph itself
 - Information about precisions and shapes set by the user
 
 @snippet src/template_plugin.cpp plugin:import_network_impl
index a4c11ed9984acd56a78327cb94692755cea72eb1..53bddf823209ae362ecb56ca2e93314eb4da50e7 100644 (file)
@@ -1,40 +1,58 @@
 # Plugin Testing {#plugin_testing}
 
-Inference Engine (IE) tests infrastructure provides a predefined set of functional tests and utilities exported via the Inference
-Engine developer package. They are used to verify a plugin using the Inference Engine public API.
+The Inference Engine (IE) test infrastructure provides a predefined set of functional tests and utilities. They are used to verify a plugin using the Inference Engine public API.
 All the tests are written in the [Google Test C++ framework](https://github.com/google/googletest).
 
-To build test binaries together with other build artifacts, use the `make all` command. For details, see
-[Build Plugin Using CMake*](@ref plugin_build).
+Inference Engine Plugin tests are included in the `IE::funcSharedTests` CMake target, which is built within the OpenVINO repository
+(see the [Build Plugin Using CMake](@ref plugin_build) guide). This library contains test definitions (the test bodies) that can be parametrized and instantiated in plugins depending on whether a plugin supports a particular feature, on specific sets of parameters for the supported operation set, and so on.
+
+Test definitions are split into test class declarations (see `inference_engine/tests/functional/plugin/shared/include`) and test class implementations (see `inference_engine/tests/functional/plugin/shared/src`) and include the following scopes of plugin conformance tests:
+
+1. **Behavior tests** (`behavior` sub-folder), which are a separate test group to check that a plugin satisfies basic Inference
+Engine concepts: plugin creation, support for multiple executable networks, support for multiple synchronous and asynchronous inference requests, and so on. See the next section for details on how to instantiate a test definition class with plugin-specific parameters.
+
+2. **Single layer tests** (`single_layer_tests` sub-folder). This group of tests checks that a particular single layer can be inferred on a device. An example of test instantiation based on a test definition from the `IE::funcSharedTests` library:
+
+    - From the declaration of the convolution test class, we can see that it is a parametrized GoogleTest-based class with the `convLayerTestParamsSet` tuple of parameters:
+
+    @snippet single_layer_tests/convolution.hpp test_convolution:definition
+
+    - Based on that, define a set of parameters for `Template` plugin functional test instantiation:
 
-Inference Engine Plugin tests are included in the `funcSharedTests` CMake target which is built within the  Deep Learning Deployment Toolkit (DLDT) repository
-(see [Build Plugin Using CMake](@ref plugin_build) guide).
+    @snippet single_layer_tests/convolution.cpp test_convolution:declare_parameters
 
-Test definitions:
+    - Instantiate the test itself using the standard GoogleTest macro `INSTANTIATE_TEST_CASE_P`:
 
-1. **Conformance tests**, which are a separate test group to check that a plugin satisfies basic Inference
-Engine concepts: plugin creation, multiple executable networks support, multiple synchronous and asynchronous inference requests support, and so on.
-2. **Other API tests**, which contain the following types of tests:
-    - Per-layer tests. Located in the `single_layer_tests`and `subgraph_tests` folders.
-    - Tests for integration with the `InferenceEngine::Core` class. Located in the the `ie_class` folder.
-    - Tests to check that IE common preprocessing works with your plugin. The `io_blob_tests` folder.
+    @snippet single_layer_tests/convolution.cpp test_convolution:instantiate
+
+3. **Sub-graph tests** (`subgraph_tests` sub-folder). This group of tests is designed to test small patterns or combinations of layers. For example, when a particular topology (e.g. TF ResNet-50) is being enabled in a plugin, there is no need to add the whole topology to the tests. Instead, a particular repetitive subgraph or pattern can be extracted from `ResNet-50` and added to the tests. The instantiation of sub-graph tests is done in the same way as for single layer tests.
+> **Note**: such sub-graphs or patterns should be added to the `IE::ngraphFunctions` library first (this library is a predefined set of small `ngraph::Function` builders) and then reused in sub-graph tests.
+
+4. **HETERO tests** (`subgraph_tests` sub-folder) contain tests for the `HETERO` scenario (manual or automatic affinity settings, tests for `QueryNetwork`).
+
+5. **Other tests**, which contain tests for other scenarios and have the following types of tests:
+    - Tests for execution graph
+    - Etc.
+
+To use these tests for your own plugin development, link the `IE::funcSharedTests` library to your test binary and instantiate the required test cases with the desired parameter values (see the example below).
+
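+For example, an illustrative instantiation of a shared behavior test for the `Template` device; the test class name, header path, and parameter layout follow the shared-tests conventions but should be treated as assumptions rather than exact signatures:
+
+```cpp
+#include "behavior/infer_request.hpp"
+
+using namespace BehaviorTestsDefinitions;
+
+namespace {
+// Example only: run the shared InferRequest behavior tests on the TEMPLATE device with FP32 inputs and an empty config.
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
+    ::testing::Combine(
+        ::testing::Values(InferenceEngine::Precision::FP32),
+        ::testing::Values("TEMPLATE"),
+        ::testing::Values(std::map<std::string, std::string>{})),
+    InferRequestTests::getTestCaseName);
+}  // namespace
+```
+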
+> **NOTE**: A plugin may contain its own tests for use cases that are specific to hardware or need to be extensively tested.
+
+To build test binaries together with other build artifacts, use the `make all` command. For details, see
+[Build Plugin Using CMake*](@ref plugin_build).
 
-To use these tests for your own plugin development, link the `funcSharedTests` library to your test binary and
-instantiate required test cases with desired parameters values.
+### Tests for plugin-specific ngraph transformations
 
-> **NOTE**: A plugin may contain its own tests for use cases that are specific to hardware or need to be extensively
-> tested. Depending on your device positioning, you can implement more specific tests for your device. Such tests can
-> be defined both for conformance and other API tests groups within your own test binary.
+Please refer to the [Transformation testing](@ref new_ngraph_transformation) guide.
 
-How to Extend Inference Engine Plugin Tests
-========================
+### How to Extend Inference Engine Plugin Tests
 
 Inference Engine Plugin tests are open for contribution.
-Add common test case definitions applicable for all plugins to the `funcSharedTests` target within the DLDT repository. Then, any other plugin supporting corresponding functionality can instantiate the new test.
+Add common test case definitions applicable to all plugins to the `IE::funcSharedTests` target within the OpenVINO repository. Then, any other plugin supporting the corresponding functionality can instantiate the new test.
 
 All Inference Engine per-layer tests check test layers functionality. They are developed using nGraph functions
 as input graphs used by tests. In this case, to test a new layer with layer tests, extend
-the `ngraphFunctions` CMake target, which is also included in the Inference Engine Developer package, with a new nGraph function
+the `IE::ngraphFunctions` library, which is also included in the Inference Engine Developer package, with a new nGraph function
 including the corresponding operation.
 
-> **NOTE**: When implementing a new subgraph test, add new single-layer tests for each operation of the subgraph.
\ No newline at end of file
+> **NOTE**: When implementing a new subgraph test, add new single-layer tests for each operation of the subgraph if such a test does not exist.
\ No newline at end of file
index df6c02e4de0017d2b5323feb942c17064bf211b0..182c6d1a54bef0526cdbb93440baf9f3feb1bf8a 100644 (file)
@@ -25,7 +25,6 @@ if(ENABLE_TESTS)
        if(ENABLE_FUNCTIONAL_TESTS)
            add_subdirectory(tests/functional)
        endif()
-
 endif()
 # [cmake:main]
 
index 49d8cf0ec2ed30d18f9cccc8e379daebdd8081ed..d6128390d24ffe7956041909279bf4f20a5abe1c 100644 (file)
@@ -1,6 +1,7 @@
 # template-plugin
 
 Template Plugin for Inference Engine which demonstrates basics of how Inference Engine plugin can be built and implemented on top of Inference Engine Developer Package and Plugin API.
+As a backend for actual computations, ngraph reference implementations are used, so the Template plugin is fully functional.
 
 ## How to build
 
@@ -8,7 +9,7 @@ Template Plugin for Inference Engine which demonstrates basics of how Inference
 $ cd $DLDT_HOME
 $ mkdir $DLDT_HOME/build
 $ cd $DLDT_HOME/build
-$ cmake -DENABLE_TESTS=ON -DENABLE_BEH_TESTS=ON -DENABLE_FUNCTIONAL_TESTS=ON ..
+$ cmake -DENABLE_TESTS=ON -DENABLE_FUNCTIONAL_TESTS=ON ..
 $ make -j8
 $ cd $TEMPLATE_PLUGIN_HOME
 $ mkdir $TEMPLATE_PLUGIN_HOME/build
index f5921e20d9795a12aeadfa30b379f933284afcbd..86b0e9eb8771f9931228acc13e2e26fb4327e1cb 100644 (file)
@@ -25,6 +25,7 @@ target_include_directories(${TARGET_NAME} PRIVATE
 target_include_directories(${TARGET_NAME} PRIVATE
     "${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include")
 
+# link common Inference Engine libraries
 target_link_libraries(${TARGET_NAME} PRIVATE
     IE::inference_engine
     IE::inference_engine_transformations
index f1024185c444c0729e2d0524310817d6de86a676..fed650962c79624a18806b0a6949a0f6f2a04c19 100644 (file)
@@ -2,12 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <utility>
-
-#include <ie_profiling.hpp>
-
 #include "template_async_infer_request.hpp"
-#include "template_executable_network.hpp"
 
 using namespace TemplatePlugin;
 
@@ -19,11 +14,13 @@ TemplateAsyncInferRequest::TemplateAsyncInferRequest(
     const InferenceEngine::ITaskExecutor::Ptr& callbackExecutor) :
     AsyncInferRequestThreadSafeDefault(inferRequest, cpuTaskExecutor, callbackExecutor),
     _inferRequest(inferRequest), _waitExecutor(waitExecutor) {
-    constexpr const auto remoteDevice = false;
-    // By default single stage pipeline is created.
+    // In the current implementation there are CPU-only tasks and no need for 2 executors.
+    // So, by default, a single stage pipeline is created.
     // This stage executes InferRequest::Infer() using cpuTaskExecutor.
     // But if remote asynchronous device is used the pipeline can by splitted tasks that are executed by cpuTaskExecutor
     // and waiting tasks. Waiting tasks can lock execution thread so they use separate threads from other executor.
+    constexpr const auto remoteDevice = false;
+
     if (remoteDevice) {
         _pipeline = {
             {cpuTaskExecutor, [this] {
index b01f7a6cabd6383c004b21c96c0d8d6fa57f2202..7297c5effbdd3aa6016e983455314b15b452493c 100644 (file)
@@ -2,15 +2,8 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-
-#include <string>
-#include <vector>
-#include <algorithm>
-
-#include <ie_util_internal.hpp>
 #include <ie_plugin_config.hpp>
 #include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
-#include <file_utils.h>
 #include <cpp_interfaces/exception2status.hpp>
 
 #include "template_config.hpp"
index 453c85a3e6d10df4483ec3b4683b05d8ba1b82c0..b57d40a63b3b708bf4cc82830daa1fc768f92ee6 100644 (file)
@@ -4,10 +4,8 @@
 
 #pragma once
 
-#include <vector>
 #include <string>
 #include <map>
-#include <unordered_map>
 
 #include <ie_parameter.hpp>
 
@@ -15,9 +13,6 @@
 
 namespace TemplatePlugin {
 
-template<typename T>
-using IOMap = std::unordered_map<std::string, T>;
-
 // ! [configuration:header]
 using ConfigMap = std::map<std::string, std::string>;
 
index b0d0e66e80ef7dbe4eeae7421b047301cb4bdd7a..98b6e7dd9f752e941a02e81d9a76c84d0ddf6fe0 100644 (file)
@@ -2,19 +2,9 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include <atomic>
-#include <set>
-#include <utility>
-#include <algorithm>
-#include <memory>
-#include <string>
-#include <vector>
-
 #include <ie_metric_helpers.hpp>
-#include <ie_util_internal.hpp>
 #include <ie_plugin_config.hpp>
 #include <threading/ie_executor_manager.hpp>
-#include <details/ie_cnn_network_tools.h>
 
 #include "template/template_config.hpp"
 #include "template_plugin.hpp"
 using namespace TemplatePlugin;
 
 // ! [executable_network:ctor_cnnnetwork]
-TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<ngraph::Function>&   function,
-                                                     const Configuration&                       cfg,
-                                                     const Plugin::Ptr&                         plugin) :
+TemplatePlugin::ExecutableNetwork::ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
+                                                     const Configuration&                           cfg,
+                                                     const Plugin::Ptr&                             plugin) :
     InferenceEngine::ExecutableNetworkThreadSafeDefault(nullptr, nullptr), // Disable default threads creation
     _cfg(cfg),
-    _plugin(plugin),
-    _function(function) {
+    _plugin(plugin) {
     // TODO: if your plugin supports device ID (more that single instance of device can be on host machine)
     // you should select proper device based on KEY_DEVICE_ID or automatic behavior
     // In this case, _waitExecutor should also be created per device.
     try {
-        CompileGraph();
-        InitExecutor();
+        CompileNetwork(function);
+        InitExecutor(); // creates a thread-based executor used for async requests
     } catch (const InferenceEngineException&) {
         throw;
     } catch (const std::exception & e) {
@@ -53,12 +42,17 @@ TemplatePlugin::ExecutableNetwork::ExecutableNetwork(std::istream &
     _cfg(cfg),
     _plugin(plugin) {
     // TODO: since Import network is not a mandatory functionality, this ctor can just be removed
+    THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
 }
 // ! [executable_network:ctor_import_stream]
 
-// ! [executable_network:compile_graph]
-void TemplatePlugin::ExecutableNetwork::CompileGraph() {
-    // TODO: perform actual graph compilation taking `_cfg` into account
+// ! [executable_network:map_graph]
+// forward declaration
+std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function);
+
+void TemplatePlugin::ExecutableNetwork::CompileNetwork(const std::shared_ptr<const ngraph::Function>& function) {
+    // TODO: perform actual graph compilation / mapping to backend graph representation / kernels
+    _function = TransformNetwork(function);
 
     // Generate backend specific blob mappings. For example Inference Engine uses not ngraph::Result nodes friendly name
     // as inference request output names but the name of the layer before.
@@ -74,13 +68,14 @@ void TemplatePlugin::ExecutableNetwork::CompileGraph() {
         _inputIndex.emplace(parameter->get_friendly_name(), _function->get_parameter_index(parameter));
     }
 
-    // Perform any other steps like allocation and filling device buffers, and so on
+    // Perform any other steps like allocation and filling of backend-specific memory handles and so on
 }
-// ! [executable_network:compile_graph]
+// ! [executable_network:map_graph]
+
 
 // ! [executable_network:init_executor]
 void TemplatePlugin::ExecutableNetwork::InitExecutor() {
-    // Default mutlitthreaded configuration is balanced for throughtput and latency cases and takes into account
+    // Default multi-threaded configuration is balanced for throughput and latency cases and takes into account
     // real hardware cores and NUMA nodes.
     auto streamsExecutorConfig = InferenceEngine::IStreamsExecutor::Config::MakeDefaultMultiThreaded(_cfg._streamsExecutorConfig);
     streamsExecutorConfig._name = "TemplateStreamsExecutor";
@@ -151,7 +146,8 @@ void TemplatePlugin::ExecutableNetwork::GetMetric(const std::string &name, Infer
 // ! [executable_network:get_metric]
 
 // ! [executable_network:export_impl]
-void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& dlaModel) {
+void TemplatePlugin::ExecutableNetwork::ExportImpl(std::ostream& modelStream) {
     // TODO: Code which exports graph from std::ostream
+    THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
 }
 // ! [executable_network:export_impl]
index b2755b39c69b343508cb64c4df5717ec8cc07201..01bcc3270aff8f775757eb9f2aa7b006451bae15 100644 (file)
@@ -5,26 +5,14 @@
 
 #pragma once
 
-#include <utility>
-#include <tuple>
-#include <memory>
-#include <string>
-#include <vector>
-#include <map>
-#include <unordered_map>
-#include <list>
-
-#include <ie_common.h>
-#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
-#include <cnn_network_impl.hpp>
-#include <threading/ie_itask_executor.hpp>
-
-#include <ngraph/ngraph.hpp>
+#include <ngraph/function.hpp>
 
 #include "template_config.hpp"
 #include "template_infer_request.hpp"
 #include "template_async_infer_request.hpp"
 
+#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
+
 namespace TemplatePlugin {
 
 class Plugin;
@@ -36,9 +24,9 @@ class Plugin;
 // ! [executable_network:header]
 class ExecutableNetwork : public InferenceEngine::ExecutableNetworkThreadSafeDefault {
 public:
-    ExecutableNetwork(const std::shared_ptr<ngraph::Function>&  function,
-                      const Configuration&                      cfg,
-                      const std::shared_ptr<Plugin>&            plugin);
+    ExecutableNetwork(const std::shared_ptr<const ngraph::Function>& function,
+                      const Configuration&                           cfg,
+                      const std::shared_ptr<Plugin>&                 plugin);
 
     ExecutableNetwork(std::istream&                  model,
                       const Configuration&           cfg,
@@ -58,7 +46,7 @@ public:
 private:
     friend class TemplateInferRequest;
 
-    void CompileGraph();
+    void CompileNetwork(const std::shared_ptr<const ngraph::Function>& function);
     void InitExecutor();
 
     std::atomic<std::size_t>                    _requestId = {0};
index 671c76d3a560b3044b4d5519e0a9d1fc4c912cbb..aa2299d779c8b2046dc817f1a4c5b26d62255e87 100644 (file)
@@ -4,8 +4,6 @@
 
 #include "template_function_transformation.hpp"
 
-#include <ngraph/ngraph.hpp>
-
 using namespace ngraph;
 
 // ! [function_pass:template_transformation_cpp]
index 398aa3f057a77d65b63b8a83f1d6bf2656af6ccc..72938bec81ca13bae04d6a0823ae4198b09372eb 100644 (file)
@@ -4,10 +4,7 @@
 
 #pragma once
 
-#include <vector>
-#include <memory>
-
-#include <ngraph/ngraph.hpp>
+#include <ngraph/pass/pass.hpp>
 
 namespace ngraph {
 namespace pass {
index e33e1c92927a2ccc7e4425c440de2f80fd2012de..3d15842f7278ce03743aee7eaea2f7f35c0f5da3 100644 (file)
@@ -125,7 +125,7 @@ void TemplateInferRequest::InferImpl() {
     // TODO: fill with actual list of pipeline stages, which are executed synchronously for sync infer requests
     inferPreprocess();
     startPipeline();
-    waitPipeline();
+    waitPipeline();  // does nothing in current implementation
     inferPostprocess();
 }
 // ! [infer_request:infer_impl]
@@ -208,49 +208,53 @@ void TemplateInferRequest::inferPreprocess() {
 }
 // ! [infer_request:infer_preprocess]
 
+// ! [infer_request:start_pipeline]
 void TemplateInferRequest::startPipeline() {
     IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[StartPipeline])
     auto start = Time::now();
     _executable->call(_outputTensors, _inputTensors);
     _durations[StartPipeline] = Time::now() - start;
 }
+// ! [infer_request:start_pipeline]
 
 void TemplateInferRequest::waitPipeline() {
     IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[WaitPipeline])
     auto start = Time::now();
     // TODO: Wait pipeline using driver API or other synchronizations methods
+    // NOTE: not used in current implementation since `startPipeline` executes pipeline synchronously
     _durations[WaitPipeline] = Time::now() - start;
 }
 
+// ! [infer_request:infer_postprocess]
 void TemplateInferRequest::inferPostprocess() {
     IE_PROFILING_AUTO_SCOPE_TASK(_profilingTask[Postprocess]);
     auto start = Time::now();
     for (auto&& output : _outputs) {
         auto outputBlob = output.second;
         auto networkOutput = _networkOutputBlobs[output.first];
+        // perform precision conversion if the network output's precision and the computational
+        // graph output's precision are different
         if (outputBlob->getTensorDesc().getPrecision() != networkOutput->getTensorDesc().getPrecision()) {
             blobCopy(networkOutput, outputBlob);
         }
     }
     _durations[Postprocess] = Time::now() - start;
 }
+// ! [infer_request:infer_postprocess]
 
 // ! [infer_request:get_performance_counts]
 void TemplateInferRequest::GetPerformanceCounts(std::map<std::string, InferenceEngineProfileInfo> &perfMap) const {
     InferenceEngineProfileInfo info;
     info.execution_index = 0;
     info.status = InferenceEngineProfileInfo::EXECUTED;
+
     info.cpu_uSec = info.realTime_uSec = _durations[Preprocess].count();
     perfMap["1. input preprocessing"] = info;
-    info.cpu_uSec = 0;
-    info.realTime_uSec = 0;
+    info.cpu_uSec = info.realTime_uSec = 0;
     perfMap["2. input transfer to a device"] = info;
-    info.cpu_uSec = 0;
-    info.status = InferenceEngineProfileInfo::EXECUTED;
     info.cpu_uSec = info.realTime_uSec = _durations[StartPipeline].count();
     perfMap["3. execution time"] = info;
-    info.cpu_uSec = 0;
-    info.realTime_uSec = 0;
+    info.cpu_uSec = info.realTime_uSec = 0;
     perfMap["4. output transfer from a device"] = info;
     info.cpu_uSec = info.realTime_uSec = _durations[Postprocess].count();
     perfMap["5. output postprocessing"] = info;
index ce42b250617a839ca466f6707c46dc8df37536cb..070965054b74a034e0652232f81f439a8885d988 100644 (file)
@@ -17,7 +17,6 @@
 #include <cpp_interfaces/impl/ie_executable_network_internal.hpp>
 #include <threading/ie_itask_executor.hpp>
 
-#include <ngraph/runtime/tensor.hpp>
 #include <ngraph/runtime/tensor.hpp>
 #include <executable.hpp>
 
@@ -47,8 +46,6 @@ public:
     void waitPipeline();
     void inferPostprocess();
 
-    std::shared_ptr<ExecutableNetwork>                      _executableNetwork;
-
 private:
     void allocateDeviceBuffers();
     void allocateBlobs();
@@ -61,6 +58,7 @@ private:
         numOfStages
     };
 
+    std::shared_ptr<ExecutableNetwork>                                  _executableNetwork;
     std::array<InferenceEngine::ProfilingTask, numOfStages>             _profilingTask;
     // for performance counters
     std::array<std::chrono::duration<float, std::micro>, numOfStages>   _durations;
index 1e2060fbc7b9454b7133d5a022e3c200df30b16b..e8ca30c9cf5cadafc35380e7448d05caf944922e 100644 (file)
@@ -5,9 +5,10 @@
 #include "template_pattern_transformation.hpp"
 #include "template_function_transformation.hpp"
 
-#include <ngraph/ngraph.hpp>
 #include <ngraph/opsets/opset3.hpp>
+#include <ngraph/pass/manager.hpp>
 #include <ngraph/pattern/op/wrap_type.hpp>
+#include <ngraph/rt_info.hpp>
 
 using namespace ngraph;
 
index 0220a6d6b9b24a68faf0ea6f86afea454e3b057d..c9346ef59e49255d1fdbf0622e52f07e9e96308a 100644 (file)
@@ -4,10 +4,7 @@
 
 #pragma once
 
-#include <vector>
-#include <memory>
-
-#include <ngraph/ngraph.hpp>
+#include <ngraph/pass/graph_rewrite.hpp>
 
 namespace ngraph {
 namespace pass {
index 55113183166d2b7d52dd3472bf3a7178e7be9c05..4343c20f4a13f9ebec6126110ce67ebdd5497679 100644 (file)
@@ -2,34 +2,20 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-
-#include <utility>
-#include <memory>
-#include <vector>
-#include <sstream>
-#include <regex>
-#include <string>
-#include <map>
-
 #include <ie_metric_helpers.hpp>
-#include <details/ie_cnn_network_tools.h>
 #include <ie_plugin_config.hpp>
-#include <ie_util_internal.hpp>
-#include <inference_engine.hpp>
-#include <file_utils.h>
+
+#include <hetero/hetero_plugin_config.hpp>
 #include <cpp_interfaces/base/ie_plugin_base.hpp>
-#include <cpp_interfaces/interface/ie_internal_plugin_config.hpp>
 #include <threading/ie_executor_manager.hpp>
-#include <graph_tools.hpp>
-#include <ie_input_info.hpp>
-#include <ie_layouts.h>
-#include <hetero/hetero_plugin_config.hpp>
-#include <backend.hpp>
+
+#include <ngraph/op/util/op_types.hpp>
 #include <ngraph/specialize_function.hpp>
 #include <ngraph/pass/manager.hpp>
 #include <ngraph/opsets/opset.hpp>
 #include <transformations/common_optimizations/common_optimizations.hpp>
 #include <transformations/rt_info/fused_names_attribute.hpp>
+
 #include "template/template_config.hpp"
 #include "template_plugin.hpp"
 #include "template_executable_network.hpp"
@@ -40,10 +26,14 @@ using namespace TemplatePlugin;
 
 // ! [plugin:ctor]
 Plugin::Plugin() {
-    // TODO: fill with actual device name
+    // TODO: fill with the actual device name and backend engine
     _pluginName = "TEMPLATE";
+
+    // create ngraph backend which performs inference using ngraph reference implementations
     ngraph::runtime::Backend::set_backend_shared_library_search_directory("");
     _backend = ngraph::runtime::Backend::create("INTERPRETER");
+
+    // create default stream executor with a given name
     _waitExecutor = ExecutorManager::getInstance()->getIdleCPUStreamsExecutor({"TemplateWaitExecutor"});
 }
 // ! [plugin:ctor]
@@ -58,9 +48,10 @@ Plugin::~Plugin() {
 }
 // ! [plugin:dtor]
 
-// ! [plugin:transform]
-std::shared_ptr<ngraph::Function> Plugin::Transform(const std::shared_ptr<const ngraph::Function>& function) {
-    // 1.Copy ngraph::Function first to apply some transformations which modify original ngraph::Function
+// ! [plugin:transform_network]
+
+std::shared_ptr<ngraph::Function> TransformNetwork(const std::shared_ptr<const ngraph::Function>& function) {
+    // 1. Copy ngraph::Function first to apply some transformations which modify the original ngraph::Function
     const bool shareConsts = false, constFolding = false;
     std::vector<::ngraph::element::Type> new_types;
     std::vector<::ngraph::PartialShape> new_shapes;
@@ -70,10 +61,11 @@ std::shared_ptr<ngraph::Function> Plugin::Transform(const std::shared_ptr<const
         new_types.emplace_back(parameter->get_element_type());
     }
 
-    auto copyFunction = ngraph::specialize_function(std::const_pointer_cast<ngraph::Function>(function),
+    auto clonedNetwork = ngraph::specialize_function(std::const_pointer_cast<ngraph::Function>(function),
         new_types, new_shapes, std::vector<void *>(new_types.size(), nullptr), constFolding, shareConsts);
 
-    copyFunction->set_friendly_name(function->get_friendly_name());
+    auto transformedNetwork = clonedNetwork;
+    transformedNetwork->set_friendly_name(function->get_friendly_name());
 
     // 2. Perform common optimizations and device-specific transformations
     ngraph::pass::Manager passManager;
@@ -86,16 +78,12 @@ std::shared_ptr<ngraph::Function> Plugin::Transform(const std::shared_ptr<const
     // ..
 
     // After `run_passes`, we have the transformed function, where operations match device operations,
-    // and we can create device hardware-dependent graph
-    passManager.run_passes(copyFunction);
+    // and we can create device backend-dependent graph
+    passManager.run_passes(transformedNetwork);
 
-    // 3. Iterate over operations and create hardware-specific ngraph
-    for (const auto& op : copyFunction->get_ordered_ops()) {
-        // TODO: map ngraph `op` to device operation
-    }
-    return copyFunction;
+    return transformedNetwork;
 }
-// ! [plugin:transform]
+// ! [plugin:transform_network]
 
 // ! [plugin:load_exe_network_impl]
 InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const InferenceEngine::ICNNNetwork & network,
@@ -135,7 +123,7 @@ InferenceEngine::ExecutableNetworkInternal::Ptr Plugin::LoadExeNetworkImpl(const
         THROW_IE_EXCEPTION << "TEMPLATE plugin can compile only IR v10 networks";
     }
 
-    return std::make_shared<ExecutableNetwork>(Transform(function), cfg, std::static_pointer_cast<Plugin>(shared_from_this()));
+    return std::make_shared<ExecutableNetwork>(function, cfg, std::static_pointer_cast<Plugin>(shared_from_this()));
 }
 // ! [plugin:load_exe_network_impl]
 
@@ -166,14 +154,17 @@ void Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config, Q
     if (function == nullptr) {
          THROW_IE_EXCEPTION << "Template Plugin supports only ngraph cnn network representation";
     }
-    // First of all we should store initial input operation set
+
+    // 1. First of all, we should store the initial input operation set
     std::unordered_set<std::string> originalOps;
     for (auto&& node : function->get_ops()) {
         originalOps.emplace(node->get_friendly_name());
     }
-    // It is needed to apply all transformations as it is done in LoadExeNetworkImpl
-    auto transformedFunction = Transform(function);
-    // The same input node can be transformed into supported and unsupported backend node
+
+    // 2. Apply the same transformations as are applied in LoadExeNetworkImpl
+    auto transformedFunction = TransformNetwork(function);
+
+    // 3. The same input node can be transformed into both supported and unsupported backend nodes
     // So we need to store both the supported and the unsupported node sets
     std::unordered_set<std::string> supported;
     std::unordered_set<std::string> unsupported;
@@ -183,6 +174,7 @@ void Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config, Q
             // Extract transformation history from transformed node as list of nodes
             for (auto&& fusedLayerName : ngraph::getFusedNamesVector(node)) {
                 // Filter just nodes from original operation set
+                // TODO: fill with actual decision rules based on whether the kernel is supported by the backend
                 if (contains(originalOps, fusedLayerName)) {
                     if (opset.contains_type_insensitive(fusedLayerName)) {
                         supported.emplace(fusedLayerName);
@@ -193,7 +185,8 @@ void Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config, Q
             }
         }
     }
-    // The result set should contains just nodes from supported set
+
+    // 4. The result set should contain only nodes from the supported set
     for (auto&& layerName : supported) {
         if (!contains(unsupported, layerName)) {
             res.supportedLayersMap.emplace(layerName, GetName());
@@ -205,6 +198,7 @@ void Plugin::QueryNetwork(const ICNNNetwork &network, const ConfigMap& config, Q
 // ! [plugin:add_extension]
 void Plugin::AddExtension(InferenceEngine::IExtensionPtr /*extension*/) {
     // TODO: add extensions if plugin supports extensions
+    THROW_IE_EXCEPTION << NOT_IMPLEMENTED_str;
 }
 // ! [plugin:add_extension]
 
index 6e611e58ed3436516edb6cf30b8a56ee81e36ee9..22519dfe55ba36a27a4f4b040fd133b536b6af83 100644 (file)
@@ -4,18 +4,10 @@
 
 #pragma once
 
-#include <inference_engine.hpp>
-#include <description_buffer.hpp>
+#include "template_config.hpp"
+#include "template_executable_network.hpp"
 #include <cpp_interfaces/impl/ie_plugin_internal.hpp>
 
-#include <memory>
-#include <string>
-#include <map>
-#include <unordered_map>
-#include <vector>
-
-#include "template_executable_network.hpp"
-#include "template_config.hpp"
 
 #include "backend.hpp"
 
@@ -45,10 +38,8 @@ private:
     friend class ExecutableNetwork;
     friend class TemplateInferRequest;
 
-    static std::shared_ptr<ngraph::Function> Transform(const std::shared_ptr<const ngraph::Function>& function);
-
-    Configuration                               _cfg;
     std::shared_ptr<ngraph::runtime::Backend>   _backend;
+    Configuration                               _cfg;
     InferenceEngine::ITaskExecutor::Ptr         _waitExecutor;
 };
 
index be10445bf5d043ee15ce991cbce7c0a31c12a82f..627e112c4e223e48e0c74269df501a0bff010009 100644 (file)
@@ -3,7 +3,8 @@
 # SPDX-License-Identifier: Apache-2.0
 #
 
-set(TARGET_NAME TemplateFuncTests)
+# [cmake:functional_tests]
+set(TARGET_NAME templateFuncTests)
 
 addIeTargetTest(
         NAME ${TARGET_NAME}
@@ -12,9 +13,10 @@ addIeTargetTest(
             templatePlugin
         LINK_LIBRARIES
             IE::funcSharedTests
+        INCLUDES
+            "${IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR}/include"
         ADD_CPPLINT
         LABELS
             TEMPLATE
 )
-
-target_include_directories(${TARGET_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../../include)
+# [cmake:functional_tests]
index 6b9d7ebf1e9e6d9f7f2f93d71b61354579a8bd97..981e0527a9b7a9eccdb72020003ddd34749bd645 100644 (file)
@@ -8,49 +8,51 @@
 #include <template/template_config.hpp>
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
-    const std::vector<InferenceEngine::Precision> netPrecisions = {
-            InferenceEngine::Precision::FP32,
-            InferenceEngine::Precision::FP16
-    };
-
-    const std::vector<std::map<std::string, std::string>> configs = {
-            {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
-            {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}},
-            {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), "8"}},
-    };
-
-    const std::vector<std::map<std::string, std::string>> inconfigs = {
-            {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), "OFF"}},
-    };
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(inconfigs)),
-                            IncorrectConfigTests::getTestCaseName);
-
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(inconfigs)),
-                            IncorrectConfigAPITests::getTestCaseName);
-
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            CorrectConfigAPITests::getTestCaseName);
-
-    INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigTests,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            CorrectConfigAPITests::getTestCaseName);
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_AUTO}},
+    {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), InferenceEngine::PluginConfigParams::CPU_THROUGHPUT_NUMA}},
+    {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), "8"}},
+};
+
+const std::vector<std::map<std::string, std::string>> inconfigs = {
+    {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), CONFIG_VALUE(NO)}},
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigTests,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(inconfigs)),
+                        IncorrectConfigTests::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, IncorrectConfigAPITests,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(inconfigs)),
+                        IncorrectConfigAPITests::getTestCaseName);
+
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CorrectConfigAPITests,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        CorrectConfigAPITests::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, CorrectConfigTests,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        CorrectConfigAPITests::getTestCaseName);
+
 } // namespace
\ No newline at end of file
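
For context, the configuration key exercised by these tests is passed at LoadNetwork time; a short sketch, assuming a placeholder model path and an arbitrary stream count:

    #include <inference_engine.hpp>
    #include <template/template_config.hpp>

    int main() {
        InferenceEngine::Core ie;
        auto network = ie.ReadNetwork("model.xml");  // placeholder model path
        // Request 8 throughput streams from the TEMPLATE plugin; values it cannot parse are expected to be rejected
        auto executableNetwork = ie.LoadNetwork(network, "TEMPLATE",
            {{TEMPLATE_CONFIG_KEY(THROUGHPUT_STREAMS), "8"}});
        return 0;
    }
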
index e575e4e9764d62c959625fb9e9b0143d25823206..354572bbaf295ce23e081f47794c92c631d120d3 100644 (file)
@@ -5,23 +5,25 @@
 #include "behavior/cpp_holders.hpp"
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
-    const std::vector<std::vector<int >> orders = {
-            // 0 - plugin
-            // 1 - executable_network
-            // 2 - infer_request
-            {0, 1, 2},
-            {0, 2, 1},
-            {1, 0, 2},
-            {1, 2, 0},
-            {2, 0, 1},
-            {2, 1, 0}
-    };
 
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTest,
-            ::testing::Combine(
-            ::testing::Values("TEMPLATE"),
-            ::testing::ValuesIn(orders)),
-            HoldersTest::getTestCaseName);
+const std::vector<std::vector<int >> orders = {
+    // 0 - plugin
+    // 1 - executable_network
+    // 2 - infer_request
+    {0, 1, 2},
+    {0, 2, 1},
+    {1, 0, 2},
+    {1, 2, 0},
+    {2, 0, 1},
+    {2, 1, 0}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, HoldersTest,
+        ::testing::Combine(
+        ::testing::Values("TEMPLATE"),
+        ::testing::ValuesIn(orders)),
+        HoldersTest::getTestCaseName);
 
 }  // namespace
\ No newline at end of file
index aaa279b476b3018037cb6abbd986e7e5aab56102..8951fdd037b11fcac8ee405e1a298bc9b4733bb0 100644 (file)
@@ -7,20 +7,23 @@
 #include "behavior/exec_graph_info.hpp"
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
-    const std::vector<InferenceEngine::Precision> netPrecisions = {
-            InferenceEngine::Precision::FP32,
-            InferenceEngine::Precision::FP16
-    };
-
-    const std::vector<std::map<std::string, std::string>> configs = {
-          {}
-    };
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            ExecGraphTests::getTestCaseName);
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        ExecGraphTests::getTestCaseName);
+
 }  // namespace
\ No newline at end of file
index ff858ac4ae940b7e9d86a541c53d0e67e318c6ec..be37bf7bff8a8451d6efc403ccc8063c08c3dde7 100644 (file)
@@ -7,20 +7,23 @@
 #include "behavior/infer_request.hpp"
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
-    const std::vector<InferenceEngine::Precision> netPrecisions = {
-            InferenceEngine::Precision::FP32,
-            InferenceEngine::Precision::FP16
-    };
-
-    const std::vector<std::map<std::string, std::string>> configs = {
-            {}
-    };
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            InferRequestTests::getTestCaseName);
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestTests,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        InferRequestTests::getTestCaseName);
+
 }  // namespace
index 48f9e096f4f280297dc92506d2de1a05f1ad2c92..8b02fad1a403c110cb21d1a421d50ff1a75f5d57 100644 (file)
@@ -7,14 +7,16 @@
 #include "behavior/infer_request_callback.hpp"
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
+
 const std::vector<InferenceEngine::Precision> netPrecisions = {
-        InferenceEngine::Precision::FP32,
-        InferenceEngine::Precision::FP16
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
 };
 
 const std::vector<std::map<std::string, std::string>> configs = {
-        {}
+    {}
 };
 
 INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, CallbackTests,
index b0054fcdb554479d26f5d1a79700172249de9c20..1ad9080221860983f23093cac9fb1c7fed161e70 100644 (file)
@@ -7,20 +7,23 @@
 #include "behavior/infer_request_config.hpp"
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
-    const std::vector<InferenceEngine::Precision> netPrecisions = {
-            InferenceEngine::Precision::FP32,
-            InferenceEngine::Precision::FP16
-    };
-
-    const std::vector<std::map<std::string, std::string>> configs = {
-            {}
-    };
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            InferConfigTests::getTestCaseName);
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferConfigTests,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        InferConfigTests::getTestCaseName);
+
 }  // namespace
index 371f01a3b2f291ee046689838f70879bbfc2d53c..91154881495d4df22978a89fb32406f597546cf4 100644 (file)
@@ -7,23 +7,23 @@
 #include "behavior/infer_request_input.hpp"
 
 using namespace BehaviorTestsDefinitions;
-namespace {
-
-    const std::vector<InferenceEngine::Precision> netPrecisions = {
-            InferenceEngine::Precision::FP32,
-            InferenceEngine::Precision::FP16
-    };
-
-    const std::vector<std::map<std::string, std::string>> configs = {
-            {}
-    };
 
+namespace {
 
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            InferRequestInputTests::getTestCaseName);
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestInputTests,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        InferRequestInputTests::getTestCaseName);
 
 }  // namespace
index ceb069e71cc42522af76cb1c874609b434a31f90..0bb9bd7583085f250bce09065092f07866ba3fc3 100644 (file)
@@ -7,22 +7,23 @@
 #include "behavior/infer_request_output.hpp"
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
-    const std::vector<InferenceEngine::Precision> netPrecisions = {
-            InferenceEngine::Precision::FP32,
-            InferenceEngine::Precision::FP16
-    };
-
-    const std::vector<std::map<std::string, std::string>> configs = {
-            {}
-    };
-
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            InferRequestOutputTests::getTestCaseName);
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, InferRequestOutputTests,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        InferRequestOutputTests::getTestCaseName);
 
 }  // namespace
index 4a80067b51087f4e6b9c3533a6d56118a50420c1..96a25da7a32d8fbf140ff8f27b3971430428fbc8 100644 (file)
@@ -5,32 +5,34 @@
 #include "behavior/layout.hpp"
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
-    const std::vector<std::map<std::string, std::string>> configs = {
-            {}
-    };
-
-    const std::vector<InferenceEngine::Layout> Layout = {
-           InferenceEngine::Layout::NCHW,
-           InferenceEngine::Layout::CHW,
-           InferenceEngine::Layout::NC,
-           InferenceEngine::Layout::C
-    };
-
-    const std::vector<std::vector<size_t>> inputShapes = {
-            { 1, 3, 16, 16 },
-            { 3, 32, 16 },
-            { 1, 3 },
-            { 3 }
-    };
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, LayoutTest,
-                            ::testing::Combine(
-                                    ::testing::Values(InferenceEngine::Precision::FP32),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs),
-                                    ::testing::ValuesIn(Layout),
-                                    ::testing::ValuesIn(inputShapes)),
-                            LayoutTest::getTestCaseName);
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+const std::vector<InferenceEngine::Layout> Layout = {
+    InferenceEngine::Layout::NCHW,
+    InferenceEngine::Layout::CHW,
+    InferenceEngine::Layout::NC,
+    InferenceEngine::Layout::C
+};
+
+const std::vector<std::vector<size_t>> inputShapes = {
+    { 1, 3, 16, 16 },
+    { 3, 32, 16 },
+    { 1, 3 },
+    { 3 }
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, LayoutTest,
+                        ::testing::Combine(
+                                ::testing::Values(InferenceEngine::Precision::FP32),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs),
+                                ::testing::ValuesIn(Layout),
+                                ::testing::ValuesIn(inputShapes)),
+                        LayoutTest::getTestCaseName);
 
 }  // namespace
\ No newline at end of file
index 6353f625364bc46fa4b34c33224796e975cf9c95..bd87269a9a058be2ab8853842cce96c99021358d 100644 (file)
@@ -7,21 +7,23 @@
 #include "behavior/set_preprocess.hpp"
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
-    const std::vector<InferenceEngine::Precision> netPrecisions = {
-            InferenceEngine::Precision::FP32,
-            InferenceEngine::Precision::FP16
-    };
-
-    const std::vector<std::map<std::string, std::string>> configs = {
-            {}
-    };
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            PreprocessTest::getTestCaseName);
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, PreprocessTest,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        PreprocessTest::getTestCaseName);
 
 }  // namespace
\ No newline at end of file
index c3d9745521d91ba20c816baec7435a2d72b6a626..a6c11635869ce28f18e11f725e1bf2b4123fc863 100644 (file)
@@ -5,35 +5,37 @@
 #include "behavior/test_plugin.hpp"
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
-    const std::vector<InferenceEngine::Precision> netPrecisions = {
-            InferenceEngine::Precision::FP32,
-            InferenceEngine::Precision::FP16
-    };
-
-    const std::vector<std::map<std::string, std::string>> configs = {
-            {}
-    };
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTests,
-                            ::testing::Combine(
-                                    ::testing::Values(InferenceEngine::Precision::FP32),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            BehaviorTests::getTestCaseName);
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestInput,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            BehaviorTestInput::getTestCaseName);
-
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestOutput,
-                            ::testing::Combine(
-                                    ::testing::ValuesIn(netPrecisions),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            BehaviorTestOutput::getTestCaseName);
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTests,
+                        ::testing::Combine(
+                                ::testing::Values(InferenceEngine::Precision::FP32),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        BehaviorTests::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestInput,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        BehaviorTestInput::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, BehaviorTestOutput,
+                        ::testing::Combine(
+                                ::testing::ValuesIn(netPrecisions),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        BehaviorTestOutput::getTestCaseName);
 
 }  // namespace
index 315700cb8b35142aadeb087f5508f9fa2e099a28..131e6872c687d374792c706091bb7dc16b106519 100644 (file)
@@ -5,16 +5,18 @@
 #include "behavior/version.hpp"
 
 using namespace BehaviorTestsDefinitions;
+
 namespace {
-    const std::vector<std::map<std::string, std::string>> configs = {
-            {}
-    };
 
-    INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, VersionTest,
-                            ::testing::Combine(
-                                    ::testing::Values(InferenceEngine::Precision::FP32),
-                                    ::testing::Values("TEMPLATE"),
-                                    ::testing::ValuesIn(configs)),
-                            VersionTest::getTestCaseName);
+const std::vector<std::map<std::string, std::string>> configs = {
+    {}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, VersionTest,
+                        ::testing::Combine(
+                                ::testing::Values(InferenceEngine::Precision::FP32),
+                                ::testing::Values("TEMPLATE"),
+                                ::testing::ValuesIn(configs)),
+                        VersionTest::getTestCaseName);
 
 }  // namespace
diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/hetero/query_network.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/hetero/query_network.cpp
new file mode 100644 (file)
index 0000000..53d7777
--- /dev/null
@@ -0,0 +1,21 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "hetero/query_network.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/subgraph_builders.hpp"
+
+namespace {
+using namespace HeteroTests;
+
+auto ConvBias = ngraph::builder::subgraph::makeConvBias();
+
+INSTANTIATE_TEST_CASE_P(smoke_FullySupportedTopologies, QueryNetworkTest,
+                        ::testing::Combine(
+                                ::testing::Values("TEMPLATE", "HETERO:TEMPLATE", "MULTI:TEMPLATE"),
+                                ::testing::Values(ConvBias)),
+                        QueryNetworkTest::getTestCaseName);
+}  // namespace
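
The device strings listed above can also be used directly as LoadNetwork targets; a hedged sketch of running a model through the HETERO wrapper over the template plugin ("model.xml" is a placeholder):

    #include <inference_engine.hpp>

    int main() {
        InferenceEngine::Core ie;
        auto network = ie.ReadNetwork("model.xml");  // placeholder model path
        // HETERO assigns subgraphs to the listed devices based on QueryNetwork results;
        // with a single device listed, everything TEMPLATE supports is assigned to it
        auto executableNetwork = ie.LoadNetwork(network, "HETERO:TEMPLATE");
        auto request = executableNetwork.CreateInferRequest();
        request.Infer();
        return 0;
    }
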
diff --git a/docs/template_plugin/tests/functional/shared_tests_instances/hetero/synthetic.cpp b/docs/template_plugin/tests/functional/shared_tests_instances/hetero/synthetic.cpp
new file mode 100644 (file)
index 0000000..4cca016
--- /dev/null
@@ -0,0 +1,25 @@
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "hetero/synthetic.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "ngraph_functions/subgraph_builders.hpp"
+
+namespace {
+using namespace HeteroTests;
+
+INSTANTIATE_TEST_CASE_P(smoke_SingleMajorNode, HeteroSyntheticTest,
+                        ::testing::Combine(
+                                ::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
+                                ::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_singleMajorNodeFunctions)),
+                        HeteroSyntheticTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(nightly_RandomMajorNodes, HeteroSyntheticTest,
+                        ::testing::Combine(
+                                ::testing::Values(std::vector<PluginParameter>{{"TEMPLATE0", "templatePlugin"}, {"TEMPLATE1", "templatePlugin"}}),
+                                ::testing::ValuesIn(HeteroTests::HeteroSyntheticTest::_randomMajorNodeFunctions)),
+                        HeteroSyntheticTest::getTestCaseName);
+}  // namespace
index b40daaeaa85788445c6f4403c2d818430e6648a6..c04f0b0c62a360d89ea799c0445c42e175d3c486 100644 (file)
@@ -11,21 +11,23 @@ using namespace LayerTestsDefinitions;
 
 namespace {
 
+// ! [test_convolution:declare_parameters]
 const std::vector<InferenceEngine::Precision> netPrecisions = {
-        InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP32,
 };
 
 /* ============= 2D Convolution ============= */
+
 const std::vector<std::vector<size_t >> kernels = {{3, 3},
-                                                          {3, 5}};
+                                                   {3, 5}};
 const std::vector<std::vector<size_t >> strides = {{1, 1},
-                                                          {1, 3}};
+                                                   {1, 3}};
 const std::vector<std::vector<ptrdiff_t>> padBegins = {{0, 0},
                                                        {0, 3}};
 const std::vector<std::vector<ptrdiff_t>> padEnds = {{0, 0},
                                                      {0, 3}};
 const std::vector<std::vector<size_t >> dilations = {{1, 1},
-                                                            {3, 1}};
+                                                     {3, 1}};
 const std::vector<size_t> numOutChannels = {1, 5};
 const std::vector<ngraph::op::PadType> padTypes = {
         ngraph::op::PadType::EXPLICIT,
@@ -41,6 +43,8 @@ const auto conv2DParams_ExplicitPadding = ::testing::Combine(
         ::testing::ValuesIn(numOutChannels),
         ::testing::Values(ngraph::op::PadType::EXPLICIT)
 );
+// ! [test_convolution:declare_parameters]
+
 const auto conv2DParams_AutoPadValid = ::testing::Combine(
         ::testing::ValuesIn(kernels),
         ::testing::ValuesIn(strides),
@@ -51,6 +55,7 @@ const auto conv2DParams_AutoPadValid = ::testing::Combine(
         ::testing::Values(ngraph::op::PadType::VALID)
 );
 
+// ! [test_convolution:instantiate]
 INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
                         ::testing::Combine(
                                 conv2DParams_ExplicitPadding,
@@ -58,6 +63,7 @@ INSTANTIATE_TEST_CASE_P(Convolution2D_ExplicitPadding, ConvolutionLayerTest,
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values("TEMPLATE")),
                         ConvolutionLayerTest::getTestCaseName);
+// ! [test_convolution:instantiate]
 
 INSTANTIATE_TEST_CASE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest,
                         ::testing::Combine(
@@ -66,7 +72,9 @@ INSTANTIATE_TEST_CASE_P(Convolution2D_AutoPadValid, ConvolutionLayerTest,
                                 ::testing::Values(std::vector<size_t >({1, 3, 30, 30})),
                                 ::testing::Values("TEMPLATE")),
                         ConvolutionLayerTest::getTestCaseName);
+
 /* ============= 3D Convolution ============= */
+
 const std::vector<std::vector<size_t >> kernels3d = {{3, 3, 3},
                                                             {3, 5, 3}};
 const std::vector<std::vector<ptrdiff_t>> paddings3d = {{0, 0, 0},
index ce810bede1a97b2135da52f2ed0fcd478de099f6..a456629d598972f932c322b7258d84f308d5775b 100644 (file)
@@ -144,7 +144,6 @@ protected:
      * @note The function is used in
      * InferencePluginInternal::LoadNetwork(IExecutableNetwork::Ptr&, const ICNNNetwork&, const std::map<std::string, std::string>&)
      * which performs common steps first and calls this plugin-dependent method implementation after.
-     * @param core A pointer to ICore interface.
      * @param network A network object
      * @param config string-string map of config parameters relevant only for this load operation
      * @return Shared pointer to the ExecutableNetwork object
index 6c22c60f5a9f64d3d6281e0b367734b547fff3dd..07629865b509ed4e48800ccf2b9b9371ddd8b971 100644 (file)
 /**
  * @brief A namespace with const values for Execution Graph parameter names.
  *  
- *        Executable Graph Info is represented in ICNNNetwork format with general CNNLayer nodes inside
- *        including connections between the nodes. Each node describes an executable hardware-specific
- *        primitive and stores its parameters within CNNLayer::params map.
- *        There is a list of general keys for the parameters map.
+ * Executable Graph Info is represented in ICNNNetwork format with general CNNLayer nodes inside
+ * including connections between the nodes. Each node describes an executable hardware-specific
+ * primitive and stores its parameters within CNNLayer::params map.
+ * There is a list of general keys for the parameters map.
  */
 namespace ExecGraphInfoSerialization {
 
 /**
+ * @ingroup ie_dev_exec_graph
  * @brief Used to get a string of layer names separated by a comma
  *        from the original IR, which were fused/merged to the current executable primitive.
  */
 static const char ORIGINAL_NAMES[] = "originalLayersNames";
 
 /**
+ * @ingroup ie_dev_exec_graph
  * @brief Used to get a type of the executable primitive.
  */
 static const char IMPL_TYPE[] = "primitiveType";
 
 /**
+ * @ingroup ie_dev_exec_graph
  * @brief Used to get output precisions of the executable primitive.
  */
 static const char OUTPUT_PRECISIONS[] = "outputPrecisions";
 
 /**
+ * @ingroup ie_dev_exec_graph
  * @brief Used to get a value of execution time of the executable primitive.
  */
 static const char PERF_COUNTER[] = "execTimeMcs";
 
 /**
+ * @ingroup ie_dev_exec_graph
  * @brief Used to get output layouts of primitive.
  */
 static const char OUTPUT_LAYOUTS[] = "outputLayouts";
 
 /**
+ * @ingroup ie_dev_exec_graph
  * @brief Used to get an execution order of primitive.
  */
 static const char EXECUTION_ORDER[] = "execOrder";
 
 /**
+ * @ingroup ie_dev_exec_graph
  * @brief Used to get a type of primitive.
  */
 static const char LAYER_TYPE[] = "layerType";
 
+/**
+ * @ingroup ie_dev_exec_graph
+ * @brief The Execution node is used to represent a node in the execution graph.
+ * 
+ * It contains the following types of information in its node runtime information:
+ * - ExecGraphInfoSerialization::ORIGINAL_NAMES
+ * - ExecGraphInfoSerialization::IMPL_TYPE
+ * - ExecGraphInfoSerialization::OUTPUT_PRECISIONS
+ * - ExecGraphInfoSerialization::PERF_COUNTER
+ * - ExecGraphInfoSerialization::OUTPUT_LAYOUTS
+ * - ExecGraphInfoSerialization::EXECUTION_ORDER
+ * - ExecGraphInfoSerialization::LAYER_TYPE
+ */
 class INFERENCE_ENGINE_API_CLASS(ExecutionNode) : public ngraph::Node {
 public:
     static constexpr ngraph::NodeTypeInfo type_info { "ExecutionNode", 0 };
     const ngraph::NodeTypeInfo& get_type_info() const override;
 
+    /**
+     * @brief A default constructor with no node inputs and 0 output ports.
+     */
     ExecutionNode() = default;
 
+    /**
+     * @brief      Constructs a new execution node with the given parameters
+     *
+     * @param[in]  arguments    Input nodes
+     * @param[in]  output_size  The number of output ports
+     */
     ExecutionNode(const ngraph::OutputVector& arguments, size_t output_size = 1) :
         Node(arguments, output_size) { }
 
+    /**
+     * @brief      Creates a new execution node with the same state, but different input nodes
+     *
+     * @param[in]  inputs  The input nodes
+     *
+     * @return     A newly created execution node
+     */
     std::shared_ptr<ngraph::Node> clone_with_new_inputs(const ngraph::OutputVector& inputs) const override {
         auto cloned = std::make_shared<ExecutionNode>();
 
index 9e99addb901c3743ed220b85ee8a595bfb2c5822..84502732ff5472d585cb4ccad5ce9a9bd56559da 100644 (file)
@@ -53,6 +53,9 @@ namespace InferenceEngine {
  * @defgroup ie_dev_api_system_conf System configuration utilities
  * @brief API to get information about the system, core processor capabilities
  * 
+ * @defgroup ie_dev_exec_graph Execution graph utilities
+ * @brief Contains `ExecutionNode` and its properties
+ * 
  * @defgroup ie_dev_api_error_debug Error handling and debug helpers
 * @brief Utility methods to work with errors or exceptional situations
  * 
index d7caea2b5e0778a58f7c09208ef13d62bd47dbab..a9add6d52536836afe244bfda3bc797403a99eb3 100644 (file)
@@ -13,6 +13,7 @@
 #include "ngraph_functions/builders.hpp"
 #include "ngraph_functions/utils/ngraph_helpers.hpp"
 
+// ! [test_convolution:definition]
 typedef std::tuple<
         InferenceEngine::SizeVector,    // Kernel size
         InferenceEngine::SizeVector,    // Strides
@@ -39,5 +40,6 @@ public:
 protected:
     void SetUp() override;
 };
+// ! [test_convolution:definition]
 
 }  // namespace LayerTestsDefinitions