--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "behavior/exec_graph_info.hpp"
+
+using namespace LayerTestsDefinitions;
+
+// Test configuration for the reference TEMPLATE plugin.
+namespace {
+ // Network precisions to cover for ExecGraph behavior tests.
+ const std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::FP32,
+ InferenceEngine::Precision::FP16
+ };
+
+ // Plugin configuration maps; a single empty map means "default settings".
+ const std::vector<std::map<std::string, std::string>> configs = {
+ {}
+ };
+
+ // Instantiate every ExecGraphTests case for each (precision x config)
+ // combination on the TEMPLATE device.
+ INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
+ ::testing::Combine(
+ ::testing::ValuesIn(netPrecisions),
+ ::testing::Values("TEMPLATE"),
+ ::testing::ValuesIn(configs)),
+ ExecGraphTests::getTestCaseName);
+} // namespace
+++ /dev/null
-// Copyright (C) 2018-2019 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "behavior_test_plugin_exec_graph_info.hpp"
-#include "template_test_data.hpp"
-
-INSTANTIATE_TEST_CASE_P(
- BehaviorTest,
- BehaviorPluginTestExecGraphInfo,
- ValuesIn(supportedValues),
- getTestCaseName);
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "multi-device/multi_device_config.hpp"
+
+#include "behavior/exec_graph_info.hpp"
+
+using namespace LayerTestsDefinitions;
+
+// Test configuration for the CPU plugin, plus a MULTI-device variant
+// that routes to CPU via the device-priorities config key.
+namespace {
+ // Network precisions to cover for ExecGraph behavior tests.
+ const std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::FP32,
+ InferenceEngine::Precision::FP16
+ };
+
+ // Plain CPU run: a single empty map means "default settings".
+ const std::vector<std::map<std::string, std::string>> configs = {
+ {},
+ };
+ // MULTI run: device priorities point the MULTI plugin at CPU.
+ const std::vector<std::map<std::string, std::string>> multiConfigs = {
+ {{ InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES , CommonTestUtils::DEVICE_CPU}}
+ };
+
+ INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
+ ::testing::Combine(
+ ::testing::ValuesIn(netPrecisions),
+ ::testing::Values(CommonTestUtils::DEVICE_CPU),
+ ::testing::ValuesIn(configs)),
+ ExecGraphTests::getTestCaseName);
+
+ INSTANTIATE_TEST_CASE_P(smoke_Multi_BehaviorTests, ExecGraphTests,
+ ::testing::Combine(
+ ::testing::ValuesIn(netPrecisions),
+ ::testing::Values(CommonTestUtils::DEVICE_MULTI),
+ ::testing::ValuesIn(multiConfigs)),
+ ExecGraphTests::getTestCaseName);
+} // namespace
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "multi-device/multi_device_config.hpp"
+
+#include "behavior/exec_graph_info.hpp"
+
+using namespace LayerTestsDefinitions;
+
+// Test configuration for the GNA plugin.
+namespace {
+ // Network precisions to cover for ExecGraph behavior tests.
+ const std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::FP32,
+ InferenceEngine::Precision::FP16
+ };
+
+ // Plugin configuration maps; a single empty map means "default settings".
+ const std::vector<std::map<std::string, std::string>> configs = {
+ {},
+ };
+
+ // Instantiate every ExecGraphTests case for each (precision x config)
+ // combination on the GNA device.
+ INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
+ ::testing::Combine(
+ ::testing::ValuesIn(netPrecisions),
+ ::testing::Values(CommonTestUtils::DEVICE_GNA),
+ ::testing::ValuesIn(configs)),
+ ExecGraphTests::getTestCaseName);
+
+} // namespace
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "multi-device/multi_device_config.hpp"
+
+#include "behavior/exec_graph_info.hpp"
+
+using namespace LayerTestsDefinitions;
+
+// Test configuration for the GPU (clDNN) plugin.
+namespace {
+ // Network precisions to cover for ExecGraph behavior tests.
+ const std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::FP32,
+ InferenceEngine::Precision::FP16
+ };
+
+ // Plugin configuration maps; a single empty map means "default settings".
+ const std::vector<std::map<std::string, std::string>> configs = {
+ {},
+ };
+
+ // Instantiate every ExecGraphTests case for each (precision x config)
+ // combination on the GPU device.
+ INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
+ ::testing::Combine(
+ ::testing::ValuesIn(netPrecisions),
+ ::testing::Values(CommonTestUtils::DEVICE_GPU),
+ ::testing::ValuesIn(configs)),
+ ExecGraphTests::getTestCaseName);
+
+} // namespace
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "multi-device/multi_device_config.hpp"
+
+#include "behavior/exec_graph_info.hpp"
+
+using namespace LayerTestsDefinitions;
+
+// Test configuration for the MYRIAD (VPU) plugin. FP16 only — the device's
+// native precision. NOTE(review): these cases appear to be skipped via a
+// ".*Behavior.*ExecGraphTests.*" entry in the plugin's disabledTestPatterns —
+// confirm the instantiation is still wanted while that pattern is in place.
+namespace {
+ const std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::FP16
+ };
+
+ // Plugin configuration maps; a single empty map means "default settings".
+ const std::vector<std::map<std::string, std::string>> configs = {
+ {}
+ };
+
+ INSTANTIATE_TEST_CASE_P(smoke_BehaviorTests, ExecGraphTests,
+ ::testing::Combine(
+ ::testing::ValuesIn(netPrecisions),
+ ::testing::Values(CommonTestUtils::DEVICE_MYRIAD),
+ ::testing::ValuesIn(configs)),
+ ExecGraphTests::getTestCaseName);
+} // namespace
".*ActivationLayerTest\\.CompareWithRefs/Log.*netPRC=FP32.*",
".*ActivationLayerTest\\.CompareWithRefs/Sigmoid.*netPRC=FP32.*",
".*ActivationLayerTest\\.CompareWithRefs/Relu.*netPRC=FP32.*",
+
+ // TODO: currently these tests are not applicable to myriadPlugin
+ ".*Behavior.*ExecGraphTests.*"
};
}
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <tuple>
+#include <vector>
+#include <string>
+#include <memory>
+#include "ie_extension.h"
+#include <condition_variable>
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "ngraph_functions/utils/ngraph_helpers.hpp"
+#include "ngraph_functions/builders.hpp"
+
+namespace LayerTestsDefinitions {
+ // Parameter tuple: (network precision, target device name, plugin config).
+ typedef std::tuple<
+ InferenceEngine::Precision, // Network precision
+ std::string, // Device name
+ std::map<std::string, std::string> // Config
+ > ExecGraphParams;
+
+// Parameterized behavior tests validating the executable-graph information
+// (ExecGraphInfo) a plugin reports for a loaded network: layer mapping,
+// perf-counter fields, and serialization.
+class ExecGraphTests : public testing::WithParamInterface<ExecGraphParams>,
+ public LayerTestsUtils::LayerTestsCommon {
+public:
+ // Builds a human-readable test-case name from the parameter tuple.
+ static std::string getTestCaseName(testing::TestParamInfo<ExecGraphParams> obj);
+
+protected:
+ void SetUp() override;
+ void TearDown() override;
+};
+
+} // namespace LayerTestsDefinitions
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <memory>
+#include <tuple>
+#include <vector>
+#include <string>
+
+#include <ie_core.hpp>
+#include <details/ie_cnn_network_tools.h>
+
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "exec_graph_info.hpp"
+#include "ngraph_functions/pass/convert_prc.hpp"
+#include "ngraph_functions/subgraph_builders.hpp"
+#include "behavior/exec_graph_info.hpp"
+
+
+namespace LayerTestsDefinitions {
+ // Builds the printable test-case name: "netPRC=<precision>_targetDevice=<device>"
+ // plus, when a config is present, the FIRST key/value pair only.
+ std::string ExecGraphTests::getTestCaseName(testing::TestParamInfo<ExecGraphParams> obj) {
+ InferenceEngine::Precision netPrecision;
+ std::string targetDevice;
+ std::map<std::string, std::string> configuration;
+ std::tie(netPrecision, targetDevice, configuration) = obj.param;
+ std::ostringstream result;
+ result << "netPRC=" << netPrecision.name() << "_";
+ result << "targetDevice=" << targetDevice;
+ if (!configuration.empty()) {
+ // Only the first config item is encoded in the name; additional
+ // items do not contribute to test-name uniqueness.
+ result << "configItem=" << configuration.begin()->first << "_" << configuration.begin()->second;
+ }
+ return result.str();
+ }
+
+ // Unpacks the test parameters and builds the fixed Conv->Pool->Relu
+ // subgraph used by all ExecGraph cases.
+ void ExecGraphTests::SetUp() {
+ InferenceEngine::Precision netPrecision;
+ // NOTE(review): netPrecision is extracted but never applied to the
+ // function below — presumably makeConvPoolRelu() uses a default
+ // precision; confirm this is intentional.
+ std::tie(netPrecision, targetDevice, configuration) = this->GetParam();
+ function = ngraph::builder::subgraph::makeConvPoolRelu();
+ }
+
+ // Drops the cached Core/plugin when the target is GPU so the device is
+ // released between test cases; other devices keep the shared cache.
+ void ExecGraphTests::TearDown() {
+ if (targetDevice.find(CommonTestUtils::DEVICE_GPU) != std::string::npos) {
+ PluginCache::get().reset();
+ }
+ }
+
+ // Splits `str` on the separator character `sep` using std::getline and
+ // returns the pieces in order. An empty input yields an empty vector.
+ // Used below to break the comma-separated ORIGINAL_NAMES attribute into
+ // individual layer names.
+ inline std::vector<std::string> separateStrToVec(std::string str, const char sep) {
+ std::vector<std::string> result;
+
+ std::istringstream stream(str);
+ std::string strVal;
+
+ while (getline(stream, strVal, sep)) {
+ result.push_back(strVal);
+ }
+ return result;
+ }
+
+// Verifies that, before any inference is run, the executable graph reported
+// by the plugin (a) marks every layer's perf counter as "not_executed" and
+// (b) maps every original layer name (via ORIGINAL_NAMES) back to the source
+// model. Devices other than CPU/GPU are expected to throw from
+// GetExecGraphInfo().
+TEST_P(ExecGraphTests, CheckExecGraphInfoBeforeExecution) {
+ // Skip test according to plugin specific disabledTestPatterns() (if any)
+ SKIP_IF_CURRENT_TEST_IS_DISABLED()
+ // Create CNNNetwork from ngraph::Function
+ InferenceEngine::CNNNetwork cnnNet(function);
+ InferenceEngine::CNNNetwork execGraph;
+ // Get Core from cache
+ auto ie = PluginCache::get().ie();
+ if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) {
+ // Load CNNNetwork to target plugins
+ auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
+ ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
+ // Create InferRequest
+ InferenceEngine::InferRequest req;
+ ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+ // Store all the original layers from the network
+ const auto originalLayers = function->get_ops();
+ std::map<std::string, int> originalLayersMap;
+ for (const auto &layer : originalLayers) {
+ // Result nodes are sink placeholders and never appear in the
+ // executable graph, so exclude them from the expected set.
+ if (layer->description() == "Result")
+ continue;
+ originalLayersMap[layer->get_friendly_name()] = 0;
+ }
+ // Counts exec-graph layers with an empty ORIGINAL_NAMES attribute
+ // (e.g. constants materialized by the plugin); used below to tolerate
+ // the same number of unmatched original layers.
+ int IteratorForLayersConstant = 0;
+ // Store all the layers from the executable graph information represented as CNNNetwork
+ const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
+ InferenceEngine::details::CNNNetSortTopologically(execGraph);
+ for (const auto &execLayer : execGraphLayers) {
+ // NOTE(review): the matching IE_SUPPRESS_DEPRECATED_END sits after
+ // the else-branch below; the pair is balanced lexically (these are
+ // compile-time pragmas), but the placement is easy to misread.
+ IE_SUPPRESS_DEPRECATED_START
+ // Each layer from the execGraphInfo network must have PM data option set
+ ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]);
+ // Parse origin layer names (fused/merged layers) from the executable graph
+ // and compare with layers from the original model
+ auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
+ if (origFromExecLayer == "")
+ IteratorForLayersConstant++;
+ std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+ std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+ auto origLayer = originalLayersMap.find(layer);
+ ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+ origLayer->second++;
+ });
+ }
+ // All layers from the original IR must be present with in ExecGraphInfo
+ for (auto &layer : originalLayersMap) {
+ if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
+ IteratorForLayersConstant--;
+ continue;
+ }
+ // NOTE(review): ASSERT_GE(count, 0) cannot fail for these
+ // non-negative counters — presumably ASSERT_GT was intended
+ // (as in the legacy test this replaces); confirm.
+ ASSERT_GE(layer.second, 0);
+ }
+ } else {
+ // Plugins without exec-graph support must throw on GetExecGraphInfo().
+ ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(),
+ InferenceEngine::details::InferenceEngineException);
+ }
+ IE_SUPPRESS_DEPRECATED_END
+ function.reset();
+}
+
+// Verifies the executable graph reported after an infer request exists:
+// perf-counter fields parse to non-negative values and ORIGINAL_NAMES maps
+// every exec layer back to the source model. Devices other than CPU/GPU are
+// expected to throw from GetExecGraphInfo().
+// NOTE(review): despite the test name, req.Infer() is never called and
+// KEY_PERF_COUNT is not set, so counters may still read "not_executed"
+// (which std::atof parses as 0.0) — confirm this matches the intent.
+TEST_P(ExecGraphTests, CheckExecGraphInfoAfterExecution) {
+ // Skip test according to plugin specific disabledTestPatterns() (if any)
+ SKIP_IF_CURRENT_TEST_IS_DISABLED()
+ // Create CNNNetwork from ngraph::Function
+ InferenceEngine::CNNNetwork cnnNet(function);
+ InferenceEngine::CNNNetwork execGraph;
+ // Get Core from cache
+ auto ie = PluginCache::get().ie();
+ if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) {
+ // Load CNNNetwork to target plugins
+ auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
+ ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
+ // Create InferRequest
+ InferenceEngine::InferRequest req;
+ ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+ // Store all the original layers from the network
+ const auto originalLayers = function->get_ops();
+ std::map<std::string, int> originalLayersMap;
+ for (const auto &layer : originalLayers) {
+ originalLayersMap[layer->get_friendly_name()] = 0;
+ }
+ // Counts exec-graph layers with an empty ORIGINAL_NAMES attribute;
+ // used below to tolerate the same number of unmatched original layers.
+ int IteratorForLayersConstant = 0;
+ // Store all the layers from the executable graph information represented as CNNNetwork
+ const std::vector<InferenceEngine::CNNLayerPtr> execGraphLayers =
+ InferenceEngine::details::CNNNetSortTopologically(execGraph);
+ bool has_layer_with_valid_time = false;
+ for (const auto &execLayer : execGraphLayers) {
+ IE_SUPPRESS_DEPRECATED_START
+ // At least one layer in the topology should be executed and have valid perf counter value
+ // NOTE(review): std::atof does not throw (it returns 0.0 on parse
+ // failure), so this catch block is effectively dead and the flag is
+ // set for any layer — confirm whether a stricter parse was intended.
+ try {
+ float x = static_cast<float>(std::atof(
+ execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER].c_str()));
+ ASSERT_GE(x, 0.0f);
+ has_layer_with_valid_time = true;
+ } catch (std::exception &) {}
+
+ // Parse origin layer names (fused/merged layers) from the executable graph
+ // and compare with layers from the original model
+ auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
+ std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
+ if (origFromExecLayer == "")
+ IteratorForLayersConstant++;
+ std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
+ auto origLayer = originalLayersMap.find(layer);
+ ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
+ origLayer->second++;
+ });
+ }
+ ASSERT_TRUE(has_layer_with_valid_time);
+
+ // All layers from the original IR must be present within ExecGraphInfo
+ for (auto &layer : originalLayersMap) {
+ if ((layer.second == 0) && (IteratorForLayersConstant > 0)) {
+ IteratorForLayersConstant--;
+ continue;
+ }
+ // NOTE(review): ASSERT_GE(count, 0) cannot fail for these
+ // non-negative counters — presumably ASSERT_GT was intended; confirm.
+ ASSERT_GE(layer.second, 0);
+ }
+ } else {
+ // Plugins without exec-graph support must throw on GetExecGraphInfo().
+ ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(),
+ InferenceEngine::details::InferenceEngineException);
+ }
+ IE_SUPPRESS_DEPRECATED_END
+ function.reset();
+}
+
+// Verifies the executable graph can be serialized to IR files on disk.
+// Devices other than CPU/GPU are expected to throw from GetExecGraphInfo().
+TEST_P(ExecGraphTests, CheckExecGraphInfoSerialization) {
+ // Skip test according to plugin specific disabledTestPatterns() (if any)
+ SKIP_IF_CURRENT_TEST_IS_DISABLED()
+ // Create CNNNetwork from ngraph::Function
+ InferenceEngine::CNNNetwork cnnNet(function);
+ InferenceEngine::CNNNetwork execGraph;
+ // Get Core from cache
+ auto ie = PluginCache::get().ie();
+ if (targetDevice == CommonTestUtils::DEVICE_CPU || targetDevice == CommonTestUtils::DEVICE_GPU) {
+ // Load CNNNetwork to target plugins
+ auto execNet = ie->LoadNetwork(cnnNet, targetDevice);
+ ASSERT_NO_THROW(execGraph = execNet.GetExecGraphInfo());
+ // Create InferRequest
+ InferenceEngine::InferRequest req;
+ ASSERT_NO_THROW(req = execNet.CreateInferRequest());
+ execGraph.serialize("exeNetwork.xml", "exeNetwork.bin");
+ // std::remove doubles as the existence check for the serialized XML.
+ // NOTE(review): "exeNetwork.bin" is written but never removed, leaving
+ // an artifact in the working directory — confirm whether it should be
+ // cleaned up here as well.
+ ASSERT_EQ(0, std::remove("exeNetwork.xml"));
+ } else {
+ // Plugins without exec-graph support must throw on GetExecGraphInfo().
+ ASSERT_THROW(ie->LoadNetwork(cnnNet, targetDevice).GetExecGraphInfo(),
+ InferenceEngine::details::InferenceEngineException);
+ }
+ function.reset();
+}
+} // namespace LayerTestsDefinitions
\ No newline at end of file
+++ /dev/null
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "behavior_test_plugin_exec_graph_info.hpp"
-#include "cldnn_test_data.hpp"
-
-// Disabled due to a bug on CentOS that leads to segmentation fault of application on exit
-// when perf counters are enabled
-//INSTANTIATE_TEST_CASE_P(smoke_
-// BehaviorTest,
-// BehaviorPluginTestExecGraphInfo,
-// ValuesIn(supportedValues),
-// getTestCaseName);
+++ /dev/null
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "behavior_test_plugin_exec_graph_info.hpp"
-#include "gna_test_data.hpp"
-
-INSTANTIATE_TEST_CASE_P(
- smoke_BehaviorTest,
- BehaviorPluginTestExecGraphInfo,
- ValuesIn(supportedValues),
- getTestCaseName);
+++ /dev/null
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "behavior_test_plugin_exec_graph_info.hpp"
-#include "mkldnn_test_data.hpp"
-
-INSTANTIATE_TEST_CASE_P(
- smoke_BehaviorTest,
- BehaviorPluginTestExecGraphInfo,
- ValuesIn(supportedValues),
- getTestCaseName);
+++ /dev/null
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include <gtest/gtest.h>
-#include "behavior_test_plugin.h"
-#include "details/ie_cnn_network_tools.h"
-#include "exec_graph_info.hpp"
-
-using namespace ::testing;
-using namespace InferenceEngine;
-
-namespace {
-std::string getTestCaseName(testing::TestParamInfo<BehTestParams> obj) {
- return obj.param.device + "_" + obj.param.input_blob_precision.name()
- + (obj.param.config.size() ? "_" + obj.param.config.begin()->second : "");
-}
-}
-
-inline std::vector<std::string> separateStrToVec(std::string str, const char sep) {
- std::vector<std::string> result;
-
- std::istringstream stream(str);
- std::string strVal;
-
- while (getline(stream, strVal, sep)) {
- result.push_back(strVal);
- }
- return result;
-}
-
-
-TEST_P(BehaviorPluginTestExecGraphInfo, CheckExecGraphInfoBeforeExecution) {
- auto param = GetParam();
-
- TestEnv::Ptr testEnv;
- ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
-
- auto cnnNetwork = testEnv->network;
- auto exeNetwork = testEnv->exeNetwork;
-
- if (param.device == CommonTestUtils::DEVICE_CPU || param.device == CommonTestUtils::DEVICE_GPU) {
- CNNNetwork execGraph;
- ASSERT_NO_THROW(execGraph = exeNetwork.GetExecGraphInfo());
-
- // Store all the original layers from the network
- const std::vector<CNNLayerPtr> originalLayers = CNNNetSortTopologically(cnnNetwork);
- std::map<std::string, int> originalLayersMap;
- for (const auto &layer : originalLayers) {
- originalLayersMap[layer->name] = 0;
- }
-
- // Store all the layers from the executable graph information represented as CNNNetwork
- const std::vector<CNNLayerPtr> execGraphLayers = CNNNetSortTopologically(execGraph);
- for (const auto &execLayer : execGraphLayers) {
- // Each layer from the execGraphInfo network must have PM data option set
- ASSERT_EQ("not_executed", execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER]);
-
- // Parse origin layer names (fused/merged layers) from the executable graph
- // and compare with layers from the original model
- auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
- std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
-
- std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
- auto origLayer = originalLayersMap.find(layer);
- ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
- origLayer->second++;
- } );
- }
- // All layers from the original IR must be present within ExecGraphInfo
- for (auto& layer : originalLayersMap) {
- ASSERT_GT(layer.second, 0);
- }
- } else {
- // Not implemented for other plugins
- ASSERT_THROW(exeNetwork.GetExecGraphInfo(), InferenceEngineException);
- }
-}
-
-TEST_P(BehaviorPluginTestExecGraphInfo, CheckExecGraphInfoAfterExecution) {
- auto param = GetParam();
-
- TestEnv::Ptr testEnv;
- ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv,
- {{ PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES }}));
- ASSERT_NO_THROW(sts = testEnv->inferRequest->Infer(&response));
- ASSERT_EQ(StatusCode::OK, sts) << response.msg;
-
- auto cnnNetwork = testEnv->network;
- auto exeNetwork = testEnv->exeNetwork;
-
- if (param.device == CommonTestUtils::DEVICE_CPU || param.device == CommonTestUtils::DEVICE_GPU) {
- CNNNetwork execGraph;
- ASSERT_NO_THROW(execGraph = exeNetwork.GetExecGraphInfo());
-
- // Store all the original layers from the network
- const std::vector<CNNLayerPtr> originalLayers = CNNNetSortTopologically(cnnNetwork);
- std::map<std::string, int> originalLayersMap;
- for (const auto &layer : originalLayers) {
- originalLayersMap[layer->name] = 0;
- }
-
- // Store all the layers from the executable graph information represented as CNNNetwork
- const std::vector<CNNLayerPtr> execGraphLayers = CNNNetSortTopologically(execGraph);
- bool has_layer_with_valid_time = false;
- for (const auto &execLayer : execGraphLayers) {
- // At least one layer in the topology should be executed and have valid perf counter value
- try {
- float x = static_cast<float>(std::atof(execLayer->params[ExecGraphInfoSerialization::PERF_COUNTER].c_str()));
- ASSERT_GE(x, 0.0f);
- has_layer_with_valid_time = true;
- } catch (std::exception&) { }
-
- // Parse origin layer names (fused/merged layers) from the executable graph
- // and compare with layers from the original model
- auto origFromExecLayer = execLayer->params[ExecGraphInfoSerialization::ORIGINAL_NAMES];
- std::vector<std::string> origFromExecLayerSep = separateStrToVec(origFromExecLayer, ',');
-
- std::for_each(origFromExecLayerSep.begin(), origFromExecLayerSep.end(), [&](const std::string &layer) {
- auto origLayer = originalLayersMap.find(layer);
- ASSERT_NE(originalLayersMap.end(), origLayer) << layer;
- origLayer->second++;
- } );
- }
-
- ASSERT_TRUE(has_layer_with_valid_time);
-
- // All layers from the original IR must be present within ExecGraphInfo
- for (auto& layer : originalLayersMap) {
- ASSERT_GT(layer.second, 0);
- }
- } else {
- // Not implemented for other plugins
- ASSERT_THROW(exeNetwork.GetExecGraphInfo(), InferenceEngineException);
- }
-}
-
-TEST_P(BehaviorPluginTestExecGraphInfo, CheckExecGraphInfoSerialization) {
- auto param = GetParam();
-
- TestEnv::Ptr testEnv;
- ASSERT_NO_FATAL_FAILURE(_createAndCheckInferRequest(GetParam(), testEnv));
-
- auto cnnNetwork = testEnv->network;
- auto exeNetwork = testEnv->exeNetwork;
-
- if (param.device == CommonTestUtils::DEVICE_CPU || param.device == CommonTestUtils::DEVICE_GPU) {
- CNNNetwork execGraph;
- ASSERT_NO_THROW(execGraph = exeNetwork.GetExecGraphInfo());
- execGraph.serialize("exeNetwork.xml", "exeNetwork.bin");
- ASSERT_EQ(0, std::remove("exeNetwork.xml"));
- } else {
- // Not implemented for other plugins
- ASSERT_THROW(exeNetwork.GetExecGraphInfo(), InferenceEngineException);
- }
-}
+++ /dev/null
-// Copyright (C) 2018-2020 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#include "behavior_test_plugin_exec_graph_info.hpp"
-#include "vpu_test_data.hpp"
-
-// TODO: currently this tests are not applicable to myriadPlugin
-#if 0
-INSTANTIATE_TEST_CASE_P(smoke_
- BehaviorTest,
- BehaviorPluginTestExecGraphInfo,
- ValuesIn(supportedValues),
- getTestCaseName);
-#endif
\ No newline at end of file