#include <tuple>
#include <set>
+#include <ie_icore.hpp>
#include <cpp/ie_cnn_network.h>
#include <details/caseless.hpp>
public:
using Ptr = std::shared_ptr<FrontEnd>;
- explicit FrontEnd(StageBuilder::Ptr stageBuilder);
+ explicit FrontEnd(StageBuilder::Ptr stageBuilder, const ie::ICore* core);
ModelPtr buildInitialModel(ie::ICNNNetwork& network);
private:
StageBuilder::Ptr _stageBuilder;
+ const ie::ICore* _core = nullptr;
IeParsedNetwork _ieParsedNetwork;
std::unordered_set<ie::DataPtr> _unbatchedOutputs;
#include <set>
#include <utility>
+#include <ie_icore.hpp>
#include <ie_icnn_network.hpp>
#include <details/caseless.hpp>
ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
- const Logger::Ptr& log);
+ const Logger::Ptr& log,
+ const ie::ICore* core);
CompiledGraph::Ptr compileSubNetwork(
ie::ICNNNetwork& network,
- const CompilationConfig& subConfig);
+ const CompilationConfig& subConfig,
+ const ie::ICore* core);
//
// getSupportedLayers
const ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
- const Logger::Ptr& log);
+ const Logger::Ptr& log,
+ const ie::ICore* core);
//
// Blob version and checks
[this](const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) \
{ functor_name(model, layer, inputs, outputs); }
-FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder)
- : _stageBuilder(std::move(stageBuilder))
- , parsers{{
+FrontEnd::FrontEnd(StageBuilder::Ptr stageBuilder, const ie::ICore* core)
+ : _stageBuilder(std::move(stageBuilder)),
+ _core(core),
+ parsers{{
{"Convolution", LAYER_PARSER(parseConvolution)},
{"Pooling", LAYER_PARSER(parsePooling)},
{"ReLU", LAYER_PARSER(parseReLU)},
{"StaticShapeReshape", LAYER_PARSER(parseReshape)},
{"Mish", LAYER_PARSER(parseMish)},
{"Gelu", LAYER_PARSER(parseGelu)},
- }} {}
+ }} {
+ VPU_THROW_UNLESS(_core != nullptr, "Argument core is null");
+ }
ModelPtr FrontEnd::buildInitialModel(ie::ICNNNetwork& network) {
VPU_PROFILE(buildInitialModel);
namespace {
-CompiledGraph::Ptr compileImpl(ie::ICNNNetwork& network) {
+CompiledGraph::Ptr compileImpl(ie::ICNNNetwork& network,
+ const ie::ICore* core) {
const auto& env = CompileEnv::get();
env.log->debug("Compile network [%s]", network.getName());
VPU_LOGGER_SECTION(env.log);
auto stageBuilder = std::make_shared<StageBuilder>();
- auto frontEnd = std::make_shared<FrontEnd>(stageBuilder);
+ auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, core);
auto backEnd = std::make_shared<BackEnd>();
auto passManager = std::make_shared<PassManager>(stageBuilder, backEnd);
ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
- const Logger::Ptr& log) {
+ const Logger::Ptr& log,
+ const ie::ICore* core) {
CompileEnv::init(platform, config, log);
AutoScope autoDeinit([] {
CompileEnv::free();
VPU_PROFILE(compileNetwork);
- return compileImpl(network);
+ return compileImpl(network, core);
}
CompiledGraph::Ptr compileModel(
CompiledGraph::Ptr compileSubNetwork(
ie::ICNNNetwork& network,
- const CompilationConfig& subConfig) {
+ const CompilationConfig& subConfig,
+ const ie::ICore* core) {
VPU_PROFILE(compileSubNetwork);
const auto& env = CompileEnv::get();
CompileEnv::updateConfig(subConfig);
- return compileImpl(network);
+ return compileImpl(network, core);
}
//
const ie::ICNNNetwork& network,
Platform platform,
const CompilationConfig& config,
- const Logger::Ptr& log) {
+ const Logger::Ptr& log,
+ const ie::ICore* core) {
CompileEnv::init(platform, config, log);
AutoScope autoDeinit([] {
CompileEnv::free();
VPU_PROFILE(getSupportedLayers);
auto stageBuilder = std::make_shared<StageBuilder>();
- auto frontEnd = std::make_shared<FrontEnd>(stageBuilder);
+ auto frontEnd = std::make_shared<FrontEnd>(stageBuilder, core);
auto clonedNetworkImpl = ie::cloneNet(network);
continue;
}
- int IN = inputDesc.dim(Dim::N);
+ int I_N = inputDesc.dim(Dim::N);
int IC = inputDesc.dim(Dim::C);
int ID = inputDesc.dim(Dim::D);
int IH = inputDesc.dim(Dim::H);
"but: KO=%d, OC=%d", KO, OC);
// check spatial dims of output
- int inputShape[] = {IW, IH, ID, IC, IN};
+ int inputShape[] = {IW, IH, ID, IC, I_N};
int outputShape[] = {OW, OH, OD, OC, ON};
int weightsShape[] = {KW, KH, KD, KI, KO};
for (int i = 0; i < 3; i++) {
// create subInputs[i], if it was not created previously
if (subInputs[i] == nullptr) {
auto postfix = formatString("@input_depth=%d/%d", i + 1, ID);
- DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, IN});
+ DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, I_N});
subInputs[i] = model->duplicateData(input, postfix, subInputsDesc);
}
continue; // this subInputs[d] is not needed
}
auto postfix = formatString("@input_depth=%d/%d", d + 1, ID);
- DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, IN});
+ DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, I_N});
subInputs3D[d] = model->duplicateData(input, postfix + "@3D", subInputsDesc3D);
_stageBuilder->addReshapeStage(model,
stage->name() + "@split",
VPU_THROW_UNLESS(inputDesc.type() == outputDesc.type(), "incompatible data types");
VPU_THROW_UNLESS(inputDesc.dimsOrder() == outputDesc.dimsOrder(), "incompatible dim orders");
- int IN = inputDesc.dim(Dim::N);
+ int I_N = inputDesc.dim(Dim::N);
int IC = inputDesc.dim(Dim::C);
int ID = inputDesc.dim(Dim::D);
int IH = inputDesc.dim(Dim::H);
int OH = outputDesc.dim(Dim::H);
int OW = outputDesc.dim(Dim::W);
- VPU_THROW_UNLESS(IN == ON, "incompatible: input batch=%d, output batch=%d", IN, ON);
+ VPU_THROW_UNLESS(I_N == ON, "incompatible: input batch=%d, output batch=%d", I_N, ON);
VPU_THROW_UNLESS(IC == OC, "incompatible: input channels=%d, output channels=%d", IC, OC);
// check spatial dims of output
- int inputShape[] = {IW, IH, ID, IC, IN};
+ int inputShape[] = {IW, IH, ID, IC, I_N};
int outputShape[] = {OW, OH, OD, OC, ON};
for (int i = 0; i < 3; i++) {
int expectedOutputSize = (inputShape[i]
// create subInputs[i], if it was not created previously
if (subInputs[i] == nullptr) {
auto postfix = formatString("@input_depth=%d/%d", i + 1, ID);
- DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, IN});
+ DataDesc subInputsDesc(inputDesc.type(), DimsOrder::NCHW, {IW, IH, IC, I_N});
subInputs[i] = model->duplicateData(input, postfix, subInputsDesc);
}
continue; // this subInputs[d] is not needed
}
auto postfix = formatString("@input_depth=%d/%d", d + 1, ID);
- DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, IN});
+ DataDesc subInputsDesc3D(inputDesc.type(), DimsOrder::NCDHW, {IW, IH, 1, IC, I_N});
subInputs3D[d] = model->duplicateData(input, postfix + "@3D", subInputsDesc3D);
_stageBuilder->addReshapeStage(model,
stage->name() + "@split",
ie::CNNNetwork loadSubNetwork(
const std::string& fileName,
- const std::pair<int, int>& imgSize, int* zdir_batchsize = nullptr) {
+ const std::pair<int, int>& imgSize,
+ const ie::ICore* core,
+ int* zdir_batchsize = nullptr) {
//
// Load network
//
- // ticket 30632 : replace with ICore interface
- InferenceEngine::Core reader;
- auto network = reader.ReadNetwork(fileName);
+ auto network = core->ReadNetwork(fileName, std::string());
//
// Set precision of input/output
// Convert p-nets
for (const auto& p_net_input : pyramid) {
- auto pNet = loadSubNetwork(pnet_ir_name, p_net_input);
- auto res = compileSubNetwork(pNet, env.config);
+ auto pNet = loadSubNetwork(pnet_ir_name, p_net_input, _core);
+ auto res = compileSubNetwork(pNet, env.config, _core);
mergedBlobSize += res->blob.size();
compiledSubNetworks.emplace_back(std::move(res));
}
int stage2_zdir_batchsize = 1;
// Convert r-net
{
- auto rNet = loadSubNetwork(rnet_ir_name, r_net_input, &stage2_zdir_batchsize);
- auto res = compileSubNetwork(rNet, env.config);
+ auto rNet = loadSubNetwork(rnet_ir_name, r_net_input, _core, &stage2_zdir_batchsize);
+ auto res = compileSubNetwork(rNet, env.config, _core);
mergedBlobSize += res->blob.size();
compiledSubNetworks.emplace_back(std::move(res));
}
// Convert o-net
{
- auto oNet = loadSubNetwork(onet_ir_name, o_net_input);
- auto res = compileSubNetwork(oNet, env.config);
+ auto oNet = loadSubNetwork(onet_ir_name, o_net_input, _core);
+ auto res = compileSubNetwork(oNet, env.config, _core);
mergedBlobSize += res->blob.size();
compiledSubNetworks.emplace_back(std::move(res));
}
ExecutableNetwork::ExecutableNetwork(
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr>& devicePool,
- const MyriadConfig& config) :
- _config(config) {
+ const MyriadConfig& config,
+ const ie::ICore* core) :
+ _config(config),
+ _core(core) {
VPU_PROFILE(ExecutableNetwork);
_log = std::make_shared<Logger>(
ICNNNetwork& network,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr>& devicePool,
- const MyriadConfig& config) :
- ExecutableNetwork(std::move(mvnc), devicePool, config) {
+ const MyriadConfig& config,
+ const ie::ICore* core) :
+ ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
VPU_PROFILE(ExecutableNetwork);
const auto compilerLog = std::make_shared<Logger>(
network,
static_cast<Platform>(_device->_platform),
_config.compileConfig(),
- compilerLog);
+ compilerLog,
+ _core);
_actualNumExecutors = compiledGraph->numExecutors;
_graphBlob = std::move(compiledGraph->blob);
ExecutableNetwork::ExecutableNetwork(std::istream& strm,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
- const MyriadConfig& config) :
- ExecutableNetwork(std::move(mvnc), devicePool, config) {
+ const MyriadConfig& config,
+ const ie::ICore* core) :
+ ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
VPU_PROFILE(ExecutableNetwork);
Import(strm, devicePool, config);
}
const std::string& blobFilename,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr>& devicePool,
- const MyriadConfig& config) :
- ExecutableNetwork(std::move(mvnc), devicePool, config) {
+ const MyriadConfig& config,
+ const ie::ICore* core) :
+ ExecutableNetwork(std::move(mvnc), devicePool, config, core) {
VPU_PROFILE(ExecutableNetwork);
std::ifstream blobFile{blobFilename, std::ios::binary};
Import(blobFile, devicePool, config);
explicit ExecutableNetwork(InferenceEngine::ICNNNetwork &network,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
- const MyriadConfig& config);
+ const MyriadConfig& config,
+ const ie::ICore* core);
explicit ExecutableNetwork(std::istream& strm,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
- const MyriadConfig& config);
+ const MyriadConfig& config,
+ const ie::ICore* core);
explicit ExecutableNetwork(const std::string &blobFilename,
std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
- const MyriadConfig& config);
+ const MyriadConfig& config,
+ const ie::ICore* core);
virtual ~ExecutableNetwork() {
DevicePtr _device;
GraphMetaInfo _graphMetaData;
MyriadConfig _config;
+ const ie::ICore* _core = nullptr;
int _actualNumExecutors = 0;
std::vector<std::string> _supportedMetrics;
ExecutableNetwork(std::shared_ptr<IMvnc> mvnc,
std::vector<DevicePtr> &devicePool,
- const MyriadConfig& config);
+ const MyriadConfig& config,
+ const ie::ICore* core);
InferenceEngine::ITaskExecutor::Ptr getNextTaskExecutor() {
std::string id = _taskExecutorGetResultIds.front();
vpu::EliminateShapeOfAfterDSR().run_on_function(function);
}
- return std::make_shared<ExecutableNetwork>(*clonedNetwork, _mvnc, _devicePool, parsedConfigCopy);
+ return std::make_shared<ExecutableNetwork>(*clonedNetwork,
+ _mvnc, _devicePool,
+ parsedConfigCopy, GetCore());
}
void Engine::SetConfig(const std::map<std::string, std::string> &config) {
network,
static_cast<Platform>(parsedConfigCopy.platform()),
parsedConfigCopy.compileConfig(),
- log);
+ log,
+ GetCore());
for (const auto& layerName : layerNames) {
res.supportedLayersMap.insert({ layerName, GetName() });
const auto executableNetwork =
std::make_shared<ExecutableNetwork>(
- model, _mvnc, _devicePool, parsedConfigCopy);
+ model, _mvnc, _devicePool, parsedConfigCopy, GetCore());
return InferenceEngine::ExecutableNetwork{IExecutableNetwork::Ptr(
new ExecutableNetworkBase<ExecutableNetworkInternal>(executableNetwork),
--- /dev/null
+// Copyright (C) 2018-2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <gmock/gmock.h>
+#include "ie_icore.hpp"
+
+// gmock-based mock of InferenceEngine::ICore, used by unit tests that need
+// to pass a non-null core pointer into compiler entry points (FrontEnd,
+// compileNetwork, getSupportedLayers) without constructing a real core.
+class MockICore : public InferenceEngine::ICore {
+public:
+ MOCK_QUALIFIED_METHOD0(GetTaskExecutor, const, std::shared_ptr<InferenceEngine::ITaskExecutor>());
+
+ // Two ReadNetwork overloads are mocked: (model data, weights blob) and
+ // (model file path, bin file path) — the latter is the one exercised by
+ // loadSubNetwork in this change. NOTE(review): signatures must stay in
+ // sync with ie::ICore; confirm against ie_icore.hpp when updating.
+ MOCK_QUALIFIED_METHOD2(ReadNetwork, const, InferenceEngine::CNNNetwork(const std::string&, const InferenceEngine::Blob::CPtr&));
+ MOCK_QUALIFIED_METHOD2(ReadNetwork, const, InferenceEngine::CNNNetwork(const std::string&, const std::string&));
+
+ MOCK_METHOD3(LoadNetwork, InferenceEngine::ExecutableNetwork(
+ const InferenceEngine::CNNNetwork&, const std::string&, const std::map<std::string, std::string>&));
+
+ MOCK_METHOD3(ImportNetwork, InferenceEngine::ExecutableNetwork(
+ std::istream&, const std::string&, const std::map<std::string, std::string>&));
+
+ MOCK_QUALIFIED_METHOD3(QueryNetwork, const, InferenceEngine::QueryNetworkResult(
+ const InferenceEngine::ICNNNetwork&, const std::string&, const std::map<std::string, std::string>&));
+
+ MOCK_QUALIFIED_METHOD2(GetMetric, const, InferenceEngine::Parameter(const std::string&, const std::string&));
+
+ ~MockICore() = default;
+};
LINK_LIBRARIES
vpu_graph_transformer_test_static
unitTestUtils
+ ngraphFunctions
mvnc
ADD_CPPLINT
LABELS
consoleOutput());
stageBuilder = std::make_shared<StageBuilder>();
- frontEnd = std::make_shared<FrontEnd>(stageBuilder);
+ frontEnd = std::make_shared<FrontEnd>(stageBuilder, &_mockCore);
backEnd = std::make_shared<BackEnd>();
passManager = std::make_shared<PassManager>(stageBuilder, backEnd);
}
#pragma once
+#include <list>
+
+#include <gtest/gtest.h>
+
#include <vpu/compile_env.hpp>
#include <vpu/model/stage.hpp>
#include <vpu/model/model.hpp>
#include <vpu/backend/backend.hpp>
#include <vpu/utils/ie_helpers.hpp>
-#include <gtest/gtest.h>
-
-#include <list>
+#include <unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp>
namespace vpu {
TestModel CreateTestModel();
private:
+ MockICore _mockCore;
Logger::Ptr _log;
std::list<ModelPtr> _models;
};
//
#include <gtest/gtest.h>
-#include <tests_common.hpp>
#include <memory>
#include <ie_common.h>
#include <vpu/graph_transformer.hpp>
#include <vpu/utils/logger.hpp>
-#include <myriad_plugin/myriad_config.h>
#include <ngraph/op/util/attr_types.hpp>
#include <ngraph_functions/subgraph_builders.hpp>
+#include <unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp>
+
using namespace ::testing;
using namespace vpu;
using namespace InferenceEngine;
-class VPUBlobReaderHeaderTests: public TestsCommon, public testing::WithParamInterface<std::vector<size_t>> {
+class VPUBlobReaderHeaderTests: public ::testing::Test, public testing::WithParamInterface<std::vector<size_t>> {
private:
std::vector<size_t> inputShape;
CompilationConfig compileConfig;
auto log = std::make_shared<Logger>("GraphCompiler", LogLevel::None, consoleOutput());
- _compiledGraph = compileNetwork(_network, Platform::MYRIAD_X, compileConfig, log);
+ _compiledGraph = compileNetwork(_network, Platform::MYRIAD_X, compileConfig, log, &_mockCore);
}
CNNNetwork _network;
CompiledGraph::Ptr _compiledGraph;
+ MockICore _mockCore;
};
TEST_P(VPUBlobReaderHeaderTests, canReadCorrectMagicNumber) {
auto actualDims = actual.second->getTensorDesc().getDims();
size_t actualTotalSize = std::accumulate(actualDims.begin(), actualDims.end(), 1, std::multiplies<size_t>());
- ASSERT_TRUE(expectedNetworkInputs.count(actual.first) > 0);
+ ASSERT_GT(expectedNetworkInputs.count(actual.first), 0);
auto expectedDims = expectedNetworkInputs[actual.first]->getTensorDesc().getDims();
size_t expectedTotalSize = std::accumulate(expectedDims.begin(), expectedDims.end(), 1, std::multiplies<size_t>());
auto expectedNetworkInputs = _network.getInputsInfo();
for (auto&& actual : parsedNetworkInputs) {
- ASSERT_TRUE(expectedNetworkInputs.count(actual.first) > 0);
+ ASSERT_GT(expectedNetworkInputs.count(actual.first), 0);
}
for (auto&& expected : expectedNetworkInputs) {
- ASSERT_TRUE(parsedNetworkInputs.count(expected.first) > 0);
+ ASSERT_GT(parsedNetworkInputs.count(expected.first), 0);
}
}
auto actualDims = actual.second->getDims();
size_t actualTotalSize = std::accumulate(actualDims.begin(), actualDims.end(), 1, std::multiplies<size_t>());
- ASSERT_TRUE(expectedNetworkOutputs.count(actual.first) > 0);
+ ASSERT_GT(expectedNetworkOutputs.count(actual.first), 0);
auto expectedDims = expectedNetworkOutputs[actual.first]->getDims();
size_t expectedTotalSize = std::accumulate(expectedDims.begin(), expectedDims.end(), 1, std::multiplies<size_t>());
auto expectedNetworkOutputs = _network.getOutputsInfo();
for (auto&& actual : parsedNetworkOutputs) {
- ASSERT_TRUE(expectedNetworkOutputs.count(actual.first) > 0);
+ ASSERT_GT(expectedNetworkOutputs.count(actual.first), 0);
}
for (auto&& expected : expectedNetworkOutputs) {
- ASSERT_TRUE(parsedNetworkOutputs.count(expected.first) > 0);
+ ASSERT_GT(parsedNetworkOutputs.count(expected.first), 0);
}
}
vpuLayersTests::SetUp();
_stageBuilder = std::make_shared<StageBuilder>();
- _frontEnd = std::make_shared<FrontEnd>(_stageBuilder);
- _backEnd = std::make_shared<BackEnd>();
- _passManager = std::make_shared<PassManager>(_stageBuilder, _backEnd);
_platform = CheckMyriadX() ? Platform::MYRIAD_X : Platform::MYRIAD_2;
}
private:
vpu::Platform _platform = vpu::Platform::MYRIAD_X;
- vpu::FrontEnd::Ptr _frontEnd;
- vpu::PassManager::Ptr _passManager;
- vpu::BackEnd::Ptr _backEnd;
InferenceEngine::ExecutableNetwork _executableNetwork;
};
consoleOutput());
stageBuilder = std::make_shared<StageBuilder>();
- frontEnd = std::make_shared<FrontEnd>(stageBuilder);
+ frontEnd = std::make_shared<FrontEnd>(stageBuilder, &_mockCore);
backEnd = std::make_shared<BackEnd>();
passManager = std::make_shared<PassManager>(stageBuilder, backEnd);
}
#include <vpu/middleend/pass_manager.hpp>
#include <vpu/backend/backend.hpp>
+#include <unit_test_utils/mocks/cpp_interfaces/interface/mock_icore.hpp>
+
namespace vpu {
template <class Cont, class Cond>
TestModel CreateTestModel(const DataDesc& dataDesc);
private:
+ MockICore _mockCore;
Logger::Ptr _log;
std::list<ModelPtr> _models;
};