{}
};
-INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTests, PreprocessingPrecisionConvertTest,
+INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTestsViaSetInput, PreprocessingPrecisionConvertTest,
::testing::Combine(
::testing::ValuesIn(inputPrecisions),
+ ::testing::Values(1, 2, 3, 4, 5), // Number of input tensor channels (used as tensor rank: shape is `channels` dims of size 4)
+ ::testing::Values(true), // Use SetInput
+ ::testing::Values("TEMPLATE"),
+ ::testing::ValuesIn(configs)),
+ PreprocessingPrecisionConvertTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTestsViaGetBlob, PreprocessingPrecisionConvertTest,
+ ::testing::Combine(
+ ::testing::ValuesIn(inputPrecisions),
+ ::testing::Values(4, 5), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
+ ::testing::Values(false), // Use GetBlob
::testing::Values("TEMPLATE"),
::testing::ValuesIn(configs)),
PreprocessingPrecisionConvertTest::getTestCaseName);
{}
};
-INSTANTIATE_TEST_CASE_P(smoke_BehaviourPreprocessingTests, PreprocessingPrecisionConvertTest,
+// NOTE(review): the rename below drops the "smoke_" prefix, so these cases may no
+// longer be selected by smoke-test filters — confirm this is intentional.
+INSTANTIATE_TEST_CASE_P(BehaviourPreprocessingTestsViaSetInput, PreprocessingPrecisionConvertTest,
::testing::Combine(
::testing::ValuesIn(inputPrecisions),
+ ::testing::Values(1, 2, 3, 4, 5), // Number of input tensor channels (used as tensor rank: shape is `channels` dims of size 4)
+ ::testing::Values(true), // Use SetInput
::testing::Values(CommonTestUtils::DEVICE_CPU),
::testing::ValuesIn(configs)),
PreprocessingPrecisionConvertTest::getTestCaseName);
+INSTANTIATE_TEST_CASE_P(BehaviourPreprocessingTestsViaGetBlob, PreprocessingPrecisionConvertTest,
+ ::testing::Combine(
+ ::testing::ValuesIn(inputPrecisions),
+ ::testing::Values(4, 5), // Number of input tensor channels (blob_copy only supports 4d and 5d tensors)
+ ::testing::Values(false), // Use GetBlob
+ ::testing::Values(CommonTestUtils::DEVICE_CPU),
+ ::testing::ValuesIn(configs)),
+ PreprocessingPrecisionConvertTest::getTestCaseName);
} // namespace
using PreprocessingPrecisionConvertParams = std::tuple<
InferenceEngine::Precision, // Input precision
+ unsigned, // Number of channels (used as the input tensor rank in SetUp())
+ bool, // Input method: true = normal SetInput()/SetBlob(), false = GetBlob() + blob_copy
std::string, // Device name
std::map<std::string, std::string> // Config
>;
public:
static std::string getTestCaseName(testing::TestParamInfo<PreprocessingPrecisionConvertParams> obj) {
InferenceEngine::Precision inPrc;
+ bool useSetInput;
+ unsigned channels;
std::string targetDevice;
std::map<std::string, std::string> configuration;
- std::tie(inPrc, targetDevice, configuration) = obj.param;
+ std::tie(inPrc, channels, useSetInput, targetDevice, configuration) = obj.param;
std::ostringstream result;
result << "inPRC=" << inPrc.name() << "_";
+ result << channels << "Ch" << "_";
+ result << (useSetInput ? "SetInput" : "GetBlob") << "_";
result << "targetDevice=" << targetDevice;
if (!configuration.empty()) {
for (auto& configItem : configuration) {
return result.str();
}
+ // Need to override Infer() due to usage of GetBlob() as input method:
+ // when use_set_input is false, generated data is copied into the blob that
+ // already belongs to the request (GetBlob + blob_copy) instead of SetBlob().
+ // Mostly a copy of LayerTestsCommon::Infer()
+ void Infer() override {
+ inferRequest = executableNetwork.CreateInferRequest();
+ inputs.clear();
+
+ for (const auto &input : executableNetwork.GetInputsInfo()) {
+ const auto &info = input.second;
+ auto blob = GenerateInput(*info);
+ if (!use_set_input) {
+ // GetBlob() path: fill the request-owned blob via blob_copy.
+ // (renamed from `input` to avoid shadowing the loop variable)
+ InferenceEngine::Blob::Ptr requestBlob = inferRequest.GetBlob(info->name());
+ blob_copy(blob, requestBlob);
+ } else {
+ inferRequest.SetBlob(info->name(), blob);
+ }
+
+ inputs.push_back(blob);
+ }
+ // NOTE(review): copied verbatim from LayerTestsCommon::Infer(); the second
+ // count() tests for the *key* "YES" rather than the value of
+ // KEY_DYN_BATCH_ENABLED — looks wrong, but is kept for parity with the
+ // original. Confirm against upstream before changing.
+ if (configuration.count(InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED) &&
+ configuration.count(InferenceEngine::PluginConfigParams::YES)) {
+ auto batchSize = executableNetwork.GetInputsInfo().begin()->second->getTensorDesc().getDims()[0] / 2;
+ inferRequest.SetBatch(batchSize);
+ }
+ inferRequest.Infer();
+ }
+
void SetUp() override {
// This test:
// - Strive to test the plugin internal preprocessing (precision conversion) only.
SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
- std::tie(inPrc, targetDevice, configuration) = this->GetParam();
+ std::tie(inPrc, channels, use_set_input, targetDevice, configuration) = this->GetParam();
bool specialZero = true;
- std::vector<size_t> inputShape {4, 4};
+ std::vector<size_t> inputShape(channels, 4);
auto make_ngraph = [&](bool with_extra_conv) {
auto in_prec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(with_extra_conv ? inPrc : decltype(inPrc)(InferenceEngine::Precision::FP32));
public:
std::shared_ptr<InferenceEngine::Core> ie = PluginCache::get().ie();
std::shared_ptr<ngraph::Function> reference_function;
+ bool use_set_input = true; // true: feed inputs via SetBlob(); false: fill the request's GetBlob() result
+ unsigned channels = 0; // NOTE(review): despite the name, SetUp() uses this as the tensor rank (inputShape has `channels` dims of size 4)
};