--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "multi-device/multi_device_config.hpp"
+
+#include "behavior/preprocessing.hpp"
+
+using namespace BehaviorTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> inputPrecisions = {
+ InferenceEngine::Precision::U8,
+ InferenceEngine::Precision::FP32
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+ {}
+};
+
+INSTANTIATE_TEST_CASE_P(PreprocessingPrecisionConvertTests, PreprocessingPrecisionConvertTest,
+ ::testing::Combine(
+ ::testing::ValuesIn(inputPrecisions),
+ ::testing::Values("TEMPLATE"),
+ ::testing::ValuesIn(configs)),
+ PreprocessingPrecisionConvertTest::getTestCaseName);
+
+} // namespace
namespace {
-template <typename T>
-void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
+template <typename T, typename DstT>
+void copyFrom(const InferenceEngine::Blob* src, DstT* dst) {
if (!dst) {
return;
}
 const auto* t_blob = dynamic_cast<const InferenceEngine::TBlob<T>*>(src);
 if (t_blob == nullptr)
     THROW_IE_EXCEPTION << "input type is " << src->getTensorDesc().getPrecision() << " but input is not " << typeid(T).name();
 const T* srcPtr = t_blob->readOnly();
 if (srcPtr == nullptr)
     THROW_IE_EXCEPTION << "Input data was not allocated.";
 for (size_t i = 0; i < t_blob->size(); i++) dst[i] = srcPtr[i];
}
+template <typename T>
+void copyToFloat(float* dst, const InferenceEngine::Blob* src) {
+ copyFrom<T>(src, dst);
+}
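+
+// A minimal usage sketch (illustrative only, not exercised by this hunk): the
+// U16 input path below allocates an I32 blob and funnels the data through
+// copyFrom. The blob names here are hypothetical.
+//
+//   InferenceEngine::Blob::Ptr u16src = ...;  // input blob with Precision::U16
+//   auto i32dst = InferenceEngine::make_shared_blob<std::int32_t>(
+//       {InferenceEngine::Precision::I32,
+//        u16src->getTensorDesc().getDims(),
+//        u16src->getTensorDesc().getLayout()});
+//   i32dst->allocate();
+//   auto typed = dynamic_cast<InferenceEngine::TBlob<std::int32_t>*>(i32dst.get());
+//   copyFrom<uint16_t, std::int32_t>(u16src.get(), typed->data());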
+
} // namespace
void MKLDNNPlugin::MKLDNNInferRequest::InferImpl() {
case InferenceEngine::Precision::I8:
pushInput<int8_t>(input.first, input.second);
break;
- case InferenceEngine::Precision::U16:
- // U16 is unsupported by mkldnn, so here we convert the blob and send FP32
- iconv = InferenceEngine::make_shared_blob<float>({InferenceEngine::Precision::FP32,
+ case InferenceEngine::Precision::U16: {
+ // U16 is unsupported by mkldnn, so here we convert the blob and send I32
+ iconv = InferenceEngine::make_shared_blob<std::int32_t>({InferenceEngine::Precision::I32,
input.second->getTensorDesc().getDims(),
input.second->getTensorDesc().getLayout()});
convertedInputs.push_back(iconv);
iconv->allocate();
- in_f = dynamic_cast<InferenceEngine::TBlob<float> *>(iconv.get());
- if (in_f == nullptr)
+ auto in = dynamic_cast<InferenceEngine::TBlob<std::int32_t> *>(iconv.get());
+ if (in == nullptr)
THROW_IE_EXCEPTION << "Cannot get TBlob";
- copyToFloat<uint16_t>(in_f->data(), input.second.get());
- pushInput<float>(input.first, iconv);
+ copyFrom<uint16_t, std::int32_t>(input.second.get(), in->data());
+ pushInput<std::int32_t>(input.first, iconv);
+ }
break;
case InferenceEngine::Precision::I16:
if (graph->hasMeanImageFor(input.first)) {
DataConfigurator(ConfLayout l, bool constant, int inplace = -1):
layout(l), constant(constant), inplace(inplace) {}
+ DataConfigurator(ConfLayout l, Precision::ePrecision prc):
+ layout(l), prc(prc) {}
+
ConfLayout layout;
bool constant = false;
int inplace = -1;
+ Precision::ePrecision prc = Precision::UNSPECIFIED; // by default use the layer precision
};
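+// Illustrative example of the two constructors (the comments are assumptions
+// about intended use, mirroring the ReverseSequence change below):
+//   DataConfigurator(ConfLayout::PLN)                   // prc = UNSPECIFIED -> layer precision is used
+//   DataConfigurator(ConfLayout::PLN, Precision::FP32)  // force FP32 for this port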
void addConfig(const CNNLayer* layer, std::vector<DataConfigurator> in_l,
// Fix up BF16 precision where it appears - layers natively support only FP32.
// If we see BF16, it means a different floating point format that will be converted by a reorder
// added by the current mkl-dnn cpu plugin when it figures out a difference in data types on the input and output of an edge.
- InferenceEngine::Precision precision = data_desc.getPrecision();
+ InferenceEngine::Precision precision = (conf.prc == Precision::UNSPECIFIED) ? data_desc.getPrecision() : Precision(conf.prc);
if (precision == Precision::BF16) {
precision = Precision::FP32;
}
srcStrides = layer->insData[REVERSESEQUENCE_DATA].lock()->getTensorDesc().getBlockingDesc().getStrides();
work_amount_dst = srcStrides[0] * src_dims[0];
- addConfig(layer, { DataConfigurator(ConfLayout::PLN), DataConfigurator(ConfLayout::PLN) }, { DataConfigurator(ConfLayout::PLN) });
+ addConfig(layer,
+ { DataConfigurator(ConfLayout::PLN, Precision::FP32), DataConfigurator(ConfLayout::PLN) },
+ { DataConfigurator(ConfLayout::PLN, Precision::FP32) });
} catch (InferenceEngine::details::InferenceEngineException &ex) {
errorMsg = ex.what();
}
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "multi-device/multi_device_config.hpp"
+
+#include "behavior/preprocessing.hpp"
+
+using namespace BehaviorTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> inputPrecisions = {
+ InferenceEngine::Precision::U16,
+ InferenceEngine::Precision::FP32
+};
+
+const std::vector<std::map<std::string, std::string>> configs = {
+ {}
+};
+
+INSTANTIATE_TEST_CASE_P(smoke_BehaviourPreprocessingTests, PreprocessingPrecisionConvertTest,
+ ::testing::Combine(
+ ::testing::ValuesIn(inputPrecisions),
+ ::testing::Values(CommonTestUtils::DEVICE_CPU),
+ ::testing::ValuesIn(configs)),
+ PreprocessingPrecisionConvertTest::getTestCaseName);
+
+} // namespace
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include <ie_core.hpp>
+#include "common_test_utils/test_assertions.hpp"
+#include "common_test_utils/common_utils.hpp"
+#include "functional_test_utils/plugin_cache.hpp"
+#include "functional_test_utils/layer_test_utils.hpp"
+#include "functional_test_utils/blob_utils.hpp"
+#include "ie_preprocess.hpp"
+#include "functional_test_utils/behavior_test_utils.hpp"
+
+namespace {
+void setInputNetworkPrecision(InferenceEngine::CNNNetwork &network, InferenceEngine::InputsDataMap &inputs_info,
+ InferenceEngine::Precision input_precision) {
+ inputs_info = network.getInputsInfo();
+ ASSERT_EQ(1u, inputs_info.size());
+ inputs_info.begin()->second->setPrecision(input_precision);
+}
+
+} // namespace
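+
+// Hypothetical usage sketch of the helper above (the core/network/modelPath
+// names are assumptions, not part of this test):
+//
+//   InferenceEngine::Core core;
+//   auto network = core.ReadNetwork(modelPath);
+//   InferenceEngine::InputsDataMap inputsInfo;
+//   setInputNetworkPrecision(network, inputsInfo, InferenceEngine::Precision::U16);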
+
+namespace BehaviorTestsDefinitions {
+
+using PreprocessingPrecisionConvertParams = std::tuple<
+ InferenceEngine::Precision, // Input precision
+ std::string, // Device name
+ std::map<std::string, std::string> // Config
+>;
+
+struct PreprocessingPrecisionConvertTest :
+ public testing::WithParamInterface<PreprocessingPrecisionConvertParams>,
+ LayerTestsUtils::LayerTestsCommon {
+public:
+ static std::string getTestCaseName(testing::TestParamInfo<PreprocessingPrecisionConvertParams> obj) {
+ InferenceEngine::Precision inPrc;
+ std::string targetDevice;
+ std::map<std::string, std::string> configuration;
+ std::tie(inPrc, targetDevice, configuration) = obj.param;
+ std::ostringstream result;
+ result << "inPRC=" << inPrc.name() << "_";
+ result << "targetDevice=" << targetDevice;
+ if (!configuration.empty()) {
+ for (auto& configItem : configuration) {
+ result << "configItem=" << configItem.first << "_" << configItem.second << "_";
+ }
+ }
+ return result.str();
+ }
+
+ void SetUp() override {
+ // This test:
+ // - Strives to test only the plugin-internal preprocessing (precision conversion).
+ //   Thus a (logically) no-op graph is used.
+ // - The reference code mimics the preprocessing via an extra ngraph Convert operation.
+ // - Creates/uses two (different) graphs: one to feed the plugin and one to calculate the reference result.
+
+ SetRefMode(LayerTestsUtils::RefMode::INTERPRETER);
+
+ std::tie(inPrc, targetDevice, configuration) = this->GetParam();
+
+ bool specialZero = true;
+
+ std::vector<size_t> inputShape {4, 4};
+
+ auto make_ngraph = [&](bool with_extra_conv) {
+ auto in_prec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(
+     with_extra_conv ? inPrc : decltype(inPrc)(InferenceEngine::Precision::FP32));
+ auto paramsIn = ngraph::builder::makeParams(in_prec, {inputShape});
+ auto paramIn = ngraph::helpers::convert2OutputVector(
+ ngraph::helpers::castOps2Nodes<ngraph::op::Parameter>(paramsIn));
+
+ auto toF32 = std::make_shared<ngraph::opset1::Convert>(paramIn[0], ngraph::element::Type_t::f32);
+
+ auto constNode = std::make_shared<ngraph::opset1::Constant>(
+ ngraph::element::Type_t::i64, ngraph::Shape{inputShape.size()}, inputShape);
+ auto reshape = std::make_shared<ngraph::opset1::Reshape>(
+     with_extra_conv ? toF32 : paramIn[0], constNode, specialZero);
+ ngraph::ResultVector results{std::make_shared<ngraph::opset1::Result>(reshape)};
+ return std::make_shared<ngraph::Function>(results, paramsIn, "Reshape");
+ };
+
+ function = make_ngraph(false);
+ reference_function = make_ngraph(true);  // use extra ops to mimic the preprocessing
+ }
+
+ void Validate() override {
+ // force the reference implementation to use the graph with the extra Convert operation
+ function = reference_function;
+ LayerTestsUtils::LayerTestsCommon::Validate();
+ }
+
+public:
+ std::shared_ptr<InferenceEngine::Core> ie = PluginCache::get().ie();
+ std::shared_ptr<ngraph::Function> reference_function;
+};
+
+
+TEST_P(PreprocessingPrecisionConvertTest, InternalPluginPrecisionConvert) {
+ // Skip test according to plugin specific disabledTestPatterns() (if any)
+ SKIP_IF_CURRENT_TEST_IS_DISABLED()
+ Run();
+}
+} // namespace BehaviorTestsDefinitions
np.int32,
pytest.param(np.int64, marks=xfail_issue_35926),
pytest.param(np.uint8, marks=xfail_issue_36479),
- pytest.param(np.uint16, marks=xfail_issue_36479),
+ np.uint16,
pytest.param(np.uint32, marks=xfail_issue_36476),
pytest.param(np.uint64, marks=xfail_issue_36478),
],