# Default packages
#
-set(FIRMWARE_PACKAGE_VERSION 1445)
+set(FIRMWARE_PACKAGE_VERSION 1452)
set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.09.1")
#
void parseSwish(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
void parseActivation(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
void parseLogicalNot(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
+ void parseGatherND(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
void parseHSwish(const Model& model, const ie::CNNLayerPtr& layer, const DataVector& inputs, const DataVector& outputs) const;
//
StridedSlice = 133,
SoftPlus = 134,
Swish = 135,
+ GatherND = 136,
HSwish = 137,
)
const Data& input0,
const Data& input1,
const Data& output);
+
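+    // Adds a GatherND stage that gathers slices of `input` addressed by
+    // `indices`; the first `batch_dims` dimensions are shared between them.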
+ Stage addGatherNDStage(
+ const Model& model,
+ const std::string& name,
+ const ie::CNNLayerPtr& layer,
+ const Data& input,
+ const Data& indices,
+ const Data& output,
+ int32_t batch_dims);
};
} // namespace vpu
{"SoftPlus", LAYER_PARSER(parseSoftPlus)},
{"Swish", LAYER_PARSER(parseSwish)},
{"Activation", LAYER_PARSER(parseActivation)},
+ {"GatherND", LAYER_PARSER(parseGatherND)},
{"HSwish", LAYER_PARSER(parseHSwish)},
}} {
VPU_THROW_UNLESS(_core != nullptr, "Argument core is null");
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vpu/frontend/frontend.hpp>
+
+#include <memory>
+#include <string>
+
+namespace vpu {
+
+namespace {
+
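+// Graph-transformer stage for the GatherND operation: it requires compact
+// strides, keeps the default layouts, and serializes the "batch_dims"
+// attribute together with the data, output and indices buffers.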
+class GatherNDStage final : public StageNode {
+public:
+ using StageNode::StageNode;
+
+protected:
+ StagePtr cloneImpl() const override {
+ return std::make_shared<GatherNDStage>(*this);
+ }
+
+ void propagateDataOrderImpl(StageDataInfo<DimsOrder> &orderInfo) override {
+    const auto input0 = inputEdge(0)->input();
+    const auto input1 = inputEdge(1)->input();
+    const auto output = outputEdge(0)->output();
+
+    // Keep the default order derived from each tensor's rank.
+    orderInfo.setInput(inputEdge(0),
+                       DimsOrder::fromNumDims(input0->desc().numDims()));
+    orderInfo.setInput(inputEdge(1),
+                       DimsOrder::fromNumDims(input1->desc().numDims()));
+    orderInfo.setOutput(outputEdge(0),
+                        DimsOrder::fromNumDims(output->desc().numDims()));
+ }
+
+ void getDataStridesRequirementsImpl(
+ StageDataInfo<StridesRequirement> &stridesInfo) override {
+ for (const auto &inEdge : inputEdges()) {
+ stridesInfo.setInput(inEdge, StridesRequirement::compact());
+ }
+ stridesInfo.setOutput(outputEdge(0), StridesRequirement::compact());
+ }
+
+ void finalizeDataLayoutImpl() override {}
+
+  void getBatchSupportInfoImpl(StageDataInfo<BatchSupport> &batchInfo) override {}
+
+ StageSHAVEsRequirements getSHAVEsRequirementsImpl() const override {
+ return StageSHAVEsRequirements::NotNeeded;
+ }
+
+ void initialCheckImpl() const override {
+    VPU_THROW_UNLESS(numInputs() == 2,
+                     "{} stage with name {} must have only 2 inputs, actually "
+                     "provided {} inputs",
+                     type(), name(), numInputs());
+    VPU_THROW_UNLESS(numOutputs() == 1,
+                     "{} stage with name {} must have only 1 output, actually "
+                     "provided {} outputs",
+                     type(), name(), numOutputs());
+ VPU_THROW_UNLESS(inputs()[0]->desc().type() == outputs()[0]->desc().type(),
+ "First input and output must have the same DataType, "
+ "actual input type is {} and output type is {}",
+ inputs()[0]->desc().type(), outputs()[0]->desc().type());
+ assertInputsOutputsTypes(
+ this, {{DataType::U8, DataType::FP16, DataType::S32}, {DataType::S32}},
+ {{DataType::U8, DataType::FP16, DataType::S32}});
+ }
+
+ void serializeParamsImpl(BlobSerializer &serializer) const override {
+ const auto batchDims = attrs().get<int32_t>("batch_dims");
+ serializer.append(batchDims);
+ }
+
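+  // Note: buffers are serialized in the order data, output, indices.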
+ void serializeDataImpl(BlobSerializer &serializer) const override {
+ auto input0 = inputEdge(0)->input();
+ auto input1 = inputEdge(1)->input();
+ auto output = outputEdge(0)->output();
+
+ input0->serializeBuffer(serializer);
+ output->serializeBuffer(serializer);
+ input1->serializeBuffer(serializer);
+ }
+};
+
+} // namespace
+
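+// Builder helper used by the GatherND parser: creates the stage and stores
+// "batch_dims" as a stage attribute so it can be serialized into the blob.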
+Stage StageBuilder::addGatherNDStage(const Model &model,
+ const std::string &name,
+ const ie::CNNLayerPtr &layer,
+ const Data &input, const Data &indices,
+ const Data &output, int32_t batchDims) {
+ auto stage = model->addNewStage<GatherNDStage>(
+ layer->name, StageType::GatherND, layer, {input, indices}, {output});
+
+ stage->attrs().set<int32_t>("batch_dims", batchDims);
+
+ return stage;
+}
+
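+// Parses an IE GatherND layer: validates the number of inputs and outputs,
+// reads the "batch_dims" parameter and adds the corresponding stage.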
+void FrontEnd::parseGatherND(const Model &model, const ie::CNNLayerPtr &layer,
+ const DataVector &inputs,
+ const DataVector &outputs) const {
+ VPU_THROW_UNLESS(layer, "CNNLayer pointer is null.");
+    VPU_THROW_UNLESS(inputs.size() == 2,
+                     "{} layer with name {} must have only 2 inputs, actually "
+                     "provided {} inputs",
+                     layer->type, layer->name, inputs.size());
+ VPU_THROW_UNLESS(outputs.size() == 1,
+ "{} layer with name {} must have only 1 output, actually "
+ "provided {} outputs",
+ layer->type, layer->name, outputs.size());
+
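+    // "batch_dims" defaults to 0 when the layer does not provide it.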
+ const auto batchDims = layer->GetParamAsInt("batch_dims", 0);
+
+ _stageBuilder->addGatherNDStage(model, layer->name, layer, inputs[0],
+ inputs[1], outputs[0], batchDims);
+}
+
+} // namespace vpu
gatherNDArgsSubset1,
::testing::ValuesIn(dPrecisions),
::testing::ValuesIn(iPrecisions),
- ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+ ::testing::Values(CommonTestUtils::DEVICE_CPU),
+ ::testing::Values<Config>({})),
GatherNDLayerTest::getTestCaseName);
const auto gatherNDArgsSubset2 = ::testing::Combine(
gatherNDArgsSubset2,
::testing::ValuesIn(dPrecisions),
::testing::ValuesIn(iPrecisions),
- ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+ ::testing::Values(CommonTestUtils::DEVICE_CPU),
+ ::testing::Values<Config>({})),
GatherNDLayerTest::getTestCaseName);
} // namespace
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "single_layer_tests/gather_nd.hpp"
+#include <vpu/private_plugin_config.hpp>
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+
+const std::vector<InferenceEngine::Precision> netPrecisions = {
+ InferenceEngine::Precision::U8,
+ InferenceEngine::Precision::FP16,
+ InferenceEngine::Precision::FP32,
+};
+
+const std::vector<InferenceEngine::Precision> indicesPrecisions = {
+ InferenceEngine::Precision::I32,
+};
+
+const std::vector<GatherNDParamsSubset> layerParams = {
+ // ngraph examples
+    // N = 1000 does not fit into device memory, so it is replaced with 500.
+    // The kernel probably always computes in FP32 precision.
+ GatherNDParamsSubset{{500, 256, 10, 15}, {25, 125, 3}, 0},
+ GatherNDParamsSubset{{30, 2, 100, 35}, {30, 2, 3, 1}, 2},
+ // some random tests
+ GatherNDParamsSubset{{3, 3}, {2, 2}, 0},
+ GatherNDParamsSubset{{5, 3}, {2, 1}, 0},
+ GatherNDParamsSubset{{5, 3, 4}, {2, 2}, 0},
+ GatherNDParamsSubset{{6, 3, 4}, {2, 1, 2}, 0},
+ GatherNDParamsSubset{{5, 2, 6, 8}, {1}, 0},
+ GatherNDParamsSubset{{6, 6, 9, 7}, {2}, 0},
+ GatherNDParamsSubset{{2, 4, 9, 4}, {3}, 0},
+ GatherNDParamsSubset{{5, 2, 3, 7}, {4}, 0},
+ GatherNDParamsSubset{{2, 2, 2}, {2, 1}, 1},
+ GatherNDParamsSubset{{2, 2, 2, 2}, {2, 1}, 1},
+ GatherNDParamsSubset{{2, 2, 2, 2}, {2, 2, 1}, 2},
+};
+
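+// Batch detection is disabled so the leading dimension of the data shape is
+// compiled as a regular axis rather than being split off as a network batch.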
+INSTANTIATE_TEST_CASE_P(
+ smoke_GatherND,
+ GatherNDLayerTest,
+ testing::Combine(
+ testing::ValuesIn(layerParams),
+ testing::ValuesIn(netPrecisions),
+ testing::ValuesIn(indicesPrecisions),
+ testing::Values(CommonTestUtils::DEVICE_MYRIAD),
+ testing::Values<Config>({{InferenceEngine::MYRIAD_DETECT_NETWORK_BATCH, CONFIG_VALUE(NO)}})),
+ GatherNDLayerTest::getTestCaseName);
+
+} // namespace
#include "functional_test_utils/layer_test_utils.hpp"
+using Config = std::map<std::string, std::string>;
typedef std::tuple<
std::vector<size_t>, // Data shapes
GatherNDParamsSubset,
InferenceEngine::Precision, // Data precision
InferenceEngine::Precision, // Indices precision
- LayerTestsUtils::TargetDevice // Device name
+ LayerTestsUtils::TargetDevice, // Device name
+ Config // Plugin config
> GatherNDParams;
namespace LayerTestsDefinitions {
class GatherNDLayerTest : public testing::WithParamInterface<GatherNDParams>,
- public LayerTestsUtils::LayerTestsCommon {
+ public LayerTestsUtils::LayerTestsCommon {
public:
static std::string getTestCaseName(const testing::TestParamInfo<GatherNDParams> &obj);
InferenceEngine::Precision dPrecision, iPrecision;
int batchDims;
std::string device;
+ Config config;
GatherNDParamsSubset gatherArgsSubset;
- std::tie(gatherArgsSubset, dPrecision, iPrecision, device) = obj.param;
+ std::tie(gatherArgsSubset, dPrecision, iPrecision, device, config) = obj.param;
std::tie(dataShape, indicesShape, batchDims) = gatherArgsSubset;
std::ostringstream result;
result << "DP=" << dPrecision.name() << "_";
result << "IP=" << iPrecision.name() << "_";
result << "device=" << device;
+ if (!config.empty()) {
+ result << "_config=";
+ for (const auto& cfg : config) {
+ result << "{" << cfg.first << ": " << cfg.second << "}";
+ }
+ }
+
return result.str();
}
InferenceEngine::Precision dPrecision, iPrecision;
int batchDims;
GatherNDParamsSubset gatherArgsSubset;
- std::tie(gatherArgsSubset, dPrecision, iPrecision, targetDevice) = this->GetParam();
+ std::tie(gatherArgsSubset, dPrecision, iPrecision, targetDevice, configuration) = this->GetParam();
std::tie(dataShape, indicesShape, batchDims) = gatherArgsSubset;
auto ngDPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(dPrecision);
const std::vector<int64_t>& axes,
float eps,
ngraph::op::EpsMode epsMode);
+
} // namespace builder
} // namespace ngraph