// Copies the layer-mapping tables built by the program into this graph so
// later queries (perf counters, layer-by-name lookup) can resolve ids.
// NOTE(review): this span is a diff hunk; the '+' line adds the new
// IR-name -> nGraph-names map. The trailing "return res;" cannot belong to
// this void function — it appears to be the tail of an adjacent lambda cut
// off by the hunk boundary. Confirm against the full file.
void CLDNNGraph::UpdateLayersMaps() {
    primitiveIDs = m_program->primitiveIDs;
    primitivesToIRLayersMap = m_program->primitivesToIRLayersMap;
+    IRToNgraphLayersMap = m_program->IRToNgraphLayersMap;
    prevPrimitiveIDs = m_program->prevPrimitiveIDs;
    profilingIDs = m_program->profilingIDs;
    perfMap = m_program->perfMap;
    return res;
};
// Splits `src` into the non-empty tokens separated by `delimiter`.
// Empty tokens produced by leading/trailing/consecutive delimiters are
// dropped, matching the original behavior.
// Fixes: (1) an empty `delimiter` previously spun forever, because
// find("") matches at every position and `prev` never advanced — it now
// yields the whole source as a single token; (2) both parameters were
// taken by value, copying the strings on every call.
auto split_string = [](const std::string& src, const std::string& delimiter = ",") -> std::vector<std::string> {
    std::vector<std::string> tokens;
    if (src.empty())
        return tokens;
    if (delimiter.empty()) {  // guard: empty needle would never advance the scan position
        tokens.push_back(src);
        return tokens;
    }
    size_t prev = 0;
    while (prev <= src.length()) {
        size_t pos = src.find(delimiter, prev);
        if (pos == std::string::npos)
            pos = src.length();
        if (pos > prev)  // skip empty tokens between back-to-back delimiters
            tokens.push_back(src.substr(prev, pos - prev));
        prev = pos + delimiter.length();
    }
    return tokens;
};
+
    // NOTE(review): two lambdas are fused here by a diff-hunk boundary.
    // Lines 1-3 are the start of `remove_type_from_name` (strips a "type:"
    // prefix from a primitive name); from the primitivesToIRLayersMap lookup
    // onward the body belongs to a DIFFERENT lambda that maps a primitive id
    // to its originating nGraph layer names. Confirm against the full file.
    auto remove_type_from_name = [](const std::string& name) -> std::string {
        auto it = std::find(name.begin(), name.end(), ':');
        if (it == name.end() || (it + 1) == name.end())
        // --- start of the second (fused) lambda body ---
        if (primitivesToIRLayersMap.find(name) == primitivesToIRLayersMap.end())
            return {};
-        return primitivesToIRLayersMap.at(name);
+        auto cnn_names = primitivesToIRLayersMap.at(name);
+        std::vector<std::string> res;
+
+        // Expand each CNN layer name into the nGraph layer names it was
+        // fused from (comma-separated in IRToNgraphLayersMap); names with
+        // no nGraph mapping are passed through unchanged.
+        for (auto& cnn_name : cnn_names) {
+            if (IRToNgraphLayersMap.find(cnn_name) != IRToNgraphLayersMap.end()) {
+                auto ngraph_names = split_string(IRToNgraphLayersMap.at(cnn_name));
+                res.insert(res.end(), ngraph_names.begin(), ngraph_names.end());
+            } else {
+                res.push_back(cnn_name);
+            }
+        }
+        return res;
    };
    // NOTE(review): another hunk fusion — the `create_layer` lambda header is
    // immediately followed by what look like class member declarations from a
    // different hunk, then by loop-body statements from the graph-build pass.
    // Confirm the real surrounding code before relying on this span.
    auto create_layer = [&](const cldnn::primitive_info& prim_info) -> CNNLayer::Ptr {
    std::vector<std::shared_ptr<cldnn::network>> m_networks;
    std::map<std::string, cldnn::primitive_id> primitiveIDs;
    std::map<cldnn::primitive_id, std::vector<std::string>> primitivesToIRLayersMap;
+    // New map: CNN (IR) layer name -> comma-separated original nGraph layer names.
+    std::map<cldnn::primitive_id, std::string> IRToNgraphLayersMap;
    std::map<std::string, std::vector<cldnn::primitive_id>> prevPrimitiveIDs;
    std::map<cldnn::primitive_id, std::pair<std::string, PerfCounter>> perfMap;
        infLoopProtection = 0; // found a layer with all inputs already existing
        CreateSingleLayerPrimitive(topology, currLayer); // currLayer will be advanced if layer was skipped or merged
        prevPrimitiveIDs[layerName] = GetPrevLayersPrimitives(currLayer);
+        // Record the fused nGraph layer names stored by the transformation
+        // pipeline in the layer's "originalLayersNames" runtime param.
+        IRToNgraphLayersMap[currLayer->name] = currLayer->params["originalLayersNames"];
        push_if(GetNextLayers(currLayer));
    }
    // Mapping tables mirrored from the program (see UpdateLayersMaps above).
    std::map<std::string, cldnn::primitive_id> primitiveIDs;
    std::map<cldnn::primitive_id, std::vector<std::string>> primitivesToIRLayersMap;
+    // IR layer name -> comma-separated original nGraph layer names.
+    std::map<cldnn::primitive_id, std::string> IRToNgraphLayersMap;
    std::map<std::string, std::vector<cldnn::primitive_id>> prevPrimitiveIDs;
    std::map<cldnn::primitive_id, std::pair<std::string, PerfCounter>> perfMap;
--- /dev/null
+// Copyright (C) 2020 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+#include "ngraph_conversion_tests/conv_bias_fusion.hpp"
+
+using namespace NGraphConversionTestsDefinitions;
+
+namespace {
+
+// Instantiates the ConvBiasFusion parameterized test for the GPU device
+// only; the test-case suffix comes from getTestCaseName ("Device=<name>").
+INSTANTIATE_TEST_CASE_P(Basic, ConvBiasFusion, ::testing::Values(CommonTestUtils::DEVICE_GPU), ConvBiasFusion::getTestCaseName);
+
+}  // namespace
// Parameterized test fixture: the single string parameter is the target
// device name (e.g. CommonTestUtils::DEVICE_GPU).
class ConvBiasFusion : public CommonTestUtils::TestsCommon, public testing::WithParamInterface<std::string> {
public:
    // Builds the gtest case suffix from the device parameter.
    static std::string getTestCaseName(const testing::TestParamInfo<std::string> & obj);
+
+protected:
+    // Name of the exec-graph node holding the fused Conv+Bias result;
+    // device dependent (see implementation).
+    std::string getOutputName() const;
};
}  // namespace NGraphConversionTestsDefinitions
    // Tail of ConvBiasFusion::getTestCaseName (start cut off by the hunk
    // boundary): encodes the device parameter into the test name.
    return "Device=" + obj.param;
}
+std::string ConvBiasFusion::getOutputName() const {
+ if (this->GetParam() == CommonTestUtils::DEVICE_GPU)
+ return "add_cldnn_output_postprocess";
+ else
+ return "add";
+}
+
// Verifies that the plugin reports the fused Conv+Bias node with the
// original nGraph layer names "add,conv" in its runtime info.
// NOTE(review): the hunk omits the middle of the body (building `f` and
// compiling it into `exeNetwork`) — confirm against the full file.
TEST_P(ConvBiasFusion, ConvBiasFusion) {
    std::string device = this->GetParam();
    std::shared_ptr<ngraph::Function> f(nullptr);
    auto net = exeNetwork.GetExecGraphInfo();
    IE_SUPPRESS_DEPRECATED_START
-    auto add_layer = net.getLayerByName("add");
+    // Look up by device-specific name: the GPU plugin renames its output node.
+    auto add_layer = net.getLayerByName(getOutputName().c_str());
    ASSERT_EQ(add_layer->params["originalLayersNames"], "add,conv");
    IE_SUPPRESS_DEPRECATED_END
}
/*
-// Copyright (c) 2018 Intel Corporation
+// Copyright (c) 2018-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
            // Fragment of the redundant-reorder removal pass (function start
            // is outside this hunk). Marks the reorder node optimizable and
            // records it in the optimized-primitives bookkeeping before
            // attempting to remove it from the graph.
            r_node.can_be_optimized(true);
            r_node.requires_reinterpret(!ident.first);
            if (ident.first) {  // no need of reshape
-                p.add_optimized_primitive_info(r_node.get_primitive()->id);
+                if (r_node.is_output()) {
+                    // If the removed reorder is an output, record its dependency's id
+                    // in the optimized-primitives list instead, because the reorder's
+                    // own name will change after the extract_and_remove call below.
+                    p.add_optimized_primitive_info(r_node.get_dependency(0).get_primitive()->id, {r_node.get_primitive()->id});
+                } else {
+                    p.add_optimized_primitive_info(r_node.get_primitive()->id);
+                }
                p.extract_and_remove(
                    r_node);  // try to remove if possible (with respect to r_node not being marked as output)
            }