From 5636ee623e3d53e2d89ff639a992213a921a7295 Mon Sep 17 00:00:00 2001 From: "Efimov Alexander/AI Tools Lab/./Samsung Electronics" Date: Wed, 25 Jul 2018 15:03:27 +0300 Subject: [PATCH] Bugfix: fix tensor naming in c++ soft backend (#678) Add shape inference in codegen Signed-off-by: Efimov Alexander --- contrib/nnc/libs/backend/soft/include/generator.h | 9 ++- contrib/nnc/libs/backend/soft/src/generator.cpp | 78 +++++++++++++++++------ 2 files changed, 66 insertions(+), 21 deletions(-) diff --git a/contrib/nnc/libs/backend/soft/include/generator.h b/contrib/nnc/libs/backend/soft/include/generator.h index e75f7fa..a87733e 100644 --- a/contrib/nnc/libs/backend/soft/include/generator.h +++ b/contrib/nnc/libs/backend/soft/include/generator.h @@ -2,6 +2,7 @@ #define _NNC_SOFT_BACKEND_GENERATOR_H_ #include "nnc/core/IR/model/graph/graph.h" + #include #include @@ -21,11 +22,12 @@ class BaseCodeGenerator public: void generate(nncc::contrib::core::IR::model::Graph *g); +protected: + virtual void formatTensorNames(const ModelAnalyzer &ma) = 0; virtual void materializeHeader(std::ostream &out, const ModelAnalyzer &ma) = 0; virtual void materializeCode(std::ostream &out, const ModelAnalyzer &ma) = 0; void materializeModelParams(std::ostream &out, const ModelAnalyzer &ma); -protected: BaseCodeGenerator(BaseCodeGenerator &g) = default; // check validity of selected output files, throws appropriate exception on error @@ -34,6 +36,8 @@ protected: BaseCodeGenerator(const std::string &headerFile, const std::string &codeFile, const std::string &modelFile); + std::vector _formattedTensors; + // Code output file std::string _headerFile; // Code output file @@ -57,6 +61,7 @@ public: const std::string &modelFile); protected: + void formatTensorNames(const ModelAnalyzer &ma) override; void materializeHeader(std::ostream &out, const ModelAnalyzer &ma) override; void gatherOperationArguments(const ModelAnalyzer &ma, @@ -80,6 +85,8 @@ public: const std::string &codeFile, const std::string 
&modelFile); +protected: + void formatTensorNames(const ModelAnalyzer &ma) override; void materializeHeader(std::ostream &out, const ModelAnalyzer &ma) override; void materializeCode(std::ostream &out, const ModelAnalyzer &ma) override; }; diff --git a/contrib/nnc/libs/backend/soft/src/generator.cpp b/contrib/nnc/libs/backend/soft/src/generator.cpp index 4df3636..9eea247 100644 --- a/contrib/nnc/libs/backend/soft/src/generator.cpp +++ b/contrib/nnc/libs/backend/soft/src/generator.cpp @@ -151,6 +151,8 @@ void BaseCodeGenerator::generate(Graph *g) // visit and analyze graph ModelAnalyzer ma; g->accept(&ma); + // rename tensors for specific backend language + formatTensorNames(ma); // Print header auto headerStream = getStream(_headerFile); materializeHeader(*headerStream, ma); @@ -176,6 +178,11 @@ CCodeGenerator CCodeGenerator::create(const std::string &headerFile, return gen; } +void CCodeGenerator::formatTensorNames(const ModelAnalyzer &ma) +{ + // TODO format tensor names according to c backend requirements +} + void CCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma) { // TODO emit C header to out stream @@ -195,6 +202,36 @@ CPPCodeGenerator CPPCodeGenerator::create(const std::string &headerFile, return gen; } +void CPPCodeGenerator::formatTensorNames(const ModelAnalyzer &ma) +{ + int tmpTensors = 0; + for (const ModelAnalyzer::TensorDescription &td: ma.getTensors()) + { + string formattedName; + if(td._name.empty()) + { + assert(!td._isNNInput && !td._isNNOutput); + formattedName = "Tensor_" + to_string(tmpTensors++); + } + else + { + if (td._isNNInput || td._isNNOutput) + { + formattedName.append("_"); + } + formattedName.append(td._name); + for (char &c: formattedName) + { + if (!isalnum(c)) + { + c = '_'; + } + } + } + _formattedTensors.push_back(std::move(formattedName)); + } +} + void CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma) { string className = ma.getModelName() + "Model"; @@ -207,13 +244,13 @@ void 
CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma) // generate input setters for (const size_t inId: ma.getInputs()) { - const string &tName = ma.getTensors()[inId]._name; + const string &tName = _formattedTensors[inId]; out << " void set" << tName << "(const Tensor& t);\n"; } // generate output getters for (const size_t outId: ma.getOutputs()) { - const string &tName = ma.getTensors()[outId]._name; + const string &tName = _formattedTensors[outId]; out << " std::shared_ptr<Tensor> get" << tName << "();\n"; } out << " void doInference();\n\n" @@ -224,19 +261,20 @@ void CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma) // generate input/output tensors for (const size_t inId: ma.getInputs()) { - const string &tName = ma.getTensors()[inId]._name; + const string &tName = _formattedTensors[inId]; out << " Tensor " << tName << ";\n"; } for (const size_t outId: ma.getOutputs()) { - const string &tName = ma.getTensors()[outId]._name; + const string &tName = _formattedTensors[outId]; out << " std::shared_ptr<Tensor> " << tName << ";\n"; } out << "};\n"; } // print allocation of temporary tensors -static void printTmpTensors(ostream &out, const ModelAnalyzer &ma, const ModelAnalyzer::OpDescr &op) +void printTmpTensors(ostream &out, const ModelAnalyzer &ma, + const vector<string> &formatted, const ModelAnalyzer::OpDescr &op) { for (size_t id: op._outputs) { @@ -244,7 +282,7 @@ static void printTmpTensors(ostream &out, const ModelAnalyzer &ma, const ModelAn assert(!td._isNNInput && "no input nodes should be inserted into inference sequence"); if (td._isNNOutput) continue; - const string &tName = ma.getTensors()[id]._name; + const string &tName = formatted[id]; out << " Tensor " << tName << ";\n"; } } @@ -269,7 +307,7 @@ void CPPCodeGenerator::gatherOperationArguments(const ModelAnalyzer &ma, { for (size_t id: argIds) { - const string &tensorName = ma.getTensors()[id]._name; + const string &tensorName = _formattedTensors[id]; if
(ma.getTensors()[id]._isNNOutput) { args.push_back("*" + tensorName); @@ -292,7 +330,7 @@ void CPPCodeGenerator::materializeInferenceSequence(ostream &out, const ModelAna if (op._type == Type::IN) continue; // create temporary tensors - printTmpTensors(out, ma, op); + printTmpTensors(out, ma, _formattedTensors, op); // materialize call out << " " << op._opName << "("; const auto &prevNodes = op._node->getPrevNodes(); @@ -321,34 +359,34 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma) out << className << "::" << className << "(const string &parametersPath)\n" "{\n" " readParameters(_parameters, parametersPath, " << - ma.getFormatVersion() << ", " << ma.getModelHash() << ");" - "}\n"; + ma.getFormatVersion() << ", " << ma.getModelHash() << ");\n" + "}\n\n"; // gen input setters for (size_t inId: ma.getInputs()) { - const string &inName = ma.getTensors()[inId]._name; - out << "void " << className << "::set_" << inName << "(const Tensor& t)\n" + const string &inName = _formattedTensors[inId]; + out << "void " << className << "::set" << inName << "(const Tensor& t)\n" "{\n" - " _" << inName << " = t;" - "}\n"; + " " << inName << " = t;\n" + "}\n\n"; } // gen output getters for (size_t outId: ma.getOutputs()) { - const string &outName = ma.getTensors()[outId]._name; - out << "shared_ptr<Tensor> " << className <<"::get_" << outName << "()\n" + const string &outName = _formattedTensors[outId]; + out << "shared_ptr<Tensor> " << className <<"::get" << outName << "()\n" "{\n" - " return _" << outName << ";" - "}\n"; + " return " << outName << ";\n" + "}\n\n"; } out << "void " << className << "::doInference()\n" "{\n"; for (size_t outId: ma.getOutputs()) { - const string &outName = ma.getTensors()[outId]._name; - out << " _" << outName << ".reset(new Tensor());\n"; + const string &outName = _formattedTensors[outId]; + out << " " << outName << ".reset(new Tensor());\n"; } // gen inference sequence -- 2.7.4