From: Efimov Alexander/AI Tools Lab/./Samsung Electronics
Date: Wed, 18 Jul 2018 12:29:25 +0000 (+0300)
Subject: Add operation inputs info in ModelAnalyzer (#693)
X-Git-Tag: nncc_backup~2390
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3f335f28d4f025b3c1ba2586ca20c11410e904c2;p=platform%2Fcore%2Fml%2Fnnfw.git

Add operation inputs info in ModelAnalyzer (#693)

Move op inputs inference from code generator to ModelAnalyzer

Signed-off-by: Efimov Alexander
---

diff --git a/contrib/nnc/libs/backend/soft/include/generator.h b/contrib/nnc/libs/backend/soft/include/generator.h
index 40f79b2..e75f7fa 100644
--- a/contrib/nnc/libs/backend/soft/include/generator.h
+++ b/contrib/nnc/libs/backend/soft/include/generator.h
@@ -19,7 +19,6 @@ class ModelAnalyzer;
 class BaseCodeGenerator
 {
 public:
-
   void generate(nncc::contrib::core::IR::model::Graph *g);

   virtual void materializeHeader(std::ostream &out, const ModelAnalyzer &ma) = 0;
@@ -53,12 +52,17 @@ class CPPCodeGenerator: public BaseCodeGenerator
 {
 }

 public:
-
   static CPPCodeGenerator create(const std::string &headerFile,
                                  const std::string &codeFile,
                                  const std::string &modelFile);

+protected:
   void materializeHeader(std::ostream &out, const ModelAnalyzer &ma) override;
+
+  void gatherOperationArguments(const ModelAnalyzer &ma,
+                                const std::vector<size_t> &argIds,
+                                std::vector<std::string> &args);
+
+  void materializeInferenceSequence(std::ostream &out, const ModelAnalyzer &ma);

   void materializeCode(std::ostream &out, const ModelAnalyzer &ma) override;
 };
@@ -72,7 +76,6 @@ class CCodeGenerator: public BaseCodeGenerator
 {
 }

 public:
-
   static CCodeGenerator create(const std::string &headerFile,
                                const std::string &codeFile,
                                const std::string &modelFile);
diff --git a/contrib/nnc/libs/backend/soft/include/model_analyzer.h b/contrib/nnc/libs/backend/soft/include/model_analyzer.h
index 75264ca..9172e19 100644
--- a/contrib/nnc/libs/backend/soft/include/model_analyzer.h
+++ b/contrib/nnc/libs/backend/soft/include/model_analyzer.h
@@ -6,9 +6,12 @@
 #include "nnc/core/linalg/TensorVariant.h"

 #include <cstddef>
+#include <limits>
+#include <list>
 #include <string>
 #include <vector>
 #include <cstdint>
+#include <map>

 namespace nncc
 {
@@ -22,6 +25,8 @@ namespace soft
 namespace ADT = nncc::contrib::core::IR::model::ADT;
 namespace ops = nncc::contrib::core::IR::model::ops;

+const size_t INVALID_TENSOR_ID = std::numeric_limits<size_t>::max();
+
 class ModelAnalyzer: public nncc::contrib::core::IR::model::Visitor
 {
 public:
@@ -40,7 +45,8 @@ public:
   struct TensorDescription
   {
     std::string _name;
-    bool _isNNOutput; // true if is it NN output tensor
+    bool _isNNInput;  // true if this is NN input tensor
+    bool _isNNOutput; // true if this is NN output tensor
   };

   // operation description
@@ -56,22 +62,29 @@ public:
     Type _type;
     ADT::INode *_node;
     std::string _opName;
+    // list of input tensors
+    std::vector<size_t> _inputs;
     // list of output tensors
-    std::vector<TensorDescription> _outputs;
+    std::vector<size_t> _outputs;
     size_t _paramStartOffset;
   };

-  const std::vector<std::string> &getInputs() const
+  const std::vector<size_t> &getInputs() const
   {
     return _inputs;
   }

-  const std::vector<std::string> &getOutputs() const
+  const std::vector<size_t> &getOutputs() const
   {
     return _outputs;
   }

-  const std::vector<OpDescr> &getInferenceSequence() const
+  const std::vector<TensorDescription> &getTensors() const
+  {
+    return _tensors;
+  }
+
+  const std::list<OpDescr> &getInferenceSequence() const
   {
     return _inferenceSequence;
   }
@@ -100,6 +113,8 @@ public:
 private:
   void addOpDescr(ADT::INode *node, const std::string &name);
+  size_t allocateTensor(const std::string &name = std::string(),
+                        bool isNNInput = false, bool isNNOutput = false);
   void packData(const void *data, size_t size);

   template <typename T>
@@ -111,10 +126,12 @@ private:
   const uint32_t _formatVersion = 1;
   uint32_t _modelHash = 0;
   std::vector<char> _packedParameters;
-  std::vector<OpDescr> _inferenceSequence;
-  size_t _localTensorsN = 0;
-  std::vector<std::string> _inputs;
-  std::vector<std::string> _outputs;
+  std::list<OpDescr> _inferenceSequence;
+  size_t _allocatedTensors = 0;
+  std::vector<size_t> _inputs;
+  std::vector<size_t> _outputs;
+  std::vector<TensorDescription> _tensors;
+  std::map<ADT::INode *, const OpDescr *> _nodeToDescr;
 };

 } // namespace soft
diff --git a/contrib/nnc/libs/backend/soft/src/generator.cpp b/contrib/nnc/libs/backend/soft/src/generator.cpp
index 9254051..4df3636 100644
--- a/contrib/nnc/libs/backend/soft/src/generator.cpp
+++ b/contrib/nnc/libs/backend/soft/src/generator.cpp
@@ -205,64 +205,47 @@ void CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma)
          "public:\n"
          "  " << className << "(const std::string &parametersPath);\n";
   // generate input setters
-  for (const string &inName: ma.getInputs())
-    out << "  void set_" << inName << "(const Tensor& t);\n";
+  for (const size_t inId: ma.getInputs())
+  {
+    const string &tName = ma.getTensors()[inId]._name;
+    out << "  void set" << tName << "(const Tensor& t);\n";
+  }
   // generate output getters
-  for (const string &outName: ma.getOutputs())
-    out << "  std::shared_ptr<Tensor> get_" << outName << "();\n";
+  for (const size_t outId: ma.getOutputs())
+  {
+    const string &tName = ma.getTensors()[outId]._name;
+    out << "  std::shared_ptr<Tensor> get" << tName << "();\n";
+  }
   out << "  void doInference();\n\n"
          "private:\n"
          "  " << className << "() = delete;\n"
-         "  " << className << "(const " << className << " &orig) = delete;"
+         "  " << className << "(const " << className << " &orig) = delete;\n"
          "  " << className << " &operator=(const " << className << " &orig) = delete;\n";
   // generate input/output tensors
-  for (const string &inName: ma.getInputs())
-    out << "  Tensor _" << inName << ";\n";
-  for (const string &outName: ma.getOutputs())
-    out << "  std::shared_ptr<Tensor> _" << outName << ";\n";
-  out << "};\n";
-}
-
-// print allocation of temporary tensors
-static void printTmpTensors(ostream &out, const ModelAnalyzer::OpDescr &op)
-{
-  for (const ModelAnalyzer::TensorDescription &td: op._outputs)
+  for (const size_t inId: ma.getInputs())
   {
-    if (td._isNNOutput)
-      continue;
-    out << "  Tensor " << td._name << ";\n";
+    const string &tName = ma.getTensors()[inId]._name;
+    out << "  Tensor " << tName << ";\n";
   }
-}
-
-// generate function output arguments
-static void gatherOperationCallOutputs(const ModelAnalyzer::OpDescr &op, vector<string> &args)
-{
-  for (const ModelAnalyzer::TensorDescription &td: op._outputs)
+  for (const size_t outId: ma.getOutputs())
   {
-    const string &tensorName = td._name;
-    if (td._isNNOutput)
-      args.push_back("*" + tensorName);
-    else
-      args.push_back(tensorName);
+    const string &tName = ma.getTensors()[outId]._name;
+    out << "  std::shared_ptr<Tensor> " << tName << ";\n";
   }
+  out << "};\n";
 }

-// generate function input arguments
-static void gatherOperationCallInputs(const ModelAnalyzer::OpDescr &op, map<INode *, const ModelAnalyzer::OpDescr *> &node2Descr, vector<string> &args)
+// print allocation of temporary tensors
+static void printTmpTensors(ostream &out, const ModelAnalyzer &ma, const ModelAnalyzer::OpDescr &op)
 {
-  for (const INode::IODescriptor &d: op._node->getPrevNodes())
+  for (size_t id: op._outputs)
   {
-    size_t idx = d.index;
-    INode *node = d.node;
-    assert(node2Descr.find(node) != node2Descr.end());
-    const ModelAnalyzer::OpDescr &descr = *node2Descr[node];
-    const ModelAnalyzer::TensorDescription &tDescr = descr._outputs[idx];
-    const string &tensorName = tDescr._name;
-    if (tDescr._isNNOutput)
-      args.push_back("*" + tensorName);
-    else
-      args.push_back(tensorName);
+    const ModelAnalyzer::TensorDescription &td = ma.getTensors()[id];
+    assert(!td._isNNInput && "no input nodes should be inserted into inference sequence");
+    if (td._isNNOutput)
+      continue;
+    const string &tName = ma.getTensors()[id]._name;
+    out << "  Tensor " << tName << ";\n";
   }
 }

@@ -279,20 +262,37 @@ static void printOperationArgs(ostream &out, const vector<string> &args)
   }
 }

+// gather function arguments from vector of tensor descriptions
+void CPPCodeGenerator::gatherOperationArguments(const ModelAnalyzer &ma,
+                                                const vector<size_t> &argIds,
+                                                vector<string> &args)
+{
+  for (size_t id: argIds)
+  {
+    const string &tensorName = ma.getTensors()[id]._name;
+    if (ma.getTensors()[id]._isNNOutput)
+    {
+      args.push_back("*" + tensorName);
+    }
+    else
+    {
+      args.push_back(tensorName);
+    }
+  }
+}
+
 // generate inference sequence
-static void materializeCPPInferenceSequence(ostream &out, const ModelAnalyzer &ma)
+void CPPCodeGenerator::materializeInferenceSequence(ostream &out, const ModelAnalyzer &ma)
 {
   using OpDescr = ModelAnalyzer::OpDescr;
-  map<INode *, const OpDescr *> node2Descr;
   for (const ModelAnalyzer::OpDescr &op: ma.getInferenceSequence())
   {
-    node2Descr.insert(pair<INode *, const OpDescr *>(op._node, &op));
     using Type = OpDescr::Type;
     using TensorDescription = ModelAnalyzer::TensorDescription;
     if (op._type == Type::IN)
       continue;
     // create temporary tensors
-    printTmpTensors(out, op);
+    printTmpTensors(out, ma, op);
     // materialize call
     out << "  " << op._opName << "(";
     const auto &prevNodes = op._node->getPrevNodes();
@@ -300,11 +300,11 @@ static void materializeCPPInferenceSequence(ostream &out, const ModelAnalyzer &ma)
     vector<string> args;
     args.reserve(prevNodes.size() + outTensors.size() + 1);
     // gather output arguments
-    gatherOperationCallOutputs(op, args);
+    gatherOperationArguments(ma, op._outputs, args);
     // parameters offset
     args.push_back(to_string(op._paramStartOffset));
     // gather input arguments
-    gatherOperationCallInputs(op, node2Descr, args);
+    gatherOperationArguments(ma, op._inputs, args);
     // put arguments into stream
     printOperationArgs(out, args);
     out << ");\n";
@@ -325,8 +325,9 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma)
          "}\n";

   // gen input setters
-  for (const string &inName: ma.getInputs())
+  for (size_t inId: ma.getInputs())
   {
+    const string &inName = ma.getTensors()[inId]._name;
     out << "void " << className << "::set_" << inName << "(const Tensor& t)\n"
            "{\n"
            "  _" << inName << " = t;"
@@ -334,8 +335,9 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma)
   }

   // gen output getters
-  for (const string &outName: ma.getOutputs())
+  for (size_t outId: ma.getOutputs())
   {
+    const string &outName = ma.getTensors()[outId]._name;
     out << "shared_ptr<Tensor> " << className <<"::get_" << outName << "()\n"
            "{\n"
            "  return _" << outName << ";"
@@ -343,13 +345,14 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma)
   }

   out << "void " << className << "::doInference()\n"
          "{\n";
-  for (const string &outName: ma.getOutputs())
+  for (size_t outId: ma.getOutputs())
   {
+    const string &outName = ma.getTensors()[outId]._name;
     out << "  _" << outName << ".reset(new Tensor());\n";
   }

   // gen inference sequence
-  materializeCPPInferenceSequence(out, ma);
+  materializeInferenceSequence(out, ma);
   out << "}";
 }

diff --git a/contrib/nnc/libs/backend/soft/src/model_analyzer.cpp b/contrib/nnc/libs/backend/soft/src/model_analyzer.cpp
index eb7702b..f6dd8c7 100644
--- a/contrib/nnc/libs/backend/soft/src/model_analyzer.cpp
+++ b/contrib/nnc/libs/backend/soft/src/model_analyzer.cpp
@@ -93,29 +93,58 @@ void ModelAnalyzer::addOpDescr(ADT::INode *node, const string &opName)
 {
   size_t offset = _packedParameters.size();
   OpDescr::Type type = OpDescr::Type::ORDINARY;
-  vector<TensorDescription> outputs;
+  vector<size_t> nodeOutputs;
   const std::string &name = node->getName();
+  size_t nodeTid = INVALID_TENSOR_ID;
   if (node->getPrevNodes().empty())
   {
-    _inputs.push_back(name);
+    // process input node
+    nodeTid = allocateTensor(name, true);
+    _inputs.push_back(nodeTid);
     type = OpDescr::Type::IN;
-  } else
-  if (!name.empty())
+  }
+  else if (!name.empty())
   {
-    _outputs.push_back(name);
+    // process output node
+    nodeTid = allocateTensor(name, false, true);
+    _outputs.push_back(nodeTid);
     type = OpDescr::Type::OUT;
   }
-  if (type != OpDescr::Type::ORDINARY)
+  else
   {
-    outputs.push_back({"_" + name, type == OpDescr::Type::OUT});
-  } else
+    // process ordinary node
+    nodeTid = allocateTensor();
+  }
+  assert(nodeTid != INVALID_TENSOR_ID);
+  nodeOutputs.push_back(nodeTid);
+  // process node inputs
+  vector<size_t> nodeInputs;
+  for (const ADT::INode::IODescriptor &d: node->getPrevNodes())
   {
-    outputs.push_back({"tensor_" + to_string(_localTensorsN++), false});
+    size_t idx = d.index;
+    ADT::INode *node = d.node;
+    assert(_nodeToDescr.find(node) != _nodeToDescr.end());
+    const OpDescr &descr = *_nodeToDescr[node];
+    const size_t &inTid = descr._outputs[idx];
+    nodeInputs.push_back(inTid);
   }
-  _inferenceSequence.push_back({type, node, opName, std::move(outputs), offset});
+  _inferenceSequence.push_back({type, node, opName,
+                                std::move(nodeInputs),
+                                std::move(nodeOutputs),
+                                offset});
+  _nodeToDescr[node] = &_inferenceSequence.back();
   // TODO add model hashing
 }

+size_t ModelAnalyzer::allocateTensor(const string &name, bool isNNInput, bool isNNOutput)
+{
+  assert(!(name.empty() && (isNNOutput || isNNInput)) && "Input or output tensor must have name");
+  size_t id = _allocatedTensors++;
+  _tensors.push_back({name, isNNInput, isNNOutput});
+  assert(_tensors.size() == _allocatedTensors);
+  return id;
+}
+
 void ModelAnalyzer::visit(ADT::INode *node, ops::ConcatOp &op)
 {
   addOpDescr(node, "concat");
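
Below is a minimal, self-contained sketch (not part of the patch) of the tensor-id scheme this commit introduces: every tensor gets a numeric id whose TensorDescription lives in a side table, each OpDescr stores plain id lists for its inputs and outputs, and the generator resolves argument names by id instead of re-walking graph predecessors through a node-to-descriptor map. The struct and function names mirror the patch; the driver in main() and the "softmax" example are hypothetical.

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Mirrors the patch: every tensor gets a numeric id, and its
// description lives in a side table indexed by that id.
struct TensorDescription
{
  std::string _name;
  bool _isNNInput;  // true if this is NN input tensor
  bool _isNNOutput; // true if this is NN output tensor
};

// An operation carries id lists for its inputs and outputs, so the
// generator no longer needs the node2Descr map at materialization time.
struct OpDescr
{
  std::string _opName;
  std::vector<size_t> _inputs;
  std::vector<size_t> _outputs;
};

// Resolve argument names by id; NN outputs are held as smart pointers
// in the generated class, so they are dereferenced with '*'.
static void gatherOperationArguments(const std::vector<TensorDescription> &tensors,
                                     const std::vector<size_t> &argIds,
                                     std::vector<std::string> &args)
{
  for (size_t id : argIds)
  {
    const TensorDescription &td = tensors[id];
    args.push_back(td._isNNOutput ? "*" + td._name : td._name);
  }
}

int main()
{
  // Side table built during analysis: id 0 is an NN input,
  // id 1 a temporary, id 2 an NN output.
  std::vector<TensorDescription> tensors = {
      {"in", true, false}, {"tmp", false, false}, {"out", false, true}};

  // One inference step reading the temporary and writing the output.
  OpDescr op{"softmax", {1}, {2}};

  // Outputs first, then inputs, as in materializeInferenceSequence
  // (the real generator also inserts a parameters offset in between).
  std::vector<std::string> args;
  gatherOperationArguments(tensors, op._outputs, args);
  gatherOperationArguments(tensors, op._inputs, args);

  std::cout << op._opName << "(";
  for (size_t i = 0; i < args.size(); ++i)
    std::cout << (i ? ", " : "") << args[i];
  std::cout << ");\n"; // prints: softmax(*out, tmp);
  return 0;
}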