From: Efimov Alexander/AI Tools Lab/./Samsung Electronics Date: Wed, 12 Sep 2018 17:37:20 +0000 (+0300) Subject: [nnc] Documentation for soft backend (#1237) X-Git-Tag: nncc_backup~1834 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=55ec4b96ec3de0bbb9ae28b720f2c86854cde893;p=platform%2Fcore%2Fml%2Fnnfw.git [nnc] Documentation for soft backend (#1237) Added doxygen documentation for soft backend Signed-off-by: Efimov Alexander --- diff --git a/contrib/nnc/include/passes/soft_backend/BaseGenerator.h b/contrib/nnc/include/passes/soft_backend/BaseGenerator.h index 7d6a3d9..0a4762c 100644 --- a/contrib/nnc/include/passes/soft_backend/BaseGenerator.h +++ b/contrib/nnc/include/passes/soft_backend/BaseGenerator.h @@ -23,15 +23,54 @@ class ModelAnalyzer; class Serializer; +/** + * @brief BaseCodeGenerator class provides interface for artifact + * generation and contains common functionality for all soft backends + * + * Derivatives should override: + * + constructor to set proper filenames for artifact parts + * + code file generation function: materializeCode + * + header file generation to expose artifact interface to user + */ class BaseCodeGenerator : public Pass { public: + /** + * @brief Method represents base generation sequence: analysis, serialization, header/code generation, etc + * @param data PassData object containing Model IR + * @return returns empty PassData object + */ PassData run(PassData data) override; protected: + /** + * @brief This function processes tensor names + * to transform them into valid identifiers of target language + * @param ma Intermediate artifact information + */ virtual void formatTensorNames(const ModelAnalyzer &ma) = 0; + /** + * @brief Derivative classes should override this function to generate header of artifact + * @param out Stream to write header text + * @param ma Intermediate artifact information + */ virtual void materializeHeader(std::ostream &out, const ModelAnalyzer &ma) = 0; + /** + * @brief Derivative 
classes should override this function to generate implementation of artifact + * @param out Stream to write implementation text + * @param ma Intermediate artifact information + * @param s Serializer holds parameters of network and various meta-information: serializer version, hashes, etc + */ virtual void materializeCode(std::ostream &out, const ModelAnalyzer &ma, const Serializer &s) = 0; + /** + * @brief Writes serialized parameters to out stream + * @param out Stream to write serialized parameters + * @param s Serializer holds parameters of network + * + * Contents of generated file: + * + header(magic number to identify file type, protocol version, hashes of network and params) + * + array of serialized network parameters + */ void materializeModelParams(std::ostream &out, const Serializer &s); BaseCodeGenerator(); diff --git a/contrib/nnc/include/passes/soft_backend/CGenerator.h b/contrib/nnc/include/passes/soft_backend/CGenerator.h index e94d2c3..936431f 100644 --- a/contrib/nnc/include/passes/soft_backend/CGenerator.h +++ b/contrib/nnc/include/passes/soft_backend/CGenerator.h @@ -13,7 +13,10 @@ namespace backend namespace soft { -// C generator +/** + * @brief CCodeGenerator implements interfaces that BaseCodeGenerator provides for C language + * This includes header file generation, code file generation and variable renaming according to C naming requirements + */ class CCodeGenerator: public BaseCodeGenerator { public: diff --git a/contrib/nnc/include/passes/soft_backend/CPPGenerator.h b/contrib/nnc/include/passes/soft_backend/CPPGenerator.h index e39eab7..b261ba2 100644 --- a/contrib/nnc/include/passes/soft_backend/CPPGenerator.h +++ b/contrib/nnc/include/passes/soft_backend/CPPGenerator.h @@ -13,7 +13,10 @@ namespace backend namespace soft { -// C++ generator +/** + * @brief CPPCodeGenerator implements interfaces that BaseCodeGenerator provides for C++ language + * This includes header file generation, code file generation and variable renaming according to C++ 
naming requirements + */ class CPPCodeGenerator: public BaseCodeGenerator { public: @@ -23,9 +26,22 @@ protected: void formatTensorNames(const ModelAnalyzer &ma) override; void materializeHeader(std::ostream &out, const ModelAnalyzer &ma) override; + /** + * @brief Form list of function call arguments + * @param ma Intermediate model representation + * @param argIds List of argument variable ids + * @param args Result list of arguments transformed in form of strings + */ void gatherOperationArguments(const ModelAnalyzer &ma, const std::vector &argIds, std::vector &args); + /** + * @brief Prints setter of artifact + * @param out Output stream + * @param className Name of artifact + * @param setterName Name of setter function + * @param varName Name of variable that setter fills + */ void printSetter(std::ostream &out, const std::string &className, const std::string &setterName, const std::string &varName); void printGetter(std::ostream &out, const std::string &className, const std::string &setterName, const std::string &varName); void materializeInferenceSequence(std::ostream &out, const ModelAnalyzer &ma); diff --git a/contrib/nnc/passes/soft_backend/BaseGenerator.cpp b/contrib/nnc/passes/soft_backend/BaseGenerator.cpp index 8004e27..90e1ced 100644 --- a/contrib/nnc/passes/soft_backend/BaseGenerator.cpp +++ b/contrib/nnc/passes/soft_backend/BaseGenerator.cpp @@ -35,6 +35,13 @@ namespace soft namespace { +/** + * @brief Creates pointer to some output stream to encapsulate resource management into deleter + * for example may be used to return std::cout + * @param path Path to opened file + * @return Pointer to output stream + * @throws PluginException if it did not succeed + */ unique_ptr getStream(const string &path) { unique_ptr ofs(new ofstream(path)); @@ -45,6 +52,11 @@ unique_ptr getStream(const string &path) return ofs; } +/** + * @brief Creates directory + * @param path Path to desired directory + * @throws PluginException if it did not succeed + */ void 
createDir(const string &path) { int res = mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH); diff --git a/contrib/nnc/passes/soft_backend/cpp_generator.cpp b/contrib/nnc/passes/soft_backend/cpp_generator.cpp index 710069d..698a0bc 100644 --- a/contrib/nnc/passes/soft_backend/cpp_generator.cpp +++ b/contrib/nnc/passes/soft_backend/cpp_generator.cpp @@ -33,6 +33,10 @@ namespace backend namespace soft { +/** + * @brief Renames tensors with respect to C++ naming conventions + * @param ma Intermediate artifact information + */ void CPPCodeGenerator::formatTensorNames(const ModelAnalyzer &ma) { int tmpTensors = 0; @@ -63,6 +67,13 @@ void CPPCodeGenerator::formatTensorNames(const ModelAnalyzer &ma) } } +/** + * + Writes to out support data types and methods: Shape, Tensor. + * This is part of user interface to feed data to artifact. + * + Writes actual model class that contains: + * network constructor, setters to feed data to network, getters to get results, + * and doInference method that performs actual inference. 
+ */ void CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma) { string className = ma.getModelName() + "Model"; @@ -115,8 +126,14 @@ void CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma) out << "};\n"; } -// print allocation of temporary tensors -void printTmpTensors(ostream &out, const ModelAnalyzer &ma, +/** + * @brief prints list of temporary variables for given operation + * @param out Stream to write program text + * @param ma Intermediate artifact representation + * @param formatted List of formatted tensor names + * @param op Operation that requires variables + */ +static void printTmpTensors(ostream &out, const ModelAnalyzer &ma, const vector &formatted, const ModelAnalyzer::OpDescr &op) { for (size_t id: op._outputs) @@ -130,7 +147,11 @@ void printTmpTensors(ostream &out, const ModelAnalyzer &ma, } } -// print operation call arguments +/** + * @brief Prints list of function arguments, separated by commas + * @param out Stream to write program text + * @param args arguments to print + */ static void printOperationArgs(ostream &out, const vector &args) { bool insertComma = false; @@ -143,7 +164,6 @@ static void printOperationArgs(ostream &out, const vector &args) } } -// gather function arguments from vector of tensor descriptions void CPPCodeGenerator::gatherOperationArguments(const ModelAnalyzer &ma, const vector &argIds, vector &args) @@ -178,7 +198,6 @@ void CPPCodeGenerator::printGetter(ostream &out, const string &className, const "}\n\n"; } -// generate inference sequence void CPPCodeGenerator::materializeInferenceSequence(ostream &out, const ModelAnalyzer &ma) { using OpDescr = ModelAnalyzer::OpDescr; @@ -207,7 +226,9 @@ void CPPCodeGenerator::materializeInferenceSequence(ostream &out, const ModelAna } } -// TODO think about better string formatting to make code more readable +/** + * Function writes to output stream needed code snippets, and implementations of artifact class functions. 
+ */ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, const Serializer &s) { string className = ma.getModelName() + "Model"; diff --git a/contrib/nnc/passes/soft_backend/model_analyzer.cpp b/contrib/nnc/passes/soft_backend/model_analyzer.cpp index a49d67f..cfa639f 100644 --- a/contrib/nnc/passes/soft_backend/model_analyzer.cpp +++ b/contrib/nnc/passes/soft_backend/model_analyzer.cpp @@ -117,6 +117,10 @@ void ModelAnalyzer::visit(ADT::INode *node, ops::SoftmaxOp &op) addOpDescr(node, "softmax"); } +/** + * Model IR does not separate different types of pool operations, but for code generation + * it is easier to implement different types of pooling by different functions + */ void ModelAnalyzer::visit(ADT::INode *node, ops::PoolOp &op) { const char *funcName = nullptr; diff --git a/contrib/nnc/passes/soft_backend/model_analyzer.h b/contrib/nnc/passes/soft_backend/model_analyzer.h index baa5deb..fc3af53 100644 --- a/contrib/nnc/passes/soft_backend/model_analyzer.h +++ b/contrib/nnc/passes/soft_backend/model_analyzer.h @@ -27,6 +27,10 @@ namespace ops = model::ops; const size_t INVALID_TENSOR_ID = std::numeric_limits::max(); +/** + * @brief Constructs inference sequence for given computational graph, + * gathers list of variables used in artifact. + */ class ModelAnalyzer: public model::IVisitor { public: @@ -45,6 +49,10 @@ public: void visit(ADT::INode *node, ops::BatchNormOp &op) override; void visit(ADT::INode *node, ops::DropoutOp &op) override; + /** + * @brief Represents variable used in artifact. + * This variable can store inputs, outputs of network and temporary data. 
+ */ struct TensorDescription { std::string _name; @@ -52,7 +60,9 @@ public: bool _isNNOutput; // true if this is NN output tensor }; - // operation description + /** + * @brief OpDescr represents operation call in inference sequence + */ struct OpDescr { enum class Type @@ -72,43 +82,79 @@ public: size_t _paramStartOffset; }; + /** + * @return vector of id's of network input tensors + */ const std::vector &getInputs() const { return _inputs; } + /** + * @return vector of id's of tensors with unique names taken from Model IR + */ const std::vector &getNamedTensors() const { return _named_tensors; } + /** + * @return vector of id's of network output tensors + */ const std::vector &getOutputs() const { return _outputs; } + /** + * @return vector of all network tensors + */ const std::vector &getTensors() const { return _tensors; } + /** + * @return Inference sequence + */ const std::list &getInferenceSequence() const { return _inferenceSequence; } + /** + * @return Inference sequence + */ std::list &getInferenceSequence() { return _inferenceSequence; } + /** + * @return Model name, taken from Model IR + */ const std::string &getModelName() const { return _modelName; } private: + /** + * @brief Common function to add function call in inference sequence + * @param node Node representing added call + * @param name Function name + * + * Inserts information about CG operation into inference sequence: name of operation, + * creates tensors for operation outputs, binds operation inputs with tensors from previous operations + */ void addOpDescr(ADT::INode *node, const std::string &name); + /** + * @brief Creates variable in artifact + * @param name Name of variable + * @param isNNInput If true this variable can be set by "set" method of artifact + * @param isNNOutput If true this variable can be gathered by user by "get" method of artifact + * @return Id of created variable + */ size_t allocateTensor(const std::string &name = std::string(), bool isNNInput = false, bool 
isNNOutput = false); diff --git a/contrib/nnc/passes/soft_backend/serializer.cpp b/contrib/nnc/passes/soft_backend/serializer.cpp index f0ad082..425437f 100644 --- a/contrib/nnc/passes/soft_backend/serializer.cpp +++ b/contrib/nnc/passes/soft_backend/serializer.cpp @@ -63,7 +63,12 @@ void Serializer::serializeT(const T &obj) packData(&obj, sizeof(T)); } -// convert enum to it's underlying type +/** + * @brief Convert enum to its underlying type + * @tparam E Enum type + * @param enumVal Value of enum + * @return Integer value that corresponds to enumVal + */ template typename underlying_type::type etoi(E enumVal) { diff --git a/contrib/nnc/passes/soft_backend/serializer.h b/contrib/nnc/passes/soft_backend/serializer.h index ba8252a..d5296c0 100644 --- a/contrib/nnc/passes/soft_backend/serializer.h +++ b/contrib/nnc/passes/soft_backend/serializer.h @@ -22,6 +22,16 @@ namespace model = nncc::contrib::core::IR::model; namespace ADT = model::ADT; namespace ops = model::ops; +/** + * @brief Serializer of network parameters for soft backend + * + * Serializer class is responsible for serialization of given computational graph parameters and + * binding of inference operations to this data. + * It owns buffer that contains serialized data. + * To serialize data `serialize` method should be called with sequence from ModelAnalyzer object + * To gather this vector use `getBuffer` method. 
+ * Objects of this class are one-off and not designed to serialize more than one IR + */ class Serializer: public model::IVisitor { public: @@ -58,11 +68,36 @@ public: return _modelHash; } private: + /** + * @brief Low level function to serialize untyped data buffer + * @param data Buffer containing data to serialize + * @param size Size of data to serialize + */ void packData(const void *data, size_t size); template + /** + * @brief Serialize trivially copyable objects + * @tparam T Type of object to serialize + * @param obj Reference to object to serialize + */ void serializeT(const T &obj); + /** + * @brief Serialize Tensor shape object + * @param s shape to serialize + */ void serializeShape(const nncc::contrib::core::data::Shape &s); + /** + * @brief Function serializes type of given tensor base data, + * it's shape and raw data in 'c' format(i.e. layout of multidimensional C array) + * @param t Tensor to serialize + */ void serializeTensor(const contrib::core::ADT::TensorVariant &t); + /** + * @brief Serialize pads for operations like Conv2D + * @tparam Op Operation type + * @param op Reference to operation where pads are stored + * @param padsRank Number of pads to serialize + */ template void serializePads(const Op &op, int32_t padsRank);