class Serializer;
+/**
+ * @brief BaseCodeGenerator class provides interface for artifact
+ * generation and contains common functionality for all soft backends
+ *
+ * Derivatives should override:
+ * + constructor to set proper filenames for artifact parts
+ * + code file generation function: materializeCode
+ * + header file generation to expose artifact interface to user
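+ *
+ * A minimal derivative might look like this (sketch only; "FooCodeGenerator" is an illustrative name,
+ * not part of the codebase):
+ * @code
+ * class FooCodeGenerator : public BaseCodeGenerator
+ * {
+ * public:
+ *   FooCodeGenerator(); // sets file names of artifact parts
+ * protected:
+ *   void formatTensorNames(const ModelAnalyzer &ma) override;
+ *   void materializeHeader(std::ostream &out, const ModelAnalyzer &ma) override;
+ *   void materializeCode(std::ostream &out, const ModelAnalyzer &ma, const Serializer &s) override;
+ * };
+ * @endcode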
+ */
class BaseCodeGenerator : public Pass
{
public:
+ /**
+ * @brief Runs the base generation sequence: analysis, serialization, header/code generation, etc.
+ * @param data PassData object containing Model IR
+ * @return Empty PassData object
+ */
PassData run(PassData data) override;
protected:
+ /**
+ * @brief Transforms tensor names into valid identifiers of the target language
+ * @param ma Intermediate artifact information
+ */
virtual void formatTensorNames(const ModelAnalyzer &ma) = 0;
+ /**
+ * @brief Derivative classes should override this function to generate the header of the artifact
+ * @param out Stream to write header text
+ * @param ma Intermediate artifact information
+ */
virtual void materializeHeader(std::ostream &out, const ModelAnalyzer &ma) = 0;
+ /**
+ * @brief Derivative classes should override this function to generate the implementation of the artifact
+ * @param out Stream to write implementation text
+ * @param ma Intermediate artifact information
+ * @param s Serializer that holds network parameters and various meta-information: serializer version, hashes, etc.
+ */
virtual void materializeCode(std::ostream &out, const ModelAnalyzer &ma, const Serializer &s) = 0;
+ /**
+ * @brief Writes serialized parameters to out stream
+ * @param out Stream to write serialized parameters
+ * @param s Serializer that holds network parameters
+ *
+ * Contents of generated file:
+ * + header (magic number to identify the file type, protocol version, hashes of the network and parameters)
+ * + array of serialized network parameters
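+ *
+ * A schematic layout of the file, following the description above (field widths are illustrative, not normative):
+ * [ magic number | protocol version | network hash | params hash | raw parameter data ... ]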
+ */
void materializeModelParams(std::ostream &out, const Serializer &s);
BaseCodeGenerator();
namespace soft
{
-// C generator
+/**
+ * @brief CCodeGenerator implements the interface provided by BaseCodeGenerator for the C language.
+ * This includes header file generation, code file generation, and renaming of variables according to C naming requirements.
+ */
class CCodeGenerator: public BaseCodeGenerator
{
public:
namespace soft
{
-// C++ generator
+/**
+ * @brief CPPCodeGenerator implements the interface provided by BaseCodeGenerator for the C++ language.
+ * This includes header file generation, code file generation, and renaming of variables according to C++ naming requirements.
+ */
class CPPCodeGenerator: public BaseCodeGenerator
{
public:
void formatTensorNames(const ModelAnalyzer &ma) override;
void materializeHeader(std::ostream &out, const ModelAnalyzer &ma) override;
+ /**
+ * @brief Forms the list of function call arguments
+ * @param ma Intermediate model representation
+ * @param argIds List of argument variable ids
+ * @param args Resulting list of arguments in string form
+ */
void gatherOperationArguments(const ModelAnalyzer &ma,
const std::vector<size_t> &argIds,
std::vector<std::string> &args);
+ /**
+ * @brief Prints a setter of the artifact
+ * @param out Output stream
+ * @param className Name of artifact
+ * @param setterName Name of setter function
+ * @param varName Name of variable that setter fills
+ */
void printSetter(std::ostream &out, const std::string &className, const std::string &setterName, const std::string &varName);
void printGetter(std::ostream &out, const std::string &className, const std::string &setterName, const std::string &varName);
void materializeInferenceSequence(std::ostream &out, const ModelAnalyzer &ma);
namespace
{
+/**
+ * @brief Creates a pointer to an output stream, encapsulating resource management in the deleter;
+ * for example, it may be used to return std::cout
+ * @param path Path to the file to open
+ * @return Pointer to the output stream
+ * @throws PluginException if it did not succeed
+ */
unique_ptr<ofstream> getStream(const string &path)
{
unique_ptr<ofstream> ofs(new ofstream(path));
return ofs;
}
+/**
+ * @brief Creates directory
+ * @param path Path to desired directory
+ * @throws PluginException if it did not succeed
+ */
void createDir(const string &path)
{
int res = mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
namespace soft
{
+/**
+ * @brief Renames tensors with respect to C++ naming conventions
+ * @param ma Intermediate artifact information
+ */
void CPPCodeGenerator::formatTensorNames(const ModelAnalyzer &ma)
{
int tmpTensors = 0;
}
}
+/**
+ * + Writes the support data types and methods (Shape, Tensor) to out.
+ *   These are part of the user interface used to feed data to the artifact.
+ * + Writes the actual model class, which contains:
+ *   the network constructor, setters to feed data to the network, getters to get results,
+ *   and the doInference method that performs the actual inference.
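+ *
+ * A rough shape of the generated header (illustrative only; actual names and signatures depend on the model):
+ * @code
+ * class <ModelName>Model
+ * {
+ * public:
+ *   explicit <ModelName>Model(const std::string &parametersPath); // loads serialized parameters
+ *   void set<InputName>(const Tensor &t);                         // setters to feed data to the network
+ *   Tensor get<OutputName>();                                     // getters to read results
+ *   void doInference();                                           // runs the inference sequence
+ * };
+ * @endcode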
+ */
void CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma)
{
string className = ma.getModelName() + "Model";
out << "};\n";
}
-// print allocation of temporary tensors
-void printTmpTensors(ostream &out, const ModelAnalyzer &ma,
+/**
+ * @brief Prints the list of temporary variables for the given operation
+ * @param out Stream to write program text
+ * @param ma Intermediate artifact representation
+ * @param formatted List of formatted tensor names
+ * @param op Operation that requires variables
+ */
+static void printTmpTensors(ostream &out, const ModelAnalyzer &ma,
const vector<string> &formatted, const ModelAnalyzer::OpDescr &op)
{
for (size_t id: op._outputs)
}
}
-// print operation call arguments
+/**
+ * @brief Prints list of function arguments, separated by commas
+ * @param out Stream to write program text
+ * @param args Arguments to print
+ */
static void printOperationArgs(ostream &out, const vector<string> &args)
{
bool insertComma = false;
}
}
-// gather function arguments from vector of tensor descriptions
void CPPCodeGenerator::gatherOperationArguments(const ModelAnalyzer &ma,
const vector<size_t> &argIds,
vector<string> &args)
"}\n\n";
}
-// generate inference sequence
void CPPCodeGenerator::materializeInferenceSequence(ostream &out, const ModelAnalyzer &ma)
{
using OpDescr = ModelAnalyzer::OpDescr;
}
}
-// TODO think about better string formatting to make code more readable
+/**
+ * Writes the needed code snippets and the implementations of the artifact class functions to the output stream.
+ */
void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, const Serializer &s)
{
string className = ma.getModelName() + "Model";
addOpDescr(node, "softmax");
}
+/**
+ * Model IR does not distinguish between different types of pooling operations, but for code generation
+ * it is easier to implement each type of pooling with a separate function
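+ *
+ * A sketch of the intended dispatch (the pooling-kind accessor and enumerator names are assumptions, not verified API):
+ * @code
+ * switch (op.getPoolingType())
+ * {
+ *   case ops::PoolOp::PoolingType::MAX: funcName = "maxPool"; break;
+ *   case ops::PoolOp::PoolingType::AVG: funcName = "avgPool"; break;
+ * }
+ * @endcode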
+ */
void ModelAnalyzer::visit(ADT::INode *node, ops::PoolOp &op)
{
const char *funcName = nullptr;
const size_t INVALID_TENSOR_ID = std::numeric_limits<size_t>::max();
+/**
+ * @brief Constructs the inference sequence for a given computational graph
+ * and gathers the list of variables used in the artifact.
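+ *
+ * Typical use (sketch; the accept call that drives the visitor over the graph is an assumption):
+ * @code
+ * ModelAnalyzer ma;
+ * graph->accept(&ma);                          // walk the Model IR
+ * const auto &seq = ma.getInferenceSequence(); // resulting operation sequence
+ * @endcode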
+ */
class ModelAnalyzer: public model::IVisitor
{
public:
void visit(ADT::INode *node, ops::BatchNormOp &op) override;
void visit(ADT::INode *node, ops::DropoutOp &op) override;
+ /**
+ * @brief Represents a variable used in the artifact.
+ * Such a variable can store network inputs, network outputs, or temporary data.
+ */
struct TensorDescription
{
std::string _name;
bool _isNNOutput; // true if this is NN output tensor
};
- // operation description
+ /**
+ * @brief OpDescr represents an operation call in the inference sequence
+ */
struct OpDescr
{
enum class Type
size_t _paramStartOffset;
};
+ /**
+ * @return Vector of ids of network input tensors
+ */
const std::vector<size_t> &getInputs() const
{
return _inputs;
}
+ /**
+ * @return Vector of ids of tensors with unique names taken from Model IR
+ */
const std::vector<size_t> &getNamedTensors() const
{
return _named_tensors;
}
+ /**
+ * @return Vector of ids of network output tensors
+ */
const std::vector<size_t> &getOutputs() const
{
return _outputs;
}
+ /**
+ * @return Vector of all network tensors
+ */
const std::vector<TensorDescription> &getTensors() const
{
return _tensors;
}
+ /**
+ * @return Inference sequence
+ */
const std::list<OpDescr> &getInferenceSequence() const
{
return _inferenceSequence;
}
+ /**
+ * @return Inference sequence (mutable reference)
+ */
std::list<OpDescr> &getInferenceSequence()
{
return _inferenceSequence;
}
+ /**
+ * @return Model name, taken from Model IR
+ */
const std::string &getModelName() const
{
return _modelName;
}
private:
+ /**
+ * @brief Common function to add a function call to the inference sequence
+ * @param node Node representing added call
+ * @param name Function name
+ *
+ * Inserts information about the CG operation into the inference sequence (the operation name),
+ * creates tensors for the operation outputs, and binds the operation inputs to tensors from previous operations
+ */
void addOpDescr(ADT::INode *node, const std::string &name);
+ /**
+ * @brief Creates variable in artifact
+ * @param name Name of variable
+ * @param isNNInput If true, this variable can be set via a "set" method of the artifact
+ * @param isNNOutput If true, this variable can be retrieved by the user via a "get" method of the artifact
+ * @return Id of created variable
+ */
size_t allocateTensor(const std::string &name = std::string(),
bool isNNInput = false, bool isNNOutput = false);
packData(&obj, sizeof(T));
}
-// convert enum to it's underlying type
+/**
+ * @brief Convert an enum to its underlying type
+ * @tparam E Enum type
+ * @param enumVal Value of enum
+ * @return Integer value that corresponds to enumVal
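+ *
+ * Example (the enumerator name is illustrative):
+ * @code
+ * serializeT(etoi(OpDescr::Type::ORDINARY)); // store the enum as its integer representation
+ * @endcode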
+ */
template <class E>
typename underlying_type<E>::type etoi(E enumVal)
{
namespace ADT = model::ADT;
namespace ops = model::ops;
+/**
+ * @brief Serializer of network parameters for soft backend
+ *
+ * The Serializer class is responsible for serializing the parameters of a given computational graph and
+ * for binding inference operations to this data.
+ * It owns the buffer that contains the serialized data.
+ * To serialize data, call the `serialize` method with the inference sequence taken from a ModelAnalyzer object.
+ * To retrieve the resulting buffer, use the `getBuffer` method.
+ * Objects of this class are one-off and not designed to serialize more than one IR.
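+ *
+ * Typical use (sketch; exact signatures of serialize and getBuffer are not shown here and may differ):
+ * @code
+ * ModelAnalyzer ma;
+ * // ... run the analyzer over the Model IR ...
+ * Serializer s;
+ * s.serialize(ma.getInferenceSequence()); // bind operations to serialized parameters
+ * const auto &buffer = s.getBuffer();     // serialized network parameters
+ * @endcode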
+ */
class Serializer: public model::IVisitor
{
public:
return _modelHash;
}
private:
+ /**
+ * @brief Low-level function to serialize an untyped data buffer
+ * @param data Buffer containing data to serialize
+ * @param size Size of data to serialize
+ */
void packData(const void *data, size_t size);
+ /**
+ * @brief Serialize trivially copyable objects
+ * @tparam T Type of object to serialize
+ * @param obj Reference to object to serialize
+ */
template <typename T>
void serializeT(const T &obj);
+ /**
+ * @brief Serialize Tensor shape object
+ * @param s shape to serialize
+ */
void serializeShape(const nncc::contrib::core::data::Shape &s);
+ /**
+ * @brief Serializes the type of the given tensor's base data,
+ * its shape, and its raw data in 'C' format (i.e. the layout of a multidimensional C array)
+ * @param t Tensor to serialize
+ */
void serializeTensor(const contrib::core::ADT::TensorVariant &t);
+ /**
+ * @brief Serialize pads for operations like Conv2D
+ * @tparam Op Operation type
+ * @param op Reference to operation where pads are stored
+ * @param padsRank Number of pads to serialize
+ */
template <class Op>
void serializePads(const Op &op, int32_t padsRank);