#include <vector>
#include <utility>
#include <string>
+#include <cassert>
+#include <cstdint>
+#include <cstring>
namespace nncc
{
return _packedParameters;
}
+ // Returns the on-disk parameters format version.
+ // uint32_t matches the fixed 4-byte VERSION_LEN field of the
+ // serialized header (see static_assert in materializeModelParams).
- uint64_t getFormatVersion() const
+ uint32_t getFormatVersion() const
{
return _formatVersion;
}
+ // Returns the hash generated from the analyzed model IR.
+ // Only meaningful after the analyzer has visited a non-empty model;
+ // the assert below catches calls made before that happened.
+ uint32_t getModelHash() const
+ {
+ assert(!_inferenceSequence.empty() && "Empty model! Did you apply visitor?");
+ return _modelHash;
+ }
+
private:
void addOpDescr(ADT::INode *node, const std::string &name);
- const uint64_t _formatVersion = 1;
+ const uint32_t _formatVersion = 1;
+ uint32_t _modelHash = 0;
std::vector<char> _packedParameters;
std::vector<OpDescr> _inferenceSequence;
size_t _localTensorsN = 0;
namespace soft
{
+namespace parameters_format
+{
+ // Byte widths of the fixed-size fields of the serialized parameters
+ // file header: magic string, format version, model hash.
+ const int MAGIC_LEN = 4;
+ const int VERSION_LEN = 4;
+ const int HASH_LEN = 4;
+ // Total header size; the packed parameters blob follows immediately after.
+ const int HEADER_LEN = MAGIC_LEN + VERSION_LEN + HASH_LEN;
+
+ // +1 for the string literal's terminating NUL; only MAGIC_LEN bytes
+ // are ever written to the file.
+ const char MAGIC[MAGIC_LEN + 1] = "NNMP"; // Neural Network Model Parameters
+}
+
namespace
{
void BaseCodeGenerator::materializeModelParams(ostream &out, const ModelAnalyzer &ma)
{
- // TODO dump compressed model data
+ using namespace parameters_format;
+
+ // Serialized layout: MAGIC | VERSION | HASH header, then the raw
+ // packed parameters blob taken from the analyzer.
+ char header[HEADER_LEN];
+ uint32_t version = ma.getFormatVersion();
+ uint32_t hash = ma.getModelHash();
+ // Pin the fixed on-disk field widths against future type changes.
+ static_assert(VERSION_LEN == sizeof(version), "version length mismatch");
+ static_assert(HASH_LEN == sizeof(hash), "hash length mismatch");
+ memcpy(header, MAGIC, MAGIC_LEN);
+ memcpy(header + MAGIC_LEN, &version, VERSION_LEN);
+ memcpy(header + MAGIC_LEN + VERSION_LEN, &hash, HASH_LEN);
+
+ out.write(header, HEADER_LEN);
+ if (out.fail())
+ {
+ throw PluginException("Failed to write model parameters header");
+ }
+ // Bind by reference: the packed parameters can be large, avoid copying.
+ auto &params = ma.getPackedParameters();
+ out.write(params.data(), params.size());
+ if (out.fail())
+ {
+ throw PluginException("Failed to write model parameters");
+ }
}
void BaseCodeGenerator::generate(Graph *g)