From 9aa4662a92603b1894a37e99e7b9336e1d5aae08 Mon Sep 17 00:00:00 2001 From: "Efimov Alexander/AI Tools Lab/./Samsung Electronics" <a.efimov@samsung.com> Date: Sun, 15 Jul 2018 09:40:42 +0300 Subject: [PATCH] Soft backend generator for cpp header (#603) Dump compressed parameters - Implementation of compressed parameters dump - C++ Header generator materializer implementation - Implementation of C++ header materializer - Add tests for basic structures of generated c++ header Signed-off-by: Efimov Alexander <a.efimov@samsung.com> --- .../libs/backend/soft/include/cpp_header_types.def | 157 +++++++++++++++++++++ .../nnc/libs/backend/soft/include/model_analyzer.h | 6 + .../backend/soft/src/cpp_header_types.test.cpp | 100 +++++++++++++ contrib/nnc/libs/backend/soft/src/generator.cpp | 35 ++++- 4 files changed, 297 insertions(+), 1 deletion(-) create mode 100644 contrib/nnc/libs/backend/soft/include/cpp_header_types.def create mode 100644 contrib/nnc/libs/backend/soft/src/cpp_header_types.test.cpp diff --git a/contrib/nnc/libs/backend/soft/include/cpp_header_types.def b/contrib/nnc/libs/backend/soft/include/cpp_header_types.def new file mode 100644 index 0000000..007be5c --- /dev/null +++ b/contrib/nnc/libs/backend/soft/include/cpp_header_types.def @@ -0,0 +1,157 @@ +S(const int MAX_DIMS = 8;) +S() +S(template <int maxDims>) +S(class BaseVector) +S({) +S(public:) +S() +S(  BaseVector()) +S(  {) +S(    _dims = 0;) +S(    _volume = 0;) +S(  }) +S() +S(  template <class T>) +S(  BaseVector(std::initializer_list<T> data): _dims(data.size())) +S(  {) +S(    assert(_dims <= maxDims);) +S(    size_t *dataPtr = _data;) +S(    for (T value: data)) +S(    {) +S(      *dataPtr++ = static_cast<size_t>(value);) +S(      _volume *= static_cast<size_t>(value);) +S(    }) +S(  }) +S() +S(  BaseVector(const BaseVector &orig): _dims(orig._dims), _volume(orig._volume)) +S(  {) +S(    for (int i = 0; i < _dims; ++i)) +S(      _data[i] = orig._data[i];) +S(  }) +S() +S(  BaseVector &operator=(const BaseVector &orig)) +S(  {) +S(    _dims = orig._dims;) +S(    _volume = orig._volume;) +S(    for (int i = 0; i < _dims; ++i)) +S(      _data[i] 
= orig._data[i];) +S(    return *this;) +S(  }) +S() +S(  void change(std::initializer_list<size_t> data)) +S(  {) +S(    _dims = data.size();) +S(    assert(_dims <= maxDims);) +S(    _volume = 1;) +S(    size_t *dataPtr = _data;) +S(    for (size_t value: data)) +S(    {) +S(      *dataPtr++ = value;) +S(      _volume *= value;) +S(    }) +S(  }) +S() +S(  int dims() const) +S(  {) +S(    return _dims;) +S(  }) +S() +S(  size_t operator[](int dim) const) +S(  {) +S(    assert(dim < _dims);) +S(    return _data[dim];) +S(  }) +S() +S(  size_t getVolume() const) +S(  {) +S(    return _volume;) +S(  }) +S() +S(private:) +S(  size_t _data[maxDims];) +S(  int _dims;) +S(  size_t _volume = 1;) +S(};) +S() +S(using Shape = BaseVector<MAX_DIMS>;) +S() +S(using Index = BaseVector<MAX_DIMS>;) +S() +S(class Tensor) +S({) +S(public:) +S(  Tensor(): _shape(), _data(nullptr), _managed(true){}) +S() +S(  Tensor(const Shape& shape, float *data): _shape(shape), _data(data){}) +S() +S(  Tensor(const Shape& shape): _shape(shape), _data(new float[shape.getVolume()]), _managed(true) {}) +S() +S(  ~Tensor()) +S(  {) +S(    if (_managed)) +S(      delete [] _data;) +S(  }) +S() +S(  void fillData(const float *data)) +S(  {) +S(    assert(_managed);) +S(    for (int i = 0; i < _shape.getVolume(); ++i)) +S(      _data[i] = data[i];) +S(  }) +S() +S(  Tensor &operator=(const Tensor &t)) +S(  {) +S(    assert(_managed);) +S(    if (this != &t)) +S(    {) +S(      reShape(t.getShape());) +S(      fillData(t._data);) +S(    }) +S(    return *this;) +S(  }) +S() +S(  float &at(const Index &idx)) +S(  {) +S(    return *(_data + getOffset(idx));) +S(  }) +S() +S(  float at(const Index &idx) const) +S(  {) +S(    return *(_data + getOffset(idx));) +S(  }) +S() +S(  void reShape(const Shape &shape)) +S(  {) +S(    size_t oldVolume = _shape.getVolume();) +S(    _shape = shape;) +S(    if (_managed && oldVolume != shape.getVolume())) +S(    {) +S(      delete [] _data;) +S(      _data = new float[shape.getVolume()];) +S(    }) +S(  }) +S() +S(  const Shape &getShape() const) +S(  {) +S(    return _shape;) +S(  }) +S() +S(private:) +S(  size_t getOffset(const Index &idx) const) +S(  {) +S(    
assert(idx.dims() == _shape.dims());) +S(    size_t offset = 0;) +S(    size_t stride = 1;) +S(    for (int i = _shape.dims() - 1; i >= 0; --i)) +S(    {) +S(      assert(idx[i] < _shape[i]);) +S(      offset += stride * idx[i];) +S(      stride *= _shape[i];) +S(    }) +S(    return offset;) +S(  }) +S() +S(  Shape _shape;) +S(  float *_data;) +S(  bool _managed = false;) +S(};) diff --git a/contrib/nnc/libs/backend/soft/include/model_analyzer.h b/contrib/nnc/libs/backend/soft/include/model_analyzer.h index 74e895d..75264ca 100644 --- a/contrib/nnc/libs/backend/soft/include/model_analyzer.h +++ b/contrib/nnc/libs/backend/soft/include/model_analyzer.h @@ -86,6 +86,11 @@ public: return _formatVersion; } + const std::string &getModelName() const + { + return _modelName; + } + // generate hash from analyzed Model IR uint32_t getModelHash() const { @@ -102,6 +107,7 @@ private: void serializeShape(const nncc::core::ADT::tensor::Shape &s); void serializeTensor(const contrib::core::ADT::TensorVariant &t); + std::string _modelName = "NN"; const uint32_t _formatVersion = 1; uint32_t _modelHash = 0; std::vector<char> _packedParameters; diff --git a/contrib/nnc/libs/backend/soft/src/cpp_header_types.test.cpp b/contrib/nnc/libs/backend/soft/src/cpp_header_types.test.cpp new file mode 100644 index 0000000..9ac59cf --- /dev/null +++ b/contrib/nnc/libs/backend/soft/src/cpp_header_types.test.cpp @@ -0,0 +1,100 @@ +#include <initializer_list> +#include <vector> +#include <numeric> +#include <cassert> +#define S(...) 
__VA_ARGS__ +#include "cpp_header_types.def" + +#include "gtest/gtest.h" + +TEST(SOFT_BACKEND, shape_and_index) +{ + Shape s1 {2,3,4}; + ASSERT_EQ(s1.dims(), 3); + ASSERT_EQ(s1[0], 2); + ASSERT_EQ(s1[1], 3); + ASSERT_EQ(s1[2], 4); + ASSERT_DEATH(s1[4], ""); + ASSERT_EQ(s1.getVolume(), 24); + ASSERT_EQ(s1.dims(), 3); + + Shape s2(s1); + ASSERT_EQ(s2.getVolume(), 24); + ASSERT_EQ(s2.dims(), 3); + ASSERT_EQ(s2[0], 2); + ASSERT_EQ(s2[1], 3); + ASSERT_EQ(s2[2], 4); + + Shape s3 {1}; + ASSERT_EQ(s3.getVolume(), 1); + ASSERT_EQ(s3.dims(), 1); + s3 = s1; + ASSERT_EQ(s3.getVolume(), 24); + ASSERT_EQ(s3.dims(), 3); + ASSERT_EQ(s3[0], 2); + ASSERT_EQ(s3[1], 3); + ASSERT_EQ(s3[2], 4); +} + +TEST(SOFT_BACKEND, tensor) +{ + // test reshape + Tensor t1; + ASSERT_EQ(t1.getShape().getVolume(), 0); + const size_t t1Height = 2; + const size_t t1Width = 4; + t1.reShape(Shape{t1Height, t1Width}); + ASSERT_EQ(t1.getShape().getVolume(), t1Height * t1Width); + // test at functions + float expectedSum = 0; + for (int i = 0; i < t1Height; ++i) + for (int j = 0; j < t1Width; ++j) + { + int elem = (i + 1) * (j + 1); + expectedSum += elem; + t1.at({i, j}) = elem; + } + float sum = 0; + for (int i = 0; i < t1Height; ++i) + for (int j = 0; j < t1Width; ++j) + { + sum += t1.at({i, j}); + } + ASSERT_EQ(sum, expectedSum); + + // test construction with shape + const size_t t2Height = 3; + const size_t t2Width = 4; + Tensor t2({t2Height, t2Width}); + ASSERT_EQ(t2.getShape().getVolume(), t2Height * t2Width); + + // test unmanaged tensor + const size_t t3Depth = 2; + const size_t t3Height = 2; + const size_t t3Width = 3; + std::vector<float> data({1.0, 2.0, 4.0}); + data.resize(t3Depth * t3Height * t3Width); + float *dataPtr = data.data(); + Tensor t3(Shape({t3Depth, t3Height, t3Width}), dataPtr); + ASSERT_EQ(t3.getShape().getVolume(), t3Depth * t3Height * t3Width); + sum = 0; + for (int k = 0; k < t3Depth; ++k) + for (int i = 0; i < t3Height; ++i) + for (int j = 0; j < t3Width; ++j) + { + sum += t3.at({k, i, j}); + } + 
ASSERT_EQ(sum, std::accumulate(dataPtr, dataPtr + t3.getShape().getVolume(), 0.0f)); + + // test tensor copy + const size_t t4Width = 4; + Tensor t4({t4Width}); + t4 = t3; + const Shape &t4Shape = t4.getShape(); + for (int k = 0; k < t3Depth; ++k) + for (int i = 0; i < t3Height; ++i) + for (int j = 0; j < t3Height; ++j) + { + ASSERT_EQ(t3.at({k, i, j}), t4.at({k, i, j})); + } +} diff --git a/contrib/nnc/libs/backend/soft/src/generator.cpp b/contrib/nnc/libs/backend/soft/src/generator.cpp index 9d9df76..a58e1fc 100644 --- a/contrib/nnc/libs/backend/soft/src/generator.cpp +++ b/contrib/nnc/libs/backend/soft/src/generator.cpp @@ -187,9 +187,42 @@ CPPCodeGenerator CPPCodeGenerator::create(const std::string &headerFile, return gen; } +// put generator base types into string +#ifdef S + #error S macro already defined! +#endif +#define S(...) #__VA_ARGS__ "\n" + static const char *cpp_header_types = + #include "cpp_header_types.def" + ; +#undef S + void CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma) { - // TODO emit C++ header + string className = ma.getModelName() + "Model"; + + out << cpp_header_types; + out << "class " << className << "\n" + "{\n" + "public:\n" + " " << className << "(const std::string &parametersPath);\n"; + // generate input setters + for (const string &inName: ma.getInputs()) + out << " void set_" << inName << "(const Tensor& t);\n"; + // generate output getters + for (const string &outName: ma.getOutputs()) + out << " std::shared_ptr<Tensor> get_" << outName << "();\n"; + out << " void doInference();\n\n" + "private:\n" + " " << className << "() = delete;\n" + " " << className << "(const " << className << " &orig) = delete;" + " " << className << " &operator=(const " << className << " &orig) = delete;\n"; + // generate input/output tensors + for (const string &inName: ma.getInputs()) + out << " Tensor _" << inName << ";\n"; + for (const string &outName: ma.getOutputs()) + out << " std::shared_ptr<Tensor> _" << outName << ";\n"; + out << 
"};\n"; } void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma) -- 2.7.4