--- /dev/null
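+// Shape/index/tensor types shared between the generated code and its tests.
+// Every declaration line is wrapped in S(...): with S(x) defined as x this
+// file expands to real C++ declarations (see the soft backend tests), and
+// with S(...) defined as #__VA_ARGS__ "\n" it collapses into one string
+// literal that CPPCodeGenerator pastes into every generated header.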
+S(const int MAX_DIMS = 8;)
+S()
+S(template<int maxDims>)
+S(class BaseVector)
+S({)
+S(public:)
+S()
+S( BaseVector())
+S( {)
+S( _dims = 0;)
+S( _volume = 0;)
+S( })
+S()
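+// Construct from a braced list of extents; _volume keeps its default member
+// initializer (1) and accumulates the product of the extents. The default
+// constructor instead resets it to 0, so an empty shape has zero volume.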
+S( template <class T>)
+S( BaseVector(std::initializer_list<T> data): _dims(static_cast<int>(data.size())))
+S( {)
+S( assert(_dims <= maxDims);)
+S( size_t *dataPtr = _data;)
+S( for (T value: data))
+S( {)
+S( *dataPtr++ = static_cast<size_t>(value);)
+S( _volume *= static_cast<size_t>(value);)
+S( })
+S( })
+S()
+S( BaseVector(const BaseVector &orig): _dims(orig._dims), _volume(orig._volume))
+S( {)
+S( for (int i = 0; i < _dims; ++i))
+S( _data[i] = orig._data[i];)
+S( })
+S()
+S( BaseVector &operator=(const BaseVector &orig))
+S( {)
+S( _dims = orig._dims;)
+S( _volume = orig._volume;)
+S( for (int i = 0; i < _dims; ++i))
+S( _data[i] = orig._data[i];)
+S( return *this;)
+S( })
+S()
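+// Replace the extents with a new list, recomputing the volume from scratch.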
+S( void change(std::initializer_list<size_t> data))
+S( {)
+S( _dims = static_cast<int>(data.size());)
+S( assert(_dims <= maxDims);)
+S( _volume = 1;)
+S( size_t *dataPtr = _data;)
+S( for (size_t value: data))
+S( {)
+S( *dataPtr++ = value;)
+S( _volume *= value;)
+S( })
+S( })
+S()
+S( int dims() const)
+S( {)
+S( return _dims;)
+S( })
+S()
+S( size_t operator[](int dim) const)
+S( {)
+S( assert(dim >= 0 && dim < _dims);)
+S( return _data[dim];)
+S( })
+S()
+S( size_t getVolume() const)
+S( {)
+S( return _volume;)
+S( })
+S()
+S(private:)
+S( size_t _data[maxDims];)
+S( int _dims;)
+S( size_t _volume = 1;)
+S(};)
+S()
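+// A Shape stores the extent of each dimension; an Index stores a coordinate
+// inside a Shape. Both share the same fixed-capacity representation.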
+S(using Shape = BaseVector<MAX_DIMS>;)
+S()
+S(using Index = BaseVector<MAX_DIMS>;)
+S()
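+// A multidimensional array of floats stored in row-major order. A Tensor
+// either owns its buffer (_managed == true; allocated here or in reShape and
+// freed by the destructor) or wraps an external buffer supplied by the caller.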
+S(class Tensor)
+S({)
+S(public:)
+S( Tensor(): _shape(), _data(nullptr), _managed(true){})
+S()
+S( Tensor(const Shape& shape, float *data): _shape(shape), _data(data){})
+S()
+S( Tensor(const Shape& shape): _shape(shape), _data(new float[shape.getVolume()]), _managed(true) {})
+S()
+S( ~Tensor())
+S( {)
+S( if (_managed))
+S( delete [] _data;)
+S( })
+S()
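+// Copy getShape().getVolume() elements from data into the owned buffer.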
+S( void fillData(const float *data))
+S( {)
+S( assert(_managed);)
+S( for (size_t i = 0; i < _shape.getVolume(); ++i))
+S( _data[i] = data[i];)
+S( })
+S()
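+// Assignment reshapes the destination and copies the elements, so it is only
+// valid for tensors that own (manage) their storage.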
+S( Tensor &operator=(const Tensor &t))
+S( {)
+S( assert(_managed);)
+S( if (this != &t))
+S( {)
+S( reShape(t.getShape());)
+S( fillData(t._data);)
+S( })
+S( return *this;)
+S( })
+S()
+S( float &at(const Index &idx))
+S( {)
+S( return *(_data + getOffset(idx));)
+S( })
+S()
+S( float at(const Index &idx) const)
+S( {)
+S( return *(_data + getOffset(idx));)
+S( })
+S()
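+// Change the shape; for managed tensors the buffer is reallocated whenever
+// the volume changes, discarding the old contents.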
+S( void reShape(const Shape &shape))
+S( {)
+S( size_t oldVolume = _shape.getVolume();)
+S( _shape = shape;)
+S( if (_managed && oldVolume != shape.getVolume()))
+S( {)
+S( delete [] _data;)
+S( _data = new float[shape.getVolume()];)
+S( })
+S( })
+S()
+S( const Shape &getShape() const)
+S( {)
+S( return _shape;)
+S( })
+S()
+S(private:)
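+// Map a multi-dimensional index to a flat row-major offset: the last
+// dimension varies fastest, and each step accumulates idx[i] * stride.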
+S( size_t getOffset(const Index &idx) const)
+S( {)
+S( assert(idx.dims() == _shape.dims());)
+S( size_t offset = 0;)
+S( size_t stride = 1;)
+S( for (int i = _shape.dims() - 1; i >= 0; --i))
+S( {)
+S( assert(idx[i] < _shape[i]);)
+S( offset += stride * idx[i];)
+S( stride *= _shape[i];)
+S( })
+S( return offset;)
+S( })
+S()
+S( Shape _shape;)
+S( float *_data;)
+S( bool _managed = false;)
+S(};)
return _formatVersion;
}
+ const std::string &getModelName() const
+ {
+ return _modelName;
+ }
+
// generate hash from analyzed Model IR
uint32_t getModelHash() const
{
void serializeShape(const nncc::core::ADT::tensor::Shape &s);
void serializeTensor(const contrib::core::ADT::TensorVariant &t);
+ std::string _modelName = "NN";
const uint32_t _formatVersion = 1;
uint32_t _modelHash = 0;
std::vector<char> _packedParameters;
--- /dev/null
+#include <cstddef>
+#include <cassert>
+#include <initializer_list>
+#include <numeric>
+#include <vector>
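+// expand the shared declarations in place: S(x) -> x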
+#define S(...) __VA_ARGS__
+#include "cpp_header_types.def"
+
+#include "gtest/gtest.h"
+
+TEST(SOFT_BACKEND, shape_and_index)
+{
+ Shape s1 {2,3,4};
+ ASSERT_EQ(s1.dims(), 3);
+ ASSERT_EQ(s1[0], 2);
+ ASSERT_EQ(s1[1], 3);
+ ASSERT_EQ(s1[2], 4);
+ ASSERT_DEATH(s1[4], "");
+ ASSERT_EQ(s1.getVolume(), 24);
+ ASSERT_EQ(s1.dims(), 3);
+
+ Shape s2(s1);
+ ASSERT_EQ(s2.getVolume(), 24);
+ ASSERT_EQ(s2.dims(), 3);
+ ASSERT_EQ(s2[0], 2);
+ ASSERT_EQ(s2[1], 3);
+ ASSERT_EQ(s2[2], 4);
+
+ Shape s3 {1};
+ ASSERT_EQ(s3.getVolume(), 1);
+ ASSERT_EQ(s3.dims(), 1);
+ s3 = s1;
+ ASSERT_EQ(s3.getVolume(), 24);
+ ASSERT_EQ(s3.dims(), 3);
+ ASSERT_EQ(s3[0], 2);
+ ASSERT_EQ(s3[1], 3);
+ ASSERT_EQ(s3[2], 4);
+}
+
+TEST(SOFT_BACKEND, tensor)
+{
+ // test reshape
+ Tensor t1;
+ ASSERT_EQ(t1.getShape().getVolume(), 0);
+ const size_t t1Height = 2;
+ const size_t t1Width = 4;
+ t1.reShape(Shape{t1Height, t1Width});
+ ASSERT_EQ(t1.getShape().getVolume(), t1Height * t1Width);
+ // test at functions
+ float expectedSum = 0;
+ for (int i = 0; i < t1Height; ++i)
+ for (int j = 0; j < t1Width; ++j)
+ {
+ int elem = (i + 1) * (j + 1);
+ expectedSum += elem;
+ t1.at({i, j}) = elem;
+ }
+ float sum = 0;
+ for (int i = 0; i < t1Height; ++i)
+ for (int j = 0; j < t1Width; ++j)
+ {
+ sum += t1.at({i, j});
+ }
+ ASSERT_EQ(sum, expectedSum);
+
+ // test construction with shape
+ const size_t t2Height = 3;
+ const size_t t2Width = 4;
+ Tensor t2({t2Height, t2Width});
+ ASSERT_EQ(t2.getShape().getVolume(), t2Height * t2Width);
+
+ // test unmanaged tensor
+ const size_t t3Depth = 2;
+ const size_t t3Height = 2;
+ const size_t t3Width = 3;
+ std::vector<float> data({1.0, 2.0, 4.0});
+ data.resize(t3Depth * t3Height * t3Width);
+ float *dataPtr = data.data();
+ Tensor t3(Shape({t3Depth, t3Height, t3Width}), dataPtr);
+ ASSERT_EQ(t3.getShape().getVolume(), t3Depth * t3Height * t3Width);
+ sum = 0;
+ for (int k = 0; k < t3Depth; ++k)
+ for (int i = 0; i < t3Height; ++i)
+ for (int j = 0; j < t3Width; ++j)
+ {
+ sum += t3.at({k, i, j});
+ }
+ ASSERT_EQ(sum, std::accumulate(dataPtr, dataPtr + t3.getShape().getVolume(), 0.0f));
+
+ // test tensor copy
+ const size_t t4Width = 4;
+ Tensor t4({t4Width});
+ t4 = t3;
+ const Shape &t4Shape = t4.getShape();
+ for (int k = 0; k < t3Depth; ++k)
+ for (int i = 0; i < t3Height; ++i)
+      for (int j = 0; j < t3Width; ++j)
+ {
+ ASSERT_EQ(t3.at({k, i, j}), t4.at({k, i, j}));
+ }
+}
return gen;
}
+// stringify the shared base type declarations: with this definition of S each
+// line of cpp_header_types.def becomes a string literal ending in "\n"
+#ifdef S
+ #error S macro already defined!
+#endif
+#define S(...) #__VA_ARGS__ "\n"
+ static const char *cpp_header_types =
+ #include "cpp_header_types.def"
+ ;
+#undef S
+
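+/* Illustrative sketch only: for the default model name "NN" and hypothetical
+ * input "in" and output "out", materializeHeader emits roughly
+ *
+ *   class NNModel
+ *   {
+ *   public:
+ *     NNModel(const std::string &parametersPath);
+ *     void set_in(const Tensor& t);
+ *     std::shared_ptr<Tensor> get_out();
+ *     void doInference();
+ *   private:
+ *     ...
+ *   };
+ */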
void CPPCodeGenerator::materializeHeader(ostream &out, const ModelAnalyzer &ma)
{
- // TODO emit C++ header
+ string className = ma.getModelName() + "Model";
+
+ // emit the standard includes needed by the shared type declarations and by
+ // the generated class interface below
+ out << "#include <cstddef>\n"
+ "#include <cassert>\n"
+ "#include <initializer_list>\n"
+ "#include <string>\n"
+ "#include <memory>\n\n";
+ out << cpp_header_types;
+ out << "class " << className << "\n"
+ "{\n"
+ "public:\n"
+ " " << className << "(const std::string &parametersPath);\n";
+ // generate input setters
+ for (const string &inName: ma.getInputs())
+ out << " void set_" << inName << "(const Tensor& t);\n";
+ // generate output getters
+ for (const string &outName: ma.getOutputs())
+ out << " std::shared_ptr<Tensor> get_" << outName << "();\n";
+ out << " void doInference();\n\n"
+ "private:\n"
+ " " << className << "() = delete;\n"
+ " " << className << "(const " << className << " &orig) = delete;\n"
+ " " << className << " &operator=(const " << className << " &orig) = delete;\n";
+ // generate input/output tensors
+ for (const string &inName: ma.getInputs())
+ out << " Tensor _" << inName << ";\n";
+ for (const string &outName: ma.getOutputs())
+ out << " std::shared_ptr<Tensor> _" << outName << ";\n";
+ out << "};\n";
}
void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma)