Add serialized test info readers and tensor comparator (#871)
author: Dmitry Mozolev/AI Tools Lab /SRR/Engineer/삼성전자 <d.mozolev@samsung.com>
Fri, 3 Aug 2018 14:25:51 +0000 (17:25 +0300)
committer: Sergey Vostokov/AI Tools Lab /SRR/Staff Engineer/삼성전자 <s.vostokov@samsung.com>
Fri, 3 Aug 2018 14:25:50 +0000 (17:25 +0300)
Added functions that use flatbuffers to get NN operator
parameters needed for Model IR graph construction, and a tensor
comparing function that acts as a GTest assert.

Signed-off-by: Dmitry Mozolev <d.mozolev@samsung.com>
contrib/nnc/libs/backend/interpreter/test/include/op_info_util.h [new file with mode: 0644]
contrib/nnc/libs/backend/interpreter/test/src/op_info_util.cpp [new file with mode: 0644]

diff --git a/contrib/nnc/libs/backend/interpreter/test/include/op_info_util.h b/contrib/nnc/libs/backend/interpreter/test/include/op_info_util.h
new file mode 100644 (file)
index 0000000..3bccf9e
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef NNC_INTERPRETER_OP_TEST_UTIL_H
+#define NNC_INTERPRETER_OP_TEST_UTIL_H
+
+#include <memory>
+#include <cassert>
+
+#include "nnc/core/linalg/TensorVariant.h"
+#include "nnc/core/IR/model/operations/common.h"
+#include "nnc/core/IR/model/operations/pool_op.h"
+
+#include "nnc/core/IR/model/actions/ShapeInference.h"
+
+#include "op_info_generated.h"
+#include "shape_helper.h"
+#include "graph_creator.h"
+
+// NOTE(review): using-directives in a header leak into every translation unit
+// that includes it; tolerable for test-only code, but confirm this header is
+// never included outside the interpreter tests.
+using namespace nncc::contrib::frontend::common;
+using namespace nncc::contrib::core::IR::model;
+
+// Deserialize a flatbuffers-serialized test tensor into a Model IR TensorVariant.
+std::shared_ptr<TensorVariant> getTensor(const opinfo::Tensor* t);
+// Deserialize the first kernel tensor attached to a serialized operator description.
+std::shared_ptr<TensorVariant> getKernel(const opinfo::OperatorInfo* opInfo);
+// Map serialized padding/pooling enum values onto their Model IR equivalents.
+ops::PaddingType getPaddingType(const opinfo::OperatorInfo* opInfo);
+ops::PoolOp::PoolingType getPoolingType(const opinfo::OperatorInfo* opInfo);
+// Read the n-th serialized shape parameter of an operator description.
+Shape getShapeParam(const opinfo::OperatorInfo* opInfo, unsigned int n);
+// Read the axis parameter of an operator description.
+int getAxis(const opinfo::OperatorInfo* opInfo);
+
+// Debug helper: dump every tensor element to stdout.
+__attribute__ ((unused)) void printTensor(const TensorVariant& lhs);
+// GTest assertion: tensors equal in shape and element-wise equal within 4 ULPs.
+void assertTensorEq(const TensorVariant &lhs, const TensorVariant &rhs);
+
+#endif // NNC_INTERPRETER_OP_TEST_UTIL_H
diff --git a/contrib/nnc/libs/backend/interpreter/test/src/op_info_util.cpp b/contrib/nnc/libs/backend/interpreter/test/src/op_info_util.cpp
new file mode 100644 (file)
index 0000000..9c826b7
--- /dev/null
@@ -0,0 +1,139 @@
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <stdexcept>
+
+#include "gtest/gtest.h"
+
+#include "nnc/core/linalg/Tensor.h"
+#include "nnc/core/linalg/ShapeRange.h"
+#include "nncc/core/ADT/tensor/Shape.h"
+
+#include "op_info_util.h"
+
+/** @brief Deserialize a flatbuffers tensor into a freshly allocated TensorVariant.
+ *
+ * The serialized element data is copied into a new buffer owned by the returned
+ * TensorVariant, so the result stays valid after the flatbuffer is released.
+ *
+ * NOTE(review): assumes the serialized element type is float — TODO confirm
+ * against the op_info flatbuffers schema.
+ */
+std::shared_ptr<TensorVariant> getTensor(const opinfo::Tensor* t)
+{
+  // Owning byte buffer; the custom deleter matches the array form of new.
+  std::shared_ptr<char> tensorBufferCopy(
+          new char[t->data()->size() * sizeof(float)], [](char *d) { delete[] d; });
+  std::copy(t->data()->begin(), t->data()->end(), reinterpret_cast<float*>(tensorBufferCopy.get()));
+
+  size_t elementSize = sizeof(float);
+  TensorVariant::DTYPE type = TensorVariant::DTYPE::FLOAT;
+
+  // Rebuild the Model IR shape from the serialized dimension list.
+  Shape tensorShape = ShapeHelper::createShape(*t->shape()->dims(), t->shape()->dims()->size());
+
+  return std::make_shared<TensorVariant>(tensorShape, tensorBufferCopy, type, elementSize);
+}
+
+/** @brief Fetch the first serialized kernel of an operator as a TensorVariant. */
+std::shared_ptr<TensorVariant> getKernel(const opinfo::OperatorInfo* opInfo)
+{
+  const opinfo::Tensor* kernelTensor = opInfo->kernels()->Get(0);
+  return getTensor(kernelTensor);
+}
+
+/** @brief Map a serialized padding enum onto the Model IR padding type.
+ *
+ * @param opInfo operator description deserialized from the test-info flatbuffer
+ * @return the equivalent ops::PaddingType
+ * @throws std::logic_error if the serialized value is not a known padding type
+ */
+ops::PaddingType getPaddingType(const opinfo::OperatorInfo* opInfo)
+{
+  switch (opInfo->padType())
+  {
+    case opinfo::PadType_VALID:
+      return ops::PaddingType::Valid;
+    case opinfo::PadType_SAME:
+      return ops::PaddingType::Same;
+    default:
+      // A bare assert(false) compiles away under NDEBUG, letting control fall
+      // off the end of a value-returning function (undefined behavior).
+      // Throw so malformed test data fails loudly in every build mode.
+      assert(false && "unsupported padding type");
+      throw std::logic_error("getPaddingType: unsupported padding type");
+  }
+}
+
+/** @brief Map a serialized pooling enum onto the Model IR pooling type.
+ *
+ * @param opInfo operator description deserialized from the test-info flatbuffer
+ * @return the equivalent ops::PoolOp::PoolingType
+ * @throws std::logic_error if the serialized value is not a known pooling type
+ */
+ops::PoolOp::PoolingType getPoolingType(const opinfo::OperatorInfo* opInfo)
+{
+  switch (opInfo->poolType())
+  {
+    case opinfo::PoolType_MAXPOOL:
+      return ops::PoolOp::PoolingType::MAX;
+    case opinfo::PoolType_AVGPOOL:
+      return ops::PoolOp::PoolingType::AVG;
+    default:
+      // A bare assert(false) compiles away under NDEBUG, letting control fall
+      // off the end of a value-returning function (undefined behavior).
+      // Throw so malformed test data fails loudly in every build mode.
+      assert(false && "unsupported pooling type");
+      throw std::logic_error("getPoolingType: unsupported pooling type");
+  }
+}
+
+/** @brief Read the n-th serialized shape of an operator as a Model IR Shape. */
+Shape getShapeParam(const opinfo::OperatorInfo* opInfo, unsigned int n)
+{
+  const auto* dims = opInfo->shapes()->Get(n)->dims();
+  return ShapeHelper::createShape(*dims, dims->size());
+}
+
+/** @brief Read the axis parameter of a serialized operator description. */
+int getAxis(const opinfo::OperatorInfo* opInfo)
+{
+  const int axis = opInfo->axis();
+  return axis;
+}
+
+/** @brief Dump every element of a tensor to stdout, one value per line.
+ * Not referenced by the tests themselves; kept as a debugging aid.
+ */
+__attribute__ ((unused)) void printTensor(const TensorVariant& lhs)
+{
+  using nncc::contrib::core::data::ShapeRange;
+  using nncc::contrib::core::data::Tensor;
+
+  Tensor<float> reader(lhs);
+
+  // Walk the full index space of the tensor in ShapeRange order.
+  for (auto& index : ShapeRange(reader.getShape()))
+    std::cout << reader.at(index) << std::endl;
+}
+
+/** @brief Custom float comparator.
+ * It is supposed to be equivalent to GTest's ASSERT_FLOAT_EQ when allowedUlpsDiff is 4.
+ */
+/** @brief Custom float comparator.
+ * It is supposed to be equivalent to GTest's ASSERT_FLOAT_EQ when allowedUlpsDiff is 4.
+ *
+ * @param f1 first value to compare
+ * @param f2 second value to compare
+ * @param allowedUlpsDiff maximum tolerated distance in units-in-the-last-place
+ * @return GTest assertion result carrying the actual ULP distance on failure
+ */
+static inline ::testing::AssertionResult areFloatsEqual(float f1, float f2, int allowedUlpsDiff)
+{
+  // Type-pun through memcpy: dereferencing a reinterpret_cast'ed pointer
+  // violates strict aliasing (UB); memcpy is the portable idiom the compiler
+  // optimizes to the same code.
+  int32_t intRepr1;
+  int32_t intRepr2;
+  static_assert(sizeof(intRepr1) == sizeof(f1), "unexpected float size");
+  std::memcpy(&intRepr1, &f1, sizeof(f1));
+  std::memcpy(&intRepr2, &f2, sizeof(f2));
+
+  // Widen before subtracting: the int32 difference can overflow (UB) when the
+  // operands have opposite signs.
+  int64_t ulpsDiff = std::abs(static_cast<int64_t>(intRepr1) - static_cast<int64_t>(intRepr2));
+
+  if (ulpsDiff <= allowedUlpsDiff)
+    return ::testing::AssertionSuccess();
+  else
+    return ::testing::AssertionFailure() << "ULP difference is " << ulpsDiff;
+}
+
+/** @brief GTest assertion that two tensors have equal shape and element-wise
+ * equal float contents (within 4 ULPs, mirroring ASSERT_FLOAT_EQ).
+ *
+ * Must be invoked from a void function / test body, since ASSERT_* macros
+ * return on failure.
+ */
+void assertTensorEq(const TensorVariant &lhs, const TensorVariant &rhs)
+{
+  using nncc::contrib::core::data::ShapeRange;
+  using nncc::contrib::core::data::Tensor;
+
+  Tensor<float> lhsAccessor(lhs);
+  Tensor<float> rhsAccessor(rhs);
+
+  // Shapes must match before any element can be compared.
+  ASSERT_EQ(lhsAccessor.getShape(), rhsAccessor.getShape());
+
+  for(auto& idx : ShapeRange(lhsAccessor.getShape()))
+  {
+    // 4 ULPs is the tolerance GTest itself uses for ASSERT_FLOAT_EQ.
+    ASSERT_TRUE(areFloatsEqual(lhsAccessor.at(idx), rhsAccessor.at(idx), 4));
+  }
+}
+
+// The print operator must live in Shape's own namespace so that
+// argument-dependent lookup (e.g. from GTest failure messages) can find it.
+namespace nncc
+{
+namespace core
+{
+namespace ADT
+{
+namespace tensor
+{
+
+/** @brief Render a Shape as "Shape(d0, d1, ...)". */
+std::ostream &operator<<(std::ostream &os, const Shape &sh)
+{
+  os << "Shape(";
+  const char* separator = "";
+  for (uint32_t i = 0; i < sh.rank(); ++i)
+  {
+    os << separator << sh.dim(i);
+    separator = ", ";
+  }
+  os << ")";
+  return os;
+}
+
+} // namespace tensor
+} // namespace ADT
+} // namespace core
+} // namespace nncc