Support new caffe operations in soft backend (#1476)
authorEfimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Thu, 13 Sep 2018 17:49:03 +0000 (20:49 +0300)
committerРоман Михайлович Русяев/AI Tools Lab /SRR/Staff Engineer/삼성전자 <r.rusyaev@samsung.com>
Thu, 13 Sep 2018 17:49:03 +0000 (20:49 +0300)
Support new operations:
+ Scale (works on channel axis only)
+ Dropout (passes tensor without changes)
+ BatchNorm (passes tensor without changes)

Signed-off-by: Efimov Alexander <a.efimov@samsung.com>
contrib/nnc/passes/soft_backend/code_snippets/cpp_batchnorm.def [new file with mode: 0644]
contrib/nnc/passes/soft_backend/code_snippets/cpp_dropout.def [new file with mode: 0644]
contrib/nnc/passes/soft_backend/code_snippets/cpp_scale.def [new file with mode: 0644]
contrib/nnc/passes/soft_backend/cpp_generator.cpp
contrib/nnc/passes/soft_backend/serializer.cpp
contrib/nnc/unittests/soft_backend/cpp_operations.cpp

diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_batchnorm.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_batchnorm.def
new file mode 100644 (file)
index 0000000..66c60f3
--- /dev/null
@@ -0,0 +1,11 @@
+void batchNorm(Tensor &out, const char *params, const Tensor &in) // inference batchnorm: identity pass-through (per commit message)
+{
+  out.reShape(in.getShape()); // output shape mirrors input
+  float eps = deserializeT<float>(params); // numeric-stability epsilon; deserialized to advance params, unused by this stub
+  (void)eps; // fix: was "(float)eps;" — a no-op value cast that still warns; (void) is the discard idiom (cf. spatial below)
+  float avgFraction = deserializeT<float>(params); // moving-average fraction; unused by this stub
+  (void)avgFraction; // fix: was "(float)avgFraction;" — same discard-idiom fix as eps
+  bool spatial = deserializeT<int32_t>(params); // serialized as int32_t by Serializer::visit(BatchNormOp)
+  (void)spatial;
+  out.fillData(in.getData()); // copy input data unchanged
+}
diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_dropout.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_dropout.def
new file mode 100644 (file)
index 0000000..9fcb51a
--- /dev/null
@@ -0,0 +1,7 @@
+void dropout(Tensor &out, const char *params, const Tensor &in) // inference dropout: identity pass-through (per commit message)
+{
+  out.reShape(in.getShape()); // output shape mirrors input
+  float rate = deserializeT<float>(params); // drop probability written by the serializer; unused at inference time
+  (void)rate; // discard to suppress unused-variable warning
+  out.fillData(in.getData()); // copy input data unchanged
+}
diff --git a/contrib/nnc/passes/soft_backend/code_snippets/cpp_scale.def b/contrib/nnc/passes/soft_backend/code_snippets/cpp_scale.def
new file mode 100644 (file)
index 0000000..828c6d4
--- /dev/null
@@ -0,0 +1,25 @@
+void scale(Tensor &out, const char *params, const Tensor &in) // channel-wise scale: out[..., c] = in[..., c] * w[c] (channel axis only, per commit message)
+{
+  out.reShape(in.getShape()); // output shape mirrors input
+  Kernel weights = deserializeKernel(params); // per-channel multipliers serialized by Serializer::visit(ScaleOp)
+  Shape inShape = in.getShape();
+  const int wSize = weights.dims.sizes[0]; // channel count; assumes channels in kernel dim 0 — TODO confirm against serializer layout
+  assert(wSize == inShape[inShape.getDims() - 1]); // weights must match the innermost (channel) axis of the input
+  assert(weights.dims.sizes[1] == 1); // remaining kernel dims must be degenerate:
+  assert(weights.dims.sizes[2] == 1); // scale weights are effectively 1-D over channels
+  assert(weights.dims.sizes[3] == 1);
+
+  const float *wData = weights.data;
+  const float *inData = in.getData();
+  float *outData = out.getData();
+  int32_t dataSize = inShape.getNumElems();
+
+  assert(dataSize % wSize == 0); // data must split into whole channel-sized slices
+  for (int32_t sliceOffset = 0; sliceOffset < dataSize; sliceOffset += wSize)
+  {
+    for (int i = 0; i < wSize; i++)
+    {
+      outData[sliceOffset + i] = inData[sliceOffset + i] * wData[i]; // multiply each element by its channel weight
+    }
+  }
+}
index 698a0bc..8ce0e4b 100644 (file)
@@ -23,6 +23,9 @@ using namespace nncc::contrib::core::IR::model;
 #include "cpp_pool.generated.h"
 #include "cpp_relu.generated.h"
 #include "cpp_softmax.generated.h"
+#include "cpp_scale.generated.h"
+#include "cpp_dropout.generated.h"
+#include "cpp_batchnorm.generated.h"
 
 namespace nncc
 {
@@ -251,6 +254,9 @@ void CPPCodeGenerator::materializeCode(ostream &out, const ModelAnalyzer &ma, co
   out.write(param_constants, sizeof(param_constants));
 
   out.write(cpp_operations, sizeof(cpp_operations));
+  out.write(cpp_scale, sizeof(cpp_scale));
+  out.write(cpp_dropout, sizeof(cpp_dropout));
+  out.write(cpp_batchnorm, sizeof(cpp_batchnorm));
 
   // gen NN constructor
   out << className << "::" << className << "(const string &parametersPath)\n"
index 425437f..278bf84 100644 (file)
@@ -236,7 +236,7 @@ void Serializer::visit(ADT::INode *node, ops::BatchNormOp &op)
   _curOp->_paramStartOffset = _buffer.size();
   serializeT<float>(op.getEps());
   serializeT<float>(op.getMovingAvgFraction());
-  serializeT<bool>(op.getSpatial());
+  serializeT<int32_t>(op.getSpatial());
 }
 
 void Serializer::visit(ADT::INode *node, ops::ScaleOp &op)
index 66d4ae2..e812863 100644 (file)
@@ -21,6 +21,7 @@
 #include "param_constants.def"
 #include "code_snippets/cpp_header_types.def"
 #include "code_snippets/cpp_operations.def"
+#include "code_snippets/cpp_scale.def"
 
 // soft backend part
 
@@ -39,6 +40,7 @@
 #include "core/modelIR/operations/concat_op.h"
 #include "core/modelIR/operations/bias_add_op.h"
 #include "core/modelIR/operations/softmax_op.h"
+#include "core/modelIR/operations/scale_op.h"
 
 // various headers
 #include "core/modelIR/TensorVariant.h"
@@ -265,6 +267,20 @@ TEST(cpp_operations_test, bias)
   createAndRunTestGraph(opGenerator, biasAdd, inputNTensors, aInputTensor);
 }
 
+TEST(cpp_operations_test, scale)
+{
+  vector<int> inputShapeData{2, 3, 4, 5}; // 4-D input; innermost dim (5) is the scaled channel axis
+  data::Shape weightsShape{5}; // one weight per channel — must equal the innermost input dim
+  vector<unique_ptr<TensorVariant>> inputNTensors(1);
+  Tensor aInputTensor;
+  fillTensors(inputNTensors[0], aInputTensor, inputShapeData, 1.0f); // populate model-IR and artifact input tensors
+
+  TensorVariant weights = createNTensor(weightsShape, 1.0f);
+  auto opGenerator = [weights](Graph &g){return g.create<IR::model::ops::ScaleOp>("y", weights);}; // weights captured by value: closure owns its copy
+
+  createAndRunTestGraph(opGenerator, scale, inputNTensors, aInputTensor); // presumably compares generated scale() output with the reference graph — verify helper contract
+}
+
 TEST(cpp_operations_test, capped_relu)
 {
   // test prerequisites