[nnc] Support constant in acl backend (#2723)
authorEfimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Mon, 24 Dec 2018 10:42:32 +0000 (13:42 +0300)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Mon, 24 Dec 2018 10:42:32 +0000 (13:42 +0300)
- Support generation of the constant operation in the ACL backend
- Add a test for the constant operation

Signed-off-by: Efimov Alexander <a.efimov@samsung.com>
contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
contrib/nnc/unittests/acl_backend/MIRToDOM.cpp

index 62a2011..ea381f3 100644 (file)
@@ -389,8 +389,29 @@ void AclCppOpGenerator::visit(ops::VariableOp& op) {
 }
 
 void AclCppOpGenerator::visit(ops::ConstantOp& op) {
-  // TODO: NIY
-  assert(false);
+  Shape out_shape = op.getOutputShape(0);
+  TensorVariant data = op.getValue();
+
+  Shape transposed_shape;
+  // FIXME This is temporary solution,
+  // need to move this shape transposes into genTensor function and
+  // implement transpose operation to support ranks greater than 2
+  switch (out_shape.rank()) {
+    case 2:
+      transposed_shape = transposeShape<1, 0>(out_shape);
+      break;
+    case 1:
+      transposed_shape = out_shape;
+      break;
+    default:
+      throw AclCppException("Unsupported number of dimensions: " + to_string(out_shape.rank()));
+  }
+
+  shared_ptr<ArtifactId> out = genTensor(op, transposed_shape);
+
+  allocate(out);
+  // Serialize the weights tensor and generate the function to deserialize it in the artifact.
+  serializeTensor(out, data);
 }
 
 void AclCppOpGenerator::visit(ops::ReluOp& op) {
index f983d0d..2362b1d 100644 (file)
@@ -37,6 +37,7 @@
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
 #include "core/modelIR/operations/CappedReluOp.h"
+#include "core/modelIR/operations/ConstantOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ConcatOp.h"
 #include "core/modelIR/operations/BiasAddOp.h"
@@ -160,14 +161,39 @@ void checkDomStructure(const ArtifactModule& m,
 
 TensorVariant createTensorVariant(const Shape& shape) {
   size_t data_size = shape.numElements();
-  shared_ptr<float> data(new float[data_size], default_delete<float[]>());
-  return TensorVariant(shape, data, DTYPE::FLOAT32);
+  float* data = new float[data_size];
+  for (int32_t i = 0; i < shape.numElements(); ++i)
+    data[i] = i;
+
+  shared_ptr<float> ptr(data, default_delete<float[]>());
+  return TensorVariant(shape, ptr, DTYPE::FLOAT32);
 }
 
 }
 
+#include <fstream>
+
 // Actual tests
 
+TEST(acl_backend_mir_to_dom, constant) {
+  Shape shape{3, 4};
+  TensorVariant constant_data = createTensorVariant(shape);
+
+  Graph g;
+  OpConstructor op_generator = [&constant_data](Graph& g, const vector<IODescriptor>& inputs) {
+    return g.create<mir::ops::ConstantOp>("data", constant_data);
+  };
+
+  fillGraph(g, op_generator, {});
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+}
+
 TEST(acl_backend_mir_to_dom, bias) {
   const int32_t channels = 2;
   TensorVariant w = createTensorVariant({channels});