From 9c5fce82ba68d84b7d8bdc6a162fe9cffe757714 Mon Sep 17 00:00:00 2001
From: "Efimov Alexander/AI Tools Lab/./Samsung Electronics" <a.efimov@samsung.com>
Date: Mon, 24 Dec 2018 13:42:32 +0300
Subject: [PATCH] [nnc] Support constant in acl backend (#2723)

- Support generation of constant operation in acl backend
- Add test for constant operation

Signed-off-by: Efimov Alexander <a.efimov@samsung.com>
---
 .../passes/acl_soft_backend/AclCppOpGenerator.cpp  | 25 ++++++++++++++++--
 contrib/nnc/unittests/acl_backend/MIRToDOM.cpp     | 30 ++++++++++++++++++++--
 2 files changed, 51 insertions(+), 4 deletions(-)

diff --git a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
index 62a2011..ea381f3 100644
--- a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
+++ b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
@@ -389,8 +389,29 @@ void AclCppOpGenerator::visit(ops::VariableOp& op) {
 }
 
 void AclCppOpGenerator::visit(ops::ConstantOp& op) {
-  // TODO: NIY
-  assert(false);
+  Shape out_shape = op.getOutputShape(0);
+  TensorVariant data = op.getValue();
+
+  Shape transposed_shape;
+  // FIXME This is temporary solution,
+  // need to move this shape transposes into genTensor function and
+  // implement transpose operation to support ranks greater than 2
+  switch (out_shape.rank()) {
+    case 2:
+      transposed_shape = transposeShape<1, 0>(out_shape);
+      break;
+    case 1:
+      transposed_shape = out_shape;
+      break;
+    default:
+      throw AclCppException("Unsupported number of dimensions: " + to_string(out_shape.rank()));
+  }
+
+  shared_ptr<ArtifactId> out = genTensor(op, transposed_shape);
+
+  allocate(out);
+  // Serialize the weights tensor and generate the function to deserialize it in the artifact.
+  serializeTensor(out, data);
 }
 
 void AclCppOpGenerator::visit(ops::ReluOp& op) {
diff --git a/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp b/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp
index f983d0d..2362b1d 100644
--- a/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp
+++ b/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp
@@ -37,6 +37,7 @@
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReduceFOp.h"
 #include "core/modelIR/operations/CappedReluOp.h"
+#include "core/modelIR/operations/ConstantOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ConcatOp.h"
 #include "core/modelIR/operations/BiasAddOp.h"
@@ -160,14 +161,39 @@ void checkDomStructure(const ArtifactModule& m,
 
 TensorVariant createTensorVariant(const Shape& shape) {
   size_t data_size = shape.numElements();
-  shared_ptr<float> data(new float[data_size], default_delete<float[]>());
-  return TensorVariant(shape, data, DTYPE::FLOAT32);
+  float* data = new float[data_size];
+  for (int32_t i = 0; i < shape.numElements(); ++i)
+    data[i] = i;
+
+  shared_ptr<float> ptr(data, default_delete<float[]>());
+  return TensorVariant(shape, ptr, DTYPE::FLOAT32);
 }
 
 }
 
+#include <functional>
+
 // Actual tests
 
+TEST(acl_backend_mir_to_dom, constant) {
+  Shape shape{3, 4};
+  TensorVariant constant_data = createTensorVariant(shape);
+
+  Graph g;
+  OpConstructor op_generator = [&constant_data](Graph& g, const vector<IODescriptor>& inputs) {
+    return g.create<ops::ConstantOp>("data", constant_data);
+  };
+
+  fillGraph(g, op_generator, {});
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+}
+
 TEST(acl_backend_mir_to_dom, bias) {
   const int32_t channels = 2;
   TensorVariant w = createTensorVariant({channels});
-- 
2.7.4