From: Efimov Alexander/AI Tools Lab/./Samsung Electronics
Date: Fri, 7 Dec 2018 14:28:20 +0000 (+0300)
Subject: [nnc] Add acl backend tests for ops in mobilenet and inception (#2570)
X-Git-Tag: nncc_backup~1150
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7b6a61fe121a3a68a1ca72f1a6fe97b40ac5485a;p=platform%2Fcore%2Fml%2Fnnfw.git

[nnc] Add acl backend tests for ops in mobilenet and inception (#2570)

Added tests for operations:
- scale
- bias
- concat
- reshape
- dropout
- conv2d
- depthwise2d
- softmax
- pool
- fc
- relu

Signed-off-by: Efimov Alexander
---

diff --git a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
index c6625bf..ec9661c 100644
--- a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
+++ b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
@@ -356,7 +356,12 @@ void AclCppOpGenerator::visit(ops::BiasAddOp& op) {
 }
 
 void AclCppOpGenerator::visit(ops::VariableOp& op) {
-  auto tensor = genTensor(op, transposeShape<2, 1, 3, 0>(op.getOutputShape(0)));
+  shared_ptr<ArtifactId> tensor;
+  if (op.getOutputShape(0).rank() == 2) {
+    tensor = genTensor(op, transposeShape<1, 0>(op.getOutputShape(0)));
+  } else {
+    tensor = genTensor(op, transposeShape<2, 1, 3, 0>(op.getOutputShape(0)));
+  }
   allocate(tensor);
 }
 
diff --git a/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp b/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp
index a4ee6bb..bb6e018 100644
--- a/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp
+++ b/contrib/nnc/unittests/acl_backend/MIRToDOM.cpp
@@ -63,20 +63,20 @@ const char* artifactName = "nnmodel";
 
 /**
  * @brief Creates graph with one operation generated by opGen function and returns this operation node
  * @param g reference to graph which should be filled with operations
- * @param opConstr functor which creates main operations of graph
- * @param inputShapes vector of network input shapes
+ * @param op_constr functor which creates main operations of graph
+ * @param input_shapes vector of network input shapes
  *
  */
-void fillGraph(Graph& g, OpConstructor opConstr, const vector<Shape>& inputShapes) {
+void fillGraph(Graph& g, OpConstructor op_constr, const vector<Shape>& input_shapes) {
   // Create inputs
   vector<IODescriptor> inputs;
-  int numInputs = inputShapes.size();
-  for (int i = 0; i < numInputs; ++i) {
-    auto inputOp = g.create<ops::VariableOp>("x" + to_string(i), inputShapes[i]);
+  int num_inputs = input_shapes.size();
+  for (int i = 0; i < num_inputs; ++i) {
+    auto inputOp = g.create<ops::VariableOp>("x" + to_string(i), input_shapes[i]);
     inputs.push_back(inputOp->getOutput(0));
   }
 
   // Create operation
-  Operation* op = opConstr(g, inputs);
+  Operation* op = op_constr(g, inputs);
 
   // Mark outputs
   g.markOutput(op);
@@ -93,7 +93,7 @@ void checkDomIncludes(const ArtifactModule& m) {
 /**
  * @brief Check that artifact DOM contains appropriate getters
  * @param c Main artifact class
- * @param names List of values accessible via getters
+ * @param tensors List of values accessible via getters
  */
 void checkDomArtifactGetters(const ArtifactClass& c, const vector<string>& tensors) {
   // TODO
@@ -102,7 +102,7 @@ void checkDomArtifactGetters(const ArtifactClass& c, const vector<string>& tenso
 /**
  * @brief Check that artifact class constructor initializes all layers
  * @param c Main artifact class
- * @param layers List of NN layers
+ * @param tensors List of NN layers
  */
 void checkDomArtifactConstructor(const ArtifactClass& c, const vector<string>& tensors) {
   // TODO
@@ -120,6 +120,8 @@ void checkDomInference(const ArtifactFunction& f, const vector<string>& layers)
 /**
  * @brief Check that artifact DOM contains appropriate class
  * @param m Root module of DOM
+ * @param layers Names of NN layers in inference sequence
+ * @param tensors Names of tensors in artifact
  */
 void checkArtifactClass(const ArtifactClass& c,
                         const vector<string>& layers,
@@ -140,6 +142,8 @@ void checkArtifactClass(const ArtifactClass& c,
 /**
  * @brief Root of check functions
  * @param m Main artifact module
+ * @param layers Names of NN layers in inference sequence
+ * @param tensors Names of tensors in artifact
  */
 void checkDomStructure(const ArtifactModule& m,
                        const vector<string>& layers,
@@ -152,22 +156,27 @@ void checkDomStructure(const ArtifactModule& m,
   checkArtifactClass(*cls, layers, tensors);
 }
 
+TensorVariant createTensorVariant(const Shape& shape) {
+  size_t data_size = shape.numElements();
+  shared_ptr<float> data(new float[data_size], default_delete<float[]>());
+  return TensorVariant(shape, data, DTYPE::FLOAT32);
+}
+
 }
 
 // Actual tests
 
 TEST(acl_backend_mir_to_dom, bias) {
-  const int channels = 2;
-  shared_ptr<float> data(new float[channels], default_delete<float[]>());
-  TensorVariant w({channels}, data, DTYPE::FLOAT32);
+  const int32_t channels = 2;
+  TensorVariant w = createTensorVariant({channels});
 
   Graph g;
-  OpConstructor opGenerator = [&w](Graph& g, const vector<IODescriptor>& inputs) {
+  OpConstructor op_generator = [&w](Graph& g, const vector<IODescriptor>& inputs) {
     return g.create<ops::BiasAddOp>("bias", inputs[0], w);
   };
-  vector<Shape> inputShapes{{1, 10, 10, channels}};
+  vector<Shape> input_shapes{{1, 10, 10, channels}};
 
-  fillGraph(g, opGenerator, inputShapes);
+  fillGraph(g, op_generator, input_shapes);
 
   stringstream params_out;
   AclCppOpGenerator dom_gen(artifactName, params_out);
@@ -177,16 +186,51 @@ TEST(acl_backend_mir_to_dom, bias) {
   checkDomStructure(m, {}, {});
 }
 
-TEST(acl_backend_mir_to_dom, DISABLED_scale) {
-  // TODO
+TEST(acl_backend_mir_to_dom, scale) {
+  const int32_t channels = 2;
+  TensorVariant w = createTensorVariant({channels});
+
+  Graph g;
+  OpConstructor op_generator = [&w](Graph& g, const vector<IODescriptor>& inputs) {
+    return g.create<ops::ScaleOp>("scale", inputs[0], w);
+  };
+  vector<Shape> input_shapes{{1, 10, 10, channels}};
+
+  fillGraph(g, op_generator, input_shapes);
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+
+  stringstream code_out;
+  ArtifactGeneratorCppCode code_gen(code_out);
 }
 
 TEST(acl_backend_mir_to_dom, DISABLED_capped_relu) {
   // TODO
 }
 
-TEST(acl_backend_mir_to_dom, DISABLED_concat) {
-  // TODO
+TEST(acl_backend_mir_to_dom, concat) {
+  Graph g;
+  OpConstructor op_generator = [](Graph& g, const vector<IODescriptor>& inputs) {
+    return g.create<ops::ConcatOp>("concat", inputs, 3);
+  };
+  vector<Shape> input_shapes{{2, 3, 5, 1}, {2, 3, 5, 3}};
+
+  fillGraph(g, op_generator, input_shapes);
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+
+  stringstream code_out;
+  ArtifactGeneratorCppCode code_gen(code_out);
 }
 
 TEST(acl_backend_mir_to_dom, DISABLED_add) {
@@ -205,28 +249,142 @@ TEST(acl_backend_mir_to_dom, DISABLED_conv_transposed2d) {
   // TODO
 }
 
-TEST(acl_backend_mir_to_dom, DISABLED_conv2d) {
-  // TODO
+TEST(acl_backend_mir_to_dom, conv2d) {
+  const int32_t channels = 3;
+  mir::Shape kernel_shape{3, 3, channels, 1};  // Height, Width, input Channels, output Channel
+  mir::Shape strides{1, 1};
+  mir::TensorVariant kernel = createTensorVariant(kernel_shape);
+
+  Graph g;
+  OpConstructor op_generator =
+      [kernel, strides](mir::Graph& g,
+                        const std::vector<mir::IODescriptor>& inputs) {
+        std::vector<int32_t> padding{0, 0};
+        return g.create<ops::Conv2DOp>("conv2d", inputs[0], kernel, strides, padding, padding);
+      };
+
+  vector<Shape> input_shapes{{1, 10, 10, channels}};
+
+  fillGraph(g, op_generator, input_shapes);
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+
+  stringstream code_out;
+  ArtifactGeneratorCppCode code_gen(code_out);
 }
 
-TEST(acl_backend_mir_to_dom, DISABLED_depthwise_conv) {
-  // TODO
+TEST(acl_backend_mir_to_dom, depthwise_conv) {
+  const int32_t channels = 3;
+  mir::Shape kernel_shape{3, 3, channels, 1};  // Height, Width, Channels, Channel multiplier
+  mir::Shape strides{1, 1};
+  mir::TensorVariant kernel = createTensorVariant(kernel_shape);
+
+  Graph g;
+  OpConstructor op_generator =
+      [kernel, strides](mir::Graph& g,
+                        const std::vector<mir::IODescriptor>& inputs) {
+        std::vector<int32_t> padding{0, 0};
+        return g.create<ops::DepthwiseConv2DOp>("depthwiseConv2d",
+                                                inputs[0], kernel,
+                                                strides, padding, padding);
+      };
+
+  vector<Shape> input_shapes{{1, 10, 10, channels}};
+
+  fillGraph(g, op_generator, input_shapes);
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+
+  stringstream code_out;
+  ArtifactGeneratorCppCode code_gen(code_out);
 }
 
-TEST(acl_backend_mir_to_dom, DISABLED_fully_connected) {
-  // TODO
+TEST(acl_backend_mir_to_dom, fully_connected) {
+  const int32_t in_size = 13;
+  const int32_t out_size = 7;
+  Shape input_shape_data{1, in_size};
+  Shape weights_shape{in_size, out_size};
+  TensorVariant weights = createTensorVariant(weights_shape);
+
+  Graph g;
+  OpConstructor opGenerator = [weights](Graph& g, const vector<IODescriptor>& inputs) {
+    return g.create<ops::FullyConnectedOp>("fc", inputs[0], weights);
+  };
+
+  fillGraph(g, opGenerator, {input_shape_data});
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+
+  stringstream code_out;
+  ArtifactGeneratorCppCode code_gen(code_out);
 }
 
-TEST(acl_backend_mir_to_dom, DISABLED_maxpool) {
-  // TODO
+TEST(acl_backend_mir_to_dom, maxpool) {
+  mir::Shape window_shape{3, 3};  // Height, Width
+  mir::Shape strides{1, 1};
+
+  Graph g;
+  OpConstructor op_generator =
+      [window_shape, strides](mir::Graph& g,
+                              const std::vector<mir::IODescriptor>& inputs) {
+        std::vector<int32_t> padding{0, 0};
+        return g.create<ops::PoolOp>("maxPool", inputs[0], ops::PoolOp::PoolingType::MAX,
+                                     window_shape, strides, padding, padding,
+                                     mir::ops::PoolOp::BorderType::EMPTY,
+                                     ops::PoolOp::RoundMode::floor);
+      };
+
+  vector<Shape> input_shapes{{1, 10, 10, 3}};
+
+  fillGraph(g, op_generator, input_shapes);
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+
+  stringstream code_out;
+  ArtifactGeneratorCppCode code_gen(code_out);
 }
 
 TEST(acl_backend_mir_to_dom, DISABLED_avgpool) {
   // TODO
 }
 
-TEST(acl_backend_mir_to_dom, DISABLED_relu) {
-  // TODO
+TEST(acl_backend_mir_to_dom, relu) {
+  Graph g;
+  OpConstructor op_generator = [](Graph& g, const std::vector<IODescriptor>& inputs) {
+    return g.create<ops::ReluOp>("relu", inputs[0]);
+  };
+  vector<Shape> input_shapes{{1, 10, 10, 3}};
+
+  fillGraph(g, op_generator, input_shapes);
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+
+  stringstream code_out;
+  ArtifactGeneratorCppCode code_gen(code_out);
 }
 
 TEST(acl_backend_mir_to_dom, DISABLED_elu) {
@@ -241,12 +399,51 @@ TEST(acl_backend_mir_to_dom, DISABLED_reduce_mean) {
   // TODO
 }
 
-TEST(acl_backend_mir_to_dom, DISABLED_softmax) {
-  // TODO
+TEST(acl_backend_mir_to_dom, softmax) {
+  Graph g;
+  OpConstructor op_generator = [](Graph& g, const vector<IODescriptor>& inputs) {
+    return g.create<ops::SoftmaxOp>("softmax", inputs[0], 3);
+  };
+  vector<Shape> input_shapes{{1, 1, 1, 3}};
+
+  fillGraph(g, op_generator, input_shapes);
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+
+  stringstream code_out;
+  ArtifactGeneratorCppCode code_gen(code_out);
 }
 
-TEST(acl_backend_mir_to_dom, DISABLED_reshape) {
-  // TODO
+TEST(acl_backend_mir_to_dom, reshape) {
+  Graph g;
+
+  const int32_t h = 10;
+  const int32_t w = 10;
+  const int32_t c = 3;
+
+  Shape input_shape{1, h, w, c};
+  Shape output_shape{1, h * w * c};
+
+  OpConstructor op_generator = [output_shape](Graph& g, const vector<IODescriptor>& inputs) {
+    return g.create<ops::ReshapeOp>("reshape", inputs[0], output_shape);
+  };
+
+  fillGraph(g, op_generator, {input_shape});
+
+  stringstream params_out;
+  AclCppOpGenerator dom_gen(artifactName, params_out);
+
+  const ArtifactModule& m = dom_gen.generate(&g);
+
+  checkDomStructure(m, {}, {});
+
+  stringstream code_out;
+  ArtifactGeneratorCppCode code_gen(code_out);
 }
 
 TEST(acl_backend_mir_to_dom, DISABLED_pad) {
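
Note: each enabled test drives DOM generation (AclCppOpGenerator::generate) and checks the DOM's structure, but constructs its ArtifactGeneratorCppCode without using it, so the emitted C++ text is not yet verified. A minimal sketch of that follow-up step, assuming the artifact DOM keeps the visitor convention of the artifact model (an accept() taking an IArtifactGenerator*; this snippet is illustrative and not part of the patch):

    // Serialize the DOM produced above into C++ source text and sanity-check it.
    stringstream code_out;
    ArtifactGeneratorCppCode code_gen(code_out);
    m.accept(&code_gen);                      // walk the DOM, printing each artifact node
    ASSERT_FALSE(code_out.str().empty());     // generated artifact source should be non-empty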