From 3512701627cbf56da35f80a11c3938cd83295239 Mon Sep 17 00:00:00 2001
From: Sergei Barannikov/AI Tools Lab /SRR/Engineer/Samsung Electronics
Date: Fri, 11 Jan 2019 20:28:51 +0300
Subject: [PATCH] [nnc] Remove the debugTranspose option (#2813)

Remove the `debugTranspose` option, which is no longer needed: the
behavior it used to guard is now unconditional. The Caffe frontend
always inserts NCHW <-> NHWC transposes around 4D operations, the ACL
backend always generates the matching transpose and deallocation code,
and the now-empty `checkReLU` hook is removed together with its
declaration.

Signed-off-by: Sergei Barannikov
---
 contrib/nnc/driver/Options.cpp                     |  7 --
 contrib/nnc/include/option/Options.h               |  5 --
 .../passes/acl_soft_backend/AclCppOpGenerator.cpp  | 85 +++++-----------
 .../nnc/passes/caffe_frontend/caffe_importer.cpp   |  4 +-
 .../nnc/passes/caffe_frontend/caffe_op_creator.cpp | 82 ++++++-------------
 .../nnc/passes/caffe_frontend/caffe_op_creator.h   |  2 -
 6 files changed, 49 insertions(+), 136 deletions(-)

diff --git a/contrib/nnc/driver/Options.cpp b/contrib/nnc/driver/Options.cpp
index 889ca81..1621d74 100644
--- a/contrib/nnc/driver/Options.cpp
+++ b/contrib/nnc/driver/Options.cpp
@@ -169,13 +169,6 @@ Option<std::string> interInputData(optname("--input-model-data"),
                                    optional(true),
                                    optvalues(""),
                                    checkInFile);
-/**
- * Miscellaneous options.
- */
-Option<bool> debugTranspose(optname("--debug-transpose"),
-                            overview("insert transpose operations for debugging purposes"),
-                            false,
-                            optional(true));
 
 } // namespace cli
 } // namespace nnc
diff --git a/contrib/nnc/include/option/Options.h b/contrib/nnc/include/option/Options.h
index 670afc8..1e25eb2 100644
--- a/contrib/nnc/include/option/Options.h
+++ b/contrib/nnc/include/option/Options.h
@@ -59,11 +59,6 @@ extern Option<std::string> artifactName; // name of artifact
  */
 extern Option<std::string> interInputData; // input data for model
 
-/**
- * Miscellaneous options.
- */
-extern Option<bool> debugTranspose;
-
 } // namespace cli
 } // namespace nnc
diff --git a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
index f2119e9..9a3a774 100644
--- a/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
+++ b/contrib/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp
@@ -103,24 +103,15 @@ const ArtifactModule& AclCppOpGenerator::generate(mir::Graph* g) {
 }
 
 void AclCppOpGenerator::visit(ops::ConcatOp& op) {
+  static const char* axis_names[] = {"arm_compute::DataLayoutDimension::BATCHES",
+                                     "arm_compute::DataLayoutDimension::CHANNEL",
+                                     "arm_compute::DataLayoutDimension::HEIGHT",
+                                     "arm_compute::DataLayoutDimension::WIDTH"};
   int axis = op.getAxis();
-  assert(axis < 4 && axis >= 0 && "axis outside this range is not supported in ACL");
-
-  const char* axis_name;
-  if (cli::debugTranspose) {
-    static const char* axis_names[] = {"arm_compute::DataLayoutDimension::BATCHES",
-                                       "arm_compute::DataLayoutDimension::CHANNEL",
-                                       "arm_compute::DataLayoutDimension::HEIGHT",
-                                       "arm_compute::DataLayoutDimension::WIDTH"};
-    axis_name = axis_names[axis];
-  } else {
-    static const char* axis_names[] = {"arm_compute::DataLayoutDimension::BATCHES",
-                                       "arm_compute::DataLayoutDimension::HEIGHT",
-                                       "arm_compute::DataLayoutDimension::WIDTH",
-                                       "arm_compute::DataLayoutDimension::CHANNEL"};
-    axis_name = axis_names[axis];
-  }
+  assert(axis >= 0 && axis < sizeof(axis_names) / sizeof(axis_names[0]) &&
+         "axis outside this range is not supported in ACL");
+  const char* axis_name = axis_names[axis];
 
   auto out = genTensor(op, op.getOutputShape(0));
   auto prefix = out->name() + "_concatenate_layer";
 
@@ -255,17 +246,8 @@ shared_ptr<ArtifactId> AclCppOpGenerator::genTransposeMIRtoACL(const string& name,
                                                                const Shape& input_shape,
                                                                const shared_ptr<ArtifactId>& input) {
-
-  if (!cli::debugTranspose) {
-    // Generate output tensor description in the DOM.
-    shared_ptr<ArtifactId> output = AF::id(name);
-
-    _constrBlock->var("arm_compute::CLTensor&", output->name(), {}, {input});
-    return output;
-  }
   Shape transposed_shape = transposeShape<0, 3, 1, 2>(input_shape);
-  shared_ptr<ArtifactId> transposed_id =
-      genTensor(name, transposed_shape, false);
+  shared_ptr<ArtifactId> transposed_id = genTensor(name, transposed_shape, false);
   const bool allocate_at_inference = true;
   genTranspose(input, transposed_id, {0, 3, 1, 2}, allocate_at_inference);
   return transposed_id;
 }
@@ -275,18 +257,8 @@ shared_ptr<ArtifactId> AclCppOpGenerator::genTransposeACLtoMIR(const string& name,
                                                                const Shape& input_shape,
                                                                const shared_ptr<ArtifactId>& input) {
-
-  if (!cli::debugTranspose) {
-    // Generate output tensor description in the DOM.
-    shared_ptr<ArtifactId> output = AF::id(name);
-
-    _constrBlock->var("arm_compute::CLTensor&", output->name(), {}, {input});
-    return output;
-  }
   Shape transposed_shape = transposeShape<0, 2, 3, 1>(input_shape);
-  shared_ptr<ArtifactId> transposed_id =
-      genTensor(name, transposed_shape, false);
-
+  shared_ptr<ArtifactId> transposed_id = genTensor(name, transposed_shape, false);
   const bool allocate_at_inference = false;
   genTranspose(input, transposed_id, {0, 2, 3, 1}, allocate_at_inference);
   return transposed_id;
 }
@@ -355,10 +327,8 @@ void AclCppOpGenerator::visit(ops::PoolOp& op) {
   shared_ptr<ArtifactId> output = genTransposeACLtoMIR(output_tensor_name,
                                                        transposed_output_shape, transposed_output);
 
-  if (cli::debugTranspose) {
-    genTensorDeallocation(_infBlock, transposed_input);
-    genTensorDeallocation(_infBlock, transposed_output);
-  }
+  genTensorDeallocation(_infBlock, transposed_input);
+  genTensorDeallocation(_infBlock, transposed_output);
 
   if (op.getNextNodes().empty())
     _outputs.insert(&op);
@@ -474,10 +444,8 @@ void AclCppOpGenerator::visit(ops::BiasAddOp& op) {
     shared_ptr<ArtifactId> output = genTransposeACLtoMIR(output_tensor_name,
                                                          transposed_output_shape, transposed_output);
 
-    if (cli::debugTranspose) {
-      genTensorDeallocation(_infBlock, transposed_input);
-      genTensorDeallocation(_infBlock, transposed_output);
-    }
+    genTensorDeallocation(_infBlock, transposed_input);
+    genTensorDeallocation(_infBlock, transposed_output);
   }
 
   if (op.getNextNodes().empty())
@@ -486,14 +454,7 @@ void AclCppOpGenerator::visit(ops::VariableOp& op) {
   shared_ptr<ArtifactId> tensor;
-  if (cli::debugTranspose) {
-    tensor = genTensor(op, op.getOutputShape(0));
-  } else {
-    if (op.getOutputShape(0).rank() == 4)
-      tensor = genTensor(op, transposeShape<0, 3, 1, 2>(op.getOutputShape(0)));
-    else
-      tensor = genTensor(op, op.getOutputShape(0));
-  }
+  tensor = genTensor(op, op.getOutputShape(0));
   addToPersistentTensors(tensor);
 }
 
@@ -627,10 +588,8 @@ void AclCppOpGenerator::visit(ops::ScaleOp& op) {
   shared_ptr<ArtifactId> output = genTransposeACLtoMIR(output_tensor_name,
                                                        transposed_output_shape, transposed_output);
 
-  if (cli::debugTranspose) {
-    genTensorDeallocation(_infBlock, transposed_input);
-    genTensorDeallocation(_infBlock, transposed_output);
-  }
+  genTensorDeallocation(_infBlock, transposed_input);
+  genTensorDeallocation(_infBlock, transposed_output);
 
   if (op.getNextNodes().empty())
     _outputs.insert(&op);
@@ -773,10 +732,8 @@ void AclCppOpGenerator::genConvolution(Op& op, const string& acl_func_name, cons
   shared_ptr<ArtifactId> output = genTransposeACLtoMIR(output_tensor_name,
                                                        transposed_output_shape, transposed_output);
 
-  if (cli::debugTranspose) {
-    genTensorDeallocation(_infBlock, transposed_input);
-    genTensorDeallocation(_infBlock, transposed_output);
-  }
+  genTensorDeallocation(_infBlock, transposed_input);
+  genTensorDeallocation(_infBlock, transposed_output);
 
   if (op.getNextNodes().empty())
     _outputs.insert(&op);
@@ -792,11 +749,7 @@ void AclCppOpGenerator::genActivation(mir::Operation& op, const std::string& act
   auto in = AF::id(tensorName(in_op));
 
   // Create the output tensor in the DOM and return its id.
-  shared_ptr<ArtifactId> output;
-  if (cli::debugTranspose)
-    output = genTensor(op, op.getOutputShape(0));
-  else
-    output = genTensor(op, transposeShape<0, 3, 1, 2>(op.getOutputShape(0)));
+  shared_ptr<ArtifactId> output = genTensor(op, op.getOutputShape(0));
 
   auto prefix = output->name() + "_activation_layer";
 
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
index 5258c0c..d285aba 100644
--- a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
+++ b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
@@ -176,6 +176,7 @@ void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) {
     case CaffeOpType::split:
     case CaffeOpType::eltwise:
     case CaffeOpType::ELU:
+    case CaffeOpType::ReLU:
     case CaffeOpType::embed:
     case CaffeOpType::sigmoid:
     case CaffeOpType::tanh:
@@ -192,9 +193,6 @@ void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) {
     case CaffeOpType::reshape:
       _opCreator->checkReshape(lp.reshape_param(), _problemsOpSet);
       break;
-    case CaffeOpType::ReLU:
-      _opCreator->checkReLU(lp.relu_param(), _problemsOpSet);
-      break;
     case CaffeOpType::batchNorm:
       _opCreator->checkBatchNorm(lp, _problemsOpSet);
       break;
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
index 0effb9f..f9fcd09 100644
--- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
+++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
@@ -58,23 +58,15 @@ using namespace mir;
 using namespace ::caffe;
 
 mir::IODescriptor CaffeOpCreator::convertCaffeToMIR(const mir::IODescriptor& arg) {
-  if (cli::debugTranspose) {
-    // NCHW -> NHWC
-    auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 2, 3, 1});
-    return transpose->getOutput(0);
-  } else {
-    return arg;
-  }
+  // NCHW -> NHWC
+  auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 2, 3, 1});
+  return transpose->getOutput(0);
 }
 
 mir::IODescriptor CaffeOpCreator::convertMIRToCaffe(const mir::IODescriptor& arg) {
-  if (cli::debugTranspose) {
-    // NHWC -> NCHW
-    auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 3, 1, 2});
-    return transpose->getOutput(0);
-  } else {
-    return arg;
-  }
+  // NHWC -> NCHW
+  auto transpose = createOp<ops::TransposeOp>("", arg, std::vector<std::size_t>{0, 3, 1, 2});
+  return transpose->getOutput(0);
 }
 
 mir::IODescriptor CaffeOpCreator::createAdd(mir::IODescriptor arg1, mir::IODescriptor arg2) {
@@ -341,16 +333,9 @@ CaffeOpCreator::convertInnerProduct(const LayerParameter& layer,
 std::vector<mir::IODescriptor>
 CaffeOpCreator::convertConcat(const caffe::LayerParameter& layer,
                               const std::vector<mir::IODescriptor>& inputs) {
-  auto& opts = layer.concat_param();
-  if (cli::debugTranspose) {
-    auto concat = createOp<ops::ConcatOp>(layer.name(), inputs, opts.axis());
-    return {concat->getOutput(0)};
-  } else {
-    assert(opts.axis() == 1);
-    int32_t axis = 3;
-    auto concat = createOp<ops::ConcatOp>(layer.name(), inputs, axis);
-    return {concat->getOutput(0)};
-  }
+  const auto& params = layer.concat_param();
+  auto concat = createOp<ops::ConcatOp>(layer.name(), inputs, params.axis());
+  return {concat->getOutput(0)};
 }
 
 static ops::PoolOp::PoolingType getPoolingType(const PoolingParameter& pool_param) {
@@ -442,33 +427,25 @@ CaffeOpCreator::convertPooling(const caffe::LayerParameter& layer,
 std::vector<mir::IODescriptor>
 CaffeOpCreator::convertSoftmax(const caffe::LayerParameter& layer,
                                const std::vector<mir::IODescriptor>& inputs) {
-  auto& opts = layer.softmax_param();
-
-  if (cli::debugTranspose) {
-    // CPP and ACL backends are able to perform Softmax only along the last axis.
-    if (inputs[0].op->getOutputShape(inputs[0].index).rank() == 4) {
-      // For now, we only account for the most common case.
-      if (opts.axis() != 1)
-        throw PassException("Softmax: unsupported axis");
-      int32_t axis = 3;
-      auto input = createOp<ops::TransposeOp>(layer.name() + ".trans1", inputs[0],
-                                              std::vector<std::size_t>{0, 2, 3, 1});
-      auto softmax = createOp<ops::SoftmaxOp>(layer.name(), input->getOutput(0), axis);
-      auto result = createOp<ops::TransposeOp>(layer.name() + ".trans2", softmax->getOutput(0),
-                                               std::vector<std::size_t>{0, 3, 1, 2});
-      return {result->getOutput(0)};
-    }
+  const auto& params = layer.softmax_param();
 
-    auto softmax = createOp<ops::SoftmaxOp>(layer.name(), inputs[0], opts.axis());
-    return {softmax->getOutput(0)};
-  } else {
-    auto& input = inputs[0];
-    auto& input_shape = input.op->getOutputShape(input.index);
-    if (opts.axis() != 1)
+  // CPP and ACL backends are able to perform Softmax only along the last axis.
+  // FIXME Do it in backends.
+  if (inputs[0].op->getOutputShape(inputs[0].index).rank() == 4) {
+    // For now, we only account for the most common case.
+    if (params.axis() != 1)
       throw PassException("Softmax: unsupported axis");
-    auto softmax = createOp<ops::SoftmaxOp>(layer.name(), inputs[0], -1);
-    return {softmax->getOutput(0)};
+    int32_t axis = 3;
+    auto input = createOp<ops::TransposeOp>(layer.name() + ".trans1", inputs[0],
+                                            std::vector<std::size_t>{0, 2, 3, 1});
+    auto softmax = createOp<ops::SoftmaxOp>(layer.name(), input->getOutput(0), axis);
+    auto result = createOp<ops::TransposeOp>(layer.name() + ".trans2", softmax->getOutput(0),
+                                             std::vector<std::size_t>{0, 3, 1, 2});
+    return {result->getOutput(0)};
   }
+
+  auto softmax = createOp<ops::SoftmaxOp>(layer.name(), inputs[0], params.axis());
+  return {softmax->getOutput(0)};
 }
 
 void CaffeOpCreator::checkReshape(const ReshapeParameter& opts,
@@ -501,10 +478,6 @@ CaffeOpCreator::convertReshape(const caffe::LayerParameter& layer,
   return {reshape->getOutput(0)};
 }
 
-void CaffeOpCreator::checkReLU(const ReLUParameter& opts,
-                               std::set<std::string>& problems_op_set) {
-}
-
 std::vector<mir::IODescriptor>
 CaffeOpCreator::convertReLU(const caffe::LayerParameter& layer,
                             const std::vector<mir::IODescriptor>& inputs) {
@@ -516,7 +489,6 @@ CaffeOpCreator::convertReLU(const caffe::LayerParameter& layer,
     relu = createOp<ops::ReluOp>(layer.name(), inputs[0]);
   }
 
-
   return {relu->getOutput(0)};
 }
 
@@ -533,6 +505,10 @@ CaffeOpCreator::convertScale(const caffe::LayerParameter& layer,
     result = createOp<ops::BiasAddOp>(layer.name() + ".bias", result->getOutput(0),
                                       bias_weights);
   }
 
+  // FIXME Workaround until the tests for style transfer network are regenerated.
+  if (layer.top(0) == "output")
+    return {result->getOutput(0)};
+
   return {convertMIRToCaffe(result->getOutput(0))};
 }
 
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h
index 4f02a74..b18bfd1 100644
--- a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h
+++ b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.h
@@ -118,8 +118,6 @@ public:
   void checkReshape(const caffe::ReshapeParameter& opts,
                     std::set<std::string>&);
 
-  void checkReLU(const caffe::ReLUParameter& opts, std::set<std::string>&);
-
   void checkBatchNorm(const caffe::LayerParameter& layer, std::set<std::string>&);
 
   void checkLSTM(const caffe::LayerParameter& layer, std::set<std::string>&);
-- 
2.7.4