From: Сергей Баранников/AI Tools Lab /SRR/Engineer/삼성전자 Date: Fri, 23 Aug 2019 10:59:52 +0000 (+0900) Subject: [nnc] Use tensor names in backends (#6865) X-Git-Tag: accepted/tizen/unified/20190903.052428~153 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6322d0b20d45c3559da96a00e5c6e6907e315424;p=platform%2Fcore%2Fml%2Fnnfw.git [nnc] Use tensor names in backends (#6865) Switch from operation names to tensor names. Signed-off-by: Sergei Barannikov --- diff --git a/compiler/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp b/compiler/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp index 18d0581..9e1879e 100644 --- a/compiler/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp +++ b/compiler/nnc/passes/acl_soft_backend/AclCppOpGenerator.cpp @@ -694,19 +694,17 @@ shared_ptr AclCppOpGenerator::genMultiplication(const string &prefix string AclCppOpGenerator::tensorName(const Operation::Output *ir_tensor) const { - string tensor_name; + string tensor_name = ir_tensor->getName(); - // TODO Use the tensor name instead of the operation name. 
- const auto &op_name = ir_tensor->getNode()->getName(); - - if (!op_name.empty()) + if (!tensor_name.empty()) { - tensor_name = "_" + op_name; + tensor_name = "_" + tensor_name; replace_if(tensor_name.begin(), tensor_name.end(), [](char c) { return std::isalnum(c) == 0; }, '_'); } else { + assert(ir_tensor->getNode()->getNumOutputs() == 1); tensor_name = "tensor_" + to_string(ir_tensor->getNode()->getId()); } @@ -761,8 +759,7 @@ shared_ptr AclCppOpGenerator::genTensor(const string &name, const Sh shared_ptr AclCppOpGenerator::genTensor(const Operation::Output *ir_tensor) { - return genTensor(tensorName(ir_tensor), ir_tensor->getShape(), - !ir_tensor->getNode()->getName().empty()); + return genTensor(tensorName(ir_tensor), ir_tensor->getShape(), !ir_tensor->getName().empty()); } void AclCppOpGenerator::genNamed(Graph *graph) diff --git a/compiler/nnc/passes/interpreter/Interpreter.cpp b/compiler/nnc/passes/interpreter/Interpreter.cpp index 44e912a..f12f0e7 100644 --- a/compiler/nnc/passes/interpreter/Interpreter.cpp +++ b/compiler/nnc/passes/interpreter/Interpreter.cpp @@ -76,18 +76,15 @@ TensorVariant NNInterpreter::getResult(const Operation::Output *tensor) void NNInterpreter::visit(ops::InputOp &op) { - auto it = _inputTensors.find(op.getName()); + const auto &input_name = op.getOutput(0)->getName(); + const auto it = _inputTensors.find(input_name); if (it == _inputTensors.end()) - throw std::runtime_error("Can't find data for node \"" + op.getName() + + throw std::runtime_error("Can't find data for input tensor \"" + input_name + ". 
Input data was not set correctly?"); setOutputTensors(op, {it->second}); } -void NNInterpreter::visit(ops::ConstantOp &op) -{ - assert(_inputTensors.find(op.getName()) == _inputTensors.end()); - setOutputTensors(op, {op.getValue()}); -} +void NNInterpreter::visit(ops::ConstantOp &op) { setOutputTensors(op, {op.getValue()}); } void NNInterpreter::visit(ops::ConcatOp &op) { diff --git a/compiler/nnc/passes/interpreter/interpreter_pass.cpp b/compiler/nnc/passes/interpreter/interpreter_pass.cpp index fe18399..455c954 100644 --- a/compiler/nnc/passes/interpreter/interpreter_pass.cpp +++ b/compiler/nnc/passes/interpreter/interpreter_pass.cpp @@ -127,11 +127,12 @@ PassData InterpreterPass::run(PassData data) for (const auto *input_op : g->getInputs()) { - std::string tensor_name = input_op->getName(); + std::string tensor_name = input_op->getOutput(0)->getName(); + assert(!tensor_name.empty()); std::replace(tensor_name.begin(), tensor_name.end(), '/', '_'); std::string filename = cli::interInputDataDir + "/" + tensor_name + ".dat"; auto tensor = readTensorFromFile(filename, DataType::FLOAT32, input_op->getOutputShape(0)); - interpreter.setInput(input_op->getName(), tensor); + interpreter.setInput(input_op->getOutput(0)->getName(), tensor); } g->accept(&interpreter); @@ -139,12 +140,12 @@ PassData InterpreterPass::run(PassData data) for (const auto *output_op : g->getOutputs()) { const auto &tensor = interpreter.getResult(output_op->getInput(0)->getProducer()); + const auto &output_name = output_op->getInput(0)->getProducer()->getName(); #ifdef NNC_HDF5_SUPPORTED - writeTensorToHDF5File(tensor, output_op->getName(), cli::artifactDir); + writeTensorToHDF5File(tensor, output_name, cli::artifactDir); #else - std::cout << "Result <" << output_op->getName() << "> wasn't saved, due to lack of HDF5" - << std::endl; + std::cout << "Result <" << output_name << "> wasn't saved, due to lack of HDF5" << std::endl; #endif // NNC_HDF5_SUPPORTED } diff --git 
a/compiler/nnc/passes/soft_backend/ModelAnalyzer.cpp b/compiler/nnc/passes/soft_backend/ModelAnalyzer.cpp index ce843ee..2518d41 100644 --- a/compiler/nnc/passes/soft_backend/ModelAnalyzer.cpp +++ b/compiler/nnc/passes/soft_backend/ModelAnalyzer.cpp @@ -36,38 +36,36 @@ void ModelAnalyzer::appendOperationToInference(Operation *op, const string &func { vector node_output_tensors; - const string &op_name = op->getName(); // process operation outputs - size_t node_output_tensor_id = INVALID_TENSOR_ID; if (op->getType() == Operation::Type::input) { // register input tensor - node_output_tensor_id = declareInputTensor(op_name, op->getOutputShape(0)); + const string &tensor_name = op->getOutput(0)->getName(); + const auto tensor_id = declareInputTensor(tensor_name, op->getOutputShape(0)); + node_output_tensors.push_back(tensor_id); } else if (op->getType() == Operation::Type::constant) { // register constant tensor // it's data is deserialized to described tensor by O(1) at runtime - node_output_tensor_id = declareTemporaryTensor(); + const auto tensor_id = declareTemporaryTensor(); + node_output_tensors.push_back(tensor_id); } else if (op->getType() == Operation::Type::output) { - // process output op - node_output_tensor_id = declarePersistentTensor(op_name); - } - else if (!op_name.empty()) - { - // process a named operation - node_output_tensor_id = declarePersistentTensor(op_name); + assert(!op->getInput(0)->getProducer()->getName().empty()); } else { - // process ordinary unnamed operation - node_output_tensor_id = declareTemporaryTensor(); + for (const auto &output : op->getOutputs()) + { + const auto &tensor_name = output.getName(); + const auto tensor_id = + tensor_name.empty() ? 
declareTemporaryTensor() : declarePersistentTensor(tensor_name); + node_output_tensors.push_back(tensor_id); + } } - assert(node_output_tensor_id != INVALID_TENSOR_ID); - node_output_tensors.push_back(node_output_tensor_id); // process operation inputs vector node_input_tensors; @@ -105,17 +103,9 @@ size_t ModelAnalyzer::declareInputTensor(const std::string &name, const mir::Sha size_t ModelAnalyzer::declarePersistentTensor(const std::string &name) { + assert(!name.empty()); size_t id = _allocatedTensors++; - auto type = TensorDescriptor::Type::persistent; - if (name.empty()) - { - // special case for unnamed output tensors - _tensors.push_back({id, type, "unnamed_output" + to_string(id), {}}); - } - else - { - _tensors.push_back({id, type, name, {}}); - } + _tensors.push_back({id, TensorDescriptor::Type::persistent, name, {}}); _persistent_tensors.push_back(id); return id; } @@ -233,8 +223,8 @@ void ModelAnalyzer::collectOutputs(const mir::Graph *g) for (ops::OutputOp *out_op : g->getOutputs()) { auto op_call = dynamic_cast(_opToDescr[out_op]); - assert(op_call); - _outputs.insert(_outputs.end(), op_call->outputs.begin(), op_call->outputs.end()); + assert(op_call->inputs.size() == 1); + _outputs.push_back(op_call->inputs[0]); } } diff --git a/compiler/nnc/passes/soft_backend/code_snippets/cpp_operations.def b/compiler/nnc/passes/soft_backend/code_snippets/cpp_operations.def index 2694607..5e81eb9 100644 --- a/compiler/nnc/passes/soft_backend/code_snippets/cpp_operations.def +++ b/compiler/nnc/passes/soft_backend/code_snippets/cpp_operations.def @@ -633,6 +633,5 @@ void constant(Tensor& out, const char* params) { out = deserializeTensor(params); } -void out(Tensor& out, const char* params, const Tensor& in) { - out = in; +void out(const char* params, const Tensor& in) { } diff --git a/compiler/nnc/tests/soft_backend/CompileCPP.cpp b/compiler/nnc/tests/soft_backend/CompileCPP.cpp index 7f64bc9..566d391 100644 --- a/compiler/nnc/tests/soft_backend/CompileCPP.cpp +++ 
b/compiler/nnc/tests/soft_backend/CompileCPP.cpp @@ -51,6 +51,8 @@ static void fillGraph(Graph &g) Operation *input_op = g.create("in", input_shape); Operation *relu_op = g.create("relu", input_op->getOutput(0)); Operation *output_op = g.create("out", relu_op->getOutput(0)); + input_op->getOutput(0)->setName("in"); + relu_op->getOutput(0)->setName("out"); } static void checkFileExists(const string &path) diff --git a/compiler/nnc/unittests/soft_backend/CPPOperations.cpp b/compiler/nnc/unittests/soft_backend/CPPOperations.cpp index 861f09b..7407731 100644 --- a/compiler/nnc/unittests/soft_backend/CPPOperations.cpp +++ b/compiler/nnc/unittests/soft_backend/CPPOperations.cpp @@ -129,6 +129,7 @@ fillGraph(mir::Graph &g, { auto input_op = g.create("x" + std::to_string(i), input_ntensors[i]->getShape()); + input_op->getOutput(0)->setName("x" + std::to_string(i)); inputs.push_back(input_op->getOutput(0)); } diff --git a/compiler/nnc/unittests/soft_backend/Generator.cpp b/compiler/nnc/unittests/soft_backend/Generator.cpp index 50f55c6..41ba502 100644 --- a/compiler/nnc/unittests/soft_backend/Generator.cpp +++ b/compiler/nnc/unittests/soft_backend/Generator.cpp @@ -83,6 +83,7 @@ TEST(Generator, check_generator_call) mir::Graph g; Operation *input = g.create("input", Shape{1, 2, 3, 4}); + input->getOutput(0)->setName("input"); Operation *output = g.create("output", input->getOutput(0)); // test that generator creates output dir and files diff --git a/compiler/nnc/unittests/soft_backend/ModelAnalyzer.cpp b/compiler/nnc/unittests/soft_backend/ModelAnalyzer.cpp index 1848dc1..f7d5ef6 100644 --- a/compiler/nnc/unittests/soft_backend/ModelAnalyzer.cpp +++ b/compiler/nnc/unittests/soft_backend/ModelAnalyzer.cpp @@ -59,6 +59,12 @@ TEST(ModelAnalyzer, linearization) Operation *tail2 = g.create("tail2", head2->getOutput(0)); std::vector concat_inputs{tail1->getOutput(0), tail2->getOutput(0)}; Operation *join = g.create("join", concat_inputs, 0); + 
input->getOutput(0)->setName("input"); + head1->getOutput(0)->setName("head1"); + head2->getOutput(0)->setName("head2"); + tail1->getOutput(0)->setName("tail1"); + tail2->getOutput(0)->setName("tail2"); + join->getOutput(0)->setName("join"); // Check that layout is desired ModelAnalyzer ma;