Switch from operation names to tensor names.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
string AclCppOpGenerator::tensorName(const Operation::Output *ir_tensor) const
{
- string tensor_name;
+ string tensor_name = ir_tensor->getName();
- // TODO Use the tensor name instead of the operation name.
- const auto &op_name = ir_tensor->getNode()->getName();
-
- if (!op_name.empty())
+ if (!tensor_name.empty())
{
- tensor_name = "_" + op_name;
+ tensor_name = "_" + tensor_name;
replace_if(tensor_name.begin(), tensor_name.end(), [](char c) { return std::isalnum(c) == 0; },
'_');
}
else
{
+ assert(ir_tensor->getNode()->getNumOutputs() == 1);
tensor_name = "tensor_" + to_string(ir_tensor->getNode()->getId());
}
shared_ptr<ArtifactId> AclCppOpGenerator::genTensor(const Operation::Output *ir_tensor)
{
- return genTensor(tensorName(ir_tensor), ir_tensor->getShape(),
- !ir_tensor->getNode()->getName().empty());
+ return genTensor(tensorName(ir_tensor), ir_tensor->getShape(), !ir_tensor->getName().empty());
}
void AclCppOpGenerator::genNamed(Graph *graph)
void NNInterpreter::visit(ops::InputOp &op)
{
- auto it = _inputTensors.find(op.getName());
+ const auto &input_name = op.getOutput(0)->getName();
+ const auto it = _inputTensors.find(input_name);
if (it == _inputTensors.end())
- throw std::runtime_error("Can't find data for node \"" + op.getName() +
+ throw std::runtime_error("Can't find data for input tensor \"" + input_name +
". Input data was not set correctly?");
setOutputTensors(op, {it->second});
}
-void NNInterpreter::visit(ops::ConstantOp &op)
-{
- assert(_inputTensors.find(op.getName()) == _inputTensors.end());
- setOutputTensors(op, {op.getValue()});
-}
+void NNInterpreter::visit(ops::ConstantOp &op) { setOutputTensors(op, {op.getValue()}); }
void NNInterpreter::visit(ops::ConcatOp &op)
{
for (const auto *input_op : g->getInputs())
{
- std::string tensor_name = input_op->getName();
+ std::string tensor_name = input_op->getOutput(0)->getName();
+ assert(!tensor_name.empty());
std::replace(tensor_name.begin(), tensor_name.end(), '/', '_');
std::string filename = cli::interInputDataDir + "/" + tensor_name + ".dat";
auto tensor = readTensorFromFile(filename, DataType::FLOAT32, input_op->getOutputShape(0));
- interpreter.setInput(input_op->getName(), tensor);
+ interpreter.setInput(input_op->getOutput(0)->getName(), tensor);
}
g->accept(&interpreter);
for (const auto *output_op : g->getOutputs())
{
const auto &tensor = interpreter.getResult(output_op->getInput(0)->getProducer());
+ const auto &output_name = output_op->getInput(0)->getProducer()->getName();
#ifdef NNC_HDF5_SUPPORTED
- writeTensorToHDF5File(tensor, output_op->getName(), cli::artifactDir);
+ writeTensorToHDF5File(tensor, output_name, cli::artifactDir);
#else
- std::cout << "Result <" << output_op->getName() << "> wasn't saved, due to lack of HDF5"
- << std::endl;
+ std::cout << "Result <" << output_name << "> wasn't saved, due to lack of HDF5" << std::endl;
#endif // NNC_HDF5_SUPPORTED
}
{
vector<size_t> node_output_tensors;
- const string &op_name = op->getName();
// process operation outputs
- size_t node_output_tensor_id = INVALID_TENSOR_ID;
if (op->getType() == Operation::Type::input)
{
// register input tensor
- node_output_tensor_id = declareInputTensor(op_name, op->getOutputShape(0));
+ const string &tensor_name = op->getOutput(0)->getName();
+ const auto tensor_id = declareInputTensor(tensor_name, op->getOutputShape(0));
+ node_output_tensors.push_back(tensor_id);
}
else if (op->getType() == Operation::Type::constant)
{
// register constant tensor
// it's data is deserialized to described tensor by O(1) at runtime
- node_output_tensor_id = declareTemporaryTensor();
+ const auto tensor_id = declareTemporaryTensor();
+ node_output_tensors.push_back(tensor_id);
}
else if (op->getType() == Operation::Type::output)
{
- // process output op
- node_output_tensor_id = declarePersistentTensor(op_name);
- }
- else if (!op_name.empty())
- {
- // process a named operation
- node_output_tensor_id = declarePersistentTensor(op_name);
+ assert(!op->getInput(0)->getProducer()->getName().empty());
}
else
{
- // process ordinary unnamed operation
- node_output_tensor_id = declareTemporaryTensor();
+ for (const auto &output : op->getOutputs())
+ {
+ const auto &tensor_name = output.getName();
+ const auto tensor_id =
+ tensor_name.empty() ? declareTemporaryTensor() : declarePersistentTensor(tensor_name);
+ node_output_tensors.push_back(tensor_id);
+ }
}
- assert(node_output_tensor_id != INVALID_TENSOR_ID);
- node_output_tensors.push_back(node_output_tensor_id);
// process operation inputs
vector<size_t> node_input_tensors;
size_t ModelAnalyzer::declarePersistentTensor(const std::string &name)
{
+ assert(!name.empty());
size_t id = _allocatedTensors++;
- auto type = TensorDescriptor::Type::persistent;
- if (name.empty())
- {
- // special case for unnamed output tensors
- _tensors.push_back({id, type, "unnamed_output" + to_string(id), {}});
- }
- else
- {
- _tensors.push_back({id, type, name, {}});
- }
+ _tensors.push_back({id, TensorDescriptor::Type::persistent, name, {}});
_persistent_tensors.push_back(id);
return id;
}
for (ops::OutputOp *out_op : g->getOutputs())
{
auto op_call = dynamic_cast<const CallFunction *>(_opToDescr[out_op]);
- assert(op_call);
- _outputs.insert(_outputs.end(), op_call->outputs.begin(), op_call->outputs.end());
+ assert(op_call != nullptr && op_call->inputs.size() == 1);
+ _outputs.push_back(op_call->inputs[0]);
}
}
out = deserializeTensor(params);
}
-void out(Tensor& out, const char* params, const Tensor& in) {
- out = in;
+void out(const char* params, const Tensor& in) {
}
Operation *input_op = g.create<ops::InputOp>("in", input_shape);
Operation *relu_op = g.create<ops::ReluOp>("relu", input_op->getOutput(0));
Operation *output_op = g.create<ops::OutputOp>("out", relu_op->getOutput(0));
+ input_op->getOutput(0)->setName("in");
+ relu_op->getOutput(0)->setName("out");
}
static void checkFileExists(const string &path)
{
auto input_op =
g.create<mir::ops::InputOp>("x" + std::to_string(i), input_ntensors[i]->getShape());
+ input_op->getOutput(0)->setName("x" + std::to_string(i));
inputs.push_back(input_op->getOutput(0));
}
mir::Graph g;
Operation *input = g.create<ops::InputOp>("input", Shape{1, 2, 3, 4});
+ input->getOutput(0)->setName("input");
Operation *output = g.create<ops::ReluOp>("output", input->getOutput(0));
// test that generator creates output dir and files
Operation *tail2 = g.create<ops::ReluOp>("tail2", head2->getOutput(0));
std::vector<mir::Operation::Output *> concat_inputs{tail1->getOutput(0), tail2->getOutput(0)};
Operation *join = g.create<ops::ConcatOp>("join", concat_inputs, 0);
+ input->getOutput(0)->setName("input");
+ head1->getOutput(0)->setName("head1");
+ head2->getOutput(0)->setName("head2");
+ tail1->getOutput(0)->setName("tail1");
+ tail2->getOutput(0)->setName("tail2");
+ join->getOutput(0)->setName("join");
// Check that layout is desired
ModelAnalyzer ma;