* Add OutputOp class to represent graph outputs.
* Add generation of instances of the class in importers.
* Modify backends to account for the new graph structure.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
with->getMutablePrevNodes() = op->getPrevNodes();
}
-Operation* Graph::getInput(const std::string& name) {
- auto it = _inputs.find(name);
- if (it == _inputs.end())
- return nullptr;
- else
- return it->second;
-}
-
-Operation* Graph::getOutput(const std::string& name) {
- auto it = _outputs.find(name);
- if (it == _outputs.end())
- return nullptr;
- else
- return it->second;
-}
-
void Graph::accept(IVisitor* visitor) {
std::deque<Operation*> q;
std::set<Operation*> known_ops;
- for (const auto& e : _inputs) {
- q.push_back(e.second);
- known_ops.insert(e.second); //Consider all input _ops resolved by default
- }
-
- for (const auto& e : _constants) {
- q.push_back(e);
- known_ops.insert(e); //Consider all input _ops resolved by default
+ for (auto* op : _ops) {
+ if (op->getNumInputs() == 0) {
+ q.emplace_back(op);
+ known_ops.insert(op);
+ }
}
//BFS
}
}
-void Graph::markOutput(Operation* op) {
- auto it = _outputs.find(op->getName());
- if (it != _outputs.end()) {
- throw std::runtime_error("Output node with same name already exists");
- }
+void Graph::registerOp(Operation* op) {
+ _ops.push_back(op);
- _outputs[op->getName()] = op;
-}
+ if (auto* input_op = dynamic_cast<ops::InputOp*>(op))
+ _inputs.emplace_back(input_op);
-std::vector<Operation*> Graph::collectInputs() const {
- std::vector<Operation*> res;
- for (auto& e : _inputs) {
- res.emplace_back(e.second);
- }
- return res;
-}
-
-std::vector<Operation*> Graph::collectConstants() const {
- std::vector<Operation*> res;
- for (auto& e : _constants) {
- res.emplace_back(e);
- }
- return res;
-}
-
-std::vector<Operation*> Graph::collectOutputs() const {
- std::vector<Operation*> res;
- for (auto& e : _outputs) {
- res.emplace_back(e.second);
- }
- return res;
+ if (auto* output_op = dynamic_cast<ops::OutputOp*>(op))
+ _outputs.emplace_back(output_op);
}
void Graph::replaceNode(const Operation* op, Operation* with) {
- auto in = _inputs.find(op->getName());
- if (in != _inputs.end()) {
- (*in).second = with;
- }
+ replaceUsages(op, with);
- auto out_it = _outputs.find(op->getName());
- if (out_it != _outputs.end()) {
- (*out_it).second = with;
- }
+ _inputs.erase(std::remove_if(_inputs.begin(), _inputs.end(), [op](ops::InputOp* n) {
+ return n == op;
+ }), _inputs.end());
- replaceUsages(op, with);
+ _outputs.erase(std::remove_if(_outputs.begin(), _outputs.end(), [op](ops::OutputOp* n) {
+ return n == op;
+ }), _outputs.end());
- _ops.erase(std::remove_if(_ops.begin(), _ops.end(), [op] (Operation* n) {
+ _ops.erase(std::remove_if(_ops.begin(), _ops.end(), [op](Operation* n) {
return n == op;
}), _ops.end());
}
}
}
-void Graph::replaceOutputNodes(const std::vector<std::string>& new_outputs) {
- _outputs.clear();
-
- std::set<std::string> new_outputs_set(new_outputs.begin(), new_outputs.end());
-
- for (auto& op : _ops) {
- if (new_outputs_set.count(op->getName()) != 0) {
- markOutput(op);
- }
- }
-}
-
} // namespace mir
} // namespace nnc
* limitations under the License.
*/
-#include <iostream>
-
#include "core/modelIR/IrDotDumper.h"
+
#include "core/modelIR/operations/BatchNormOp.h"
#include "core/modelIR/operations/BiasAddOp.h"
#include "core/modelIR/operations/CappedReluOp.h"
#include "core/modelIR/operations/GemmOp.h"
#include "core/modelIR/operations/InputOp.h"
#include "core/modelIR/operations/LeakyReluOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
#include "core/modelIR/operations/TanhOp.h"
#include "core/modelIR/operations/TransposeOp.h"
+#include <iostream>
+
namespace nnc {
namespace mir {
dotBuilder.updateWithOp(&op, node_info);
}
+void IrDotDumper::visit(ops::OutputOp& op) {
+ auto node_info = DotIrNodeInfo().withType("OutputOp", op.getName())
+ .withInShapes(getInputShapes(op));
+
+ dotBuilder.updateWithOp(&op, node_info);
+}
+
} // namespace mir
} // namespace nnc
#include "core/modelIR/operations/GemmOp.h"
#include "core/modelIR/operations/InputOp.h"
#include "core/modelIR/operations/LeakyReluOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
#include <set>
#include "core/modelIR/Operation.h"
-#include "core/modelIR/operations/ConstantOp.h"
#include "core/modelIR/operations/InputOp.h"
+#include "core/modelIR/operations/OutputOp.h"
namespace nnc {
namespace mir {
void accept(IVisitor* visitor);
- void markOutput(Operation* op);
- Operation* getInput(const std::string& name);
- Operation* getOutput(const std::string& name);
-
/**
- * @brief Returns all inputs from graph
- * @returns vector containing all graph input nodes
+ * @brief Returns all graph nodes
+ * @returns vector containing all graph nodes
*/
- std::vector<Operation*> collectInputs() const;
+ const std::vector<Operation*>& getNodes() const { return _ops; }
/**
- * @brief Returns all constants from graph
- * @returns vector containing all graph constant nodes
+ * @brief Returns all graph input nodes
+ * @returns vector containing all graph input nodes
*/
- std::vector<Operation*> collectConstants() const;
+ const std::vector<ops::InputOp*>& getInputs() const { return _inputs; }
/**
- * @brief Returns all outputs from graph
- * @returns vector containing all graph outputs nodes
+ * @brief Returns all graph output nodes
+ * @returns vector containing all graph output nodes
*/
- std::vector<Operation*> collectOutputs() const;
-
+ const std::vector<ops::OutputOp*>& getOutputs() const { return _outputs; }
/**
   * @brief Substitute node in graph with another keeping all edges
*/
void replaceInputNodes(const std::vector<std::string>& new_inputs);
- /**
- * @brief Change graph outputs to nodes with names in newOutputs
- * @param new_outputs names of nodes to be marked as output nodes
- * @warning Output node order is not preserved and may differ from newOutputs vector
- * @note Does essentially the same as markOutput() does, but takes node names
- */
- void replaceOutputNodes(const std::vector<std::string>& new_outputs);
-
- private:
- void registerOp(Operation* op) {
- _ops.push_back(op);
- }
-
- //TODO: maybe make user to mark input _ops in a more obvious way
- void registerOp(ops::InputOp* op) {
- auto it = _inputs.find(op->getName());
- if( it != _inputs.end()) {
- throw std::runtime_error("Input name collision");
- }
- _inputs.insert(it, {op->getName(), op});
- _ops.push_back(op);
- }
-
- void registerOp(ops::ConstantOp* op) {
- _constants.insert(op);
- _ops.push_back(op);
- }
+private:
+ void registerOp(Operation* op);
std::vector<Operation*> _ops;
size_t _lastNodeId = 0;
- std::unordered_map<std::string, Operation*> _inputs;
- std::unordered_map<std::string, Operation*> _outputs;
- std::set<Operation*> _constants;
+ std::vector<ops::InputOp*> _inputs;
+ std::vector<ops::OutputOp*> _outputs;
};
} // namespace mir
void visit(ops::GemmOp& op) override;
void visit(ops::InputOp& op) override;
void visit(ops::LeakyReluOp& op) override;
+ void visit(ops::OutputOp& op) override;
void visit(ops::PadOp& op) override;
void visit(ops::PoolOp& op) override;
void visit(ops::ReduceFOp& op) override;
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_IR_MODEL_OUTPUT_H_
+#define _NNC_CORE_IR_MODEL_OUTPUT_H_
+
+#include "core/modelIR/Operation.h"
+
+namespace nnc {
+namespace mir {
+namespace ops {
+
+class OutputOp : public Operation {
+public:
+ explicit OutputOp(IODescriptor input) : Operation(Type::output, {input}) {}
+};
+
+} // namespace ops
+} // namespace mir
+} // namespace nnc
+
+#endif //_NNC_CORE_IR_MODEL_OUTPUT_H_
HANDLE_OP(gemmOp, GemmOp)
HANDLE_OP(input, InputOp)
HANDLE_OP(leakyReLU, LeakyReluOp)
+HANDLE_OP(output, OutputOp)
HANDLE_OP(pad, PadOp)
HANDLE_OP(pool, PoolOp)
HANDLE_OP(reduceF, ReduceFOp)
void visit(ops::GemmOp& op) override;
void visit(ops::InputOp& op) override;
void visit(ops::LeakyReluOp& op) override;
+ void visit(ops::OutputOp& op) override;
void visit(ops::PadOp& op) override;
void visit(ops::PoolOp& op) override;
void visit(ops::ReduceFOp& op) override;
void visit(ops::TransposeOp& op) override;
void setInput(const std::string &name, const TensorVariant& data);
- std::vector<TensorVariant> &getResult(Operation* op);
+ TensorVariant getResult(IODescriptor tensor);
void dump(Operation& op, bool all = false);
~NNInterpreter() override = default;
#include "core/modelIR/operations/GemmOp.h"
#include "core/modelIR/operations/InputOp.h"
#include "core/modelIR/operations/LeakyReluOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
}
void AclCppOpGenerator::genNamed(Graph* graph) {
- const auto& inputs = graph->collectInputs();
+ const auto& inputs = graph->getInputs();
if (inputs.size() == 1) {
auto f = _artifactClass->func(true, "arm_compute::CLTensor&", "getInput");
auto b = f->getBlock();
b->ret(id);
}
- const auto& outputs = graph->collectOutputs();
+ const auto& outputs = graph->getOutputs();
if (outputs.size() == 1) {
auto f = _artifactClass->func(true, "arm_compute::CLTensor&", "getOutput");
auto b = f->getBlock();
- auto id = AF::id(tensorName(outputs[0]->getOutput(0)));
+ auto id = AF::id(tensorName(outputs[0]->getInput(0)));
b->ret(id);
}
}
genActivation(op, "LEAKY_RELU", op.getAlpha());
}
+void AclCppOpGenerator::visit(mir::ops::OutputOp&) {
+ // No-op.
+}
+
}
// namespace nnc
void visit(mir::ops::GemmOp& op) override;
void visit(mir::ops::InputOp& op) override;
void visit(mir::ops::LeakyReluOp& op) override;
+ void visit(mir::ops::OutputOp& op) override;
void visit(mir::ops::PadOp& op) override;
void visit(mir::ops::PoolOp& op) override;
void visit(mir::ops::ReduceFOp& op) override;
#include "caffe2_op_types.h"
#include "caffe2_op_creator.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/Shape.h"
#include "pass/PassException.h"
// For now, we assume that:
// - there is exactly one output;
// - the output is from the last layer.
- _lastMIROp->setName("out");
- _graph->markOutput(_lastMIROp);
+ _graph->create<mir::ops::OutputOp>("out", _lastMIROp->getOutput(0));
}
const std::map<std::string, SupportedCaffe2OpType> Caffe2Importer::_operatorTypes = {
#include "core/modelIR/operations/BiasAddOp.h"
#include "core/modelIR/operations/CappedReluOp.h"
#include "core/modelIR/operations/ConcatOp.h"
+#include "core/modelIR/operations/ConstantOp.h"
#include "core/modelIR/operations/Conv2DOp.h"
#include "core/modelIR/operations/DepthwiseConv2DOp.h"
#include "core/modelIR/operations/DropoutOp.h"
#include "caffe_op_creator.h"
#include "caffe_op_types.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/Shape.h"
#include "core/modelIR/TensorUtil.h"
#include "pass/PassException.h"
// For now, we assume that:
// - there is exactly one output;
// - the output is from the last layer.
- _graph->markOutput(_blobNameToIODescriptor[last_layer.top(0)].op);
+ auto output = _blobNameToIODescriptor[last_layer.top(0)];
+ _graph->create<mir::ops::OutputOp>(output.op->getName(), output);
+ output.op->setName("");
}
void CaffeImporter::cleanup() {
#include "core/modelIR/operations/BiasAddOp.h"
#include "core/modelIR/operations/CappedReluOp.h"
#include "core/modelIR/operations/ConcatOp.h"
+#include "core/modelIR/operations/ConstantOp.h"
#include "core/modelIR/operations/Conv2DOp.h"
#include "core/modelIR/operations/Deconv2DOp.h"
#include "core/modelIR/operations/DepthwiseConv2DOp.h"
#include "core/modelIR/operations/GatherOp.h"
#include "core/modelIR/operations/GemmOp.h"
#include "core/modelIR/operations/InputOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/operations/LeakyReluOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
var(op.getId()) = {op.getValue()};
}
-std::vector<TensorVariant> &NNInterpreter::getResult(Operation* op) {
- auto res = vars.find(op->getId());
- if (res != vars.end())
- return res->second;
- else
- throw std::runtime_error("No such value: " + std::to_string(op->getId()));
+TensorVariant NNInterpreter::getResult(IODescriptor tensor) {
+ return vars.at(tensor.op->getId()).at(tensor.index);
}
void NNInterpreter::visit(ops::ConcatOp& op) {
DUMP(op, false);
}
+void NNInterpreter::visit(ops::OutputOp&) {
+ // No-op.
+}
+
} // namespace nnc
 * @param tensor_name - name, by which tensor will be saved
* @param destination - path to file, in which tensor will be saved
*/
-static void writeTensorToHDF5File(TensorVariant* tensor,
+static void writeTensorToHDF5File(const TensorVariant& tensor,
std::string tensor_name,
const std::string& destination) {
// Prepare shape, rank, dims, numElems
- auto& shape = tensor->getShape();
+ auto& shape = tensor.getShape();
const int32_t rank = shape.rank();
hsize_t dims[rank];
for (int32_t axis = 0; axis < rank; ++axis) {
std::vector<float> values;
values.reserve(shape.numElements());
ShapeRange out_range(shape);
- Tensor<float> tensor_accessor(*tensor);
+ Tensor<float> tensor_accessor(tensor);
for (auto& out_idx : out_range)
values.push_back(tensor_accessor.at(out_idx));
NNInterpreter interpreter;
// Check ops
- const auto& inputs = g->collectInputs();
+ const auto& inputs = g->getInputs();
assert(inputs.size() == 1 && "Interpreter doesn't support networks with multiple input nodes");
auto input_node = inputs[0];
interpreter.setInput(input_node->getName(), input_data);
g->accept(&interpreter);
- // Check nodes
- const auto& outputs = g->collectOutputs();
-#if 0
- interpreter.dump(*outputs[0], true);
-#endif
-
- for (auto& out : outputs) {
- auto outputNode = interpreter.getResult(out);
- if (outputNode.empty())
- throw PassException("No value for output node <" + out->getName() + ">");
- }
-
- bool is_several_outs = (outputs.size() > 1);
-
- nnc::mir::TensorVariant* out_data = nullptr;
- for (auto& out_node : outputs) {
- out_data = new TensorVariant(interpreter.getResult(out_node)[0]);
+ for (auto out_node : g->getOutputs()) {
+ const auto& tensor = interpreter.getResult(out_node->getInput(0));
#ifdef NNC_HDF5_SUPPORTED
- writeTensorToHDF5File(out_data, out_node->getName(), cli::artifactDir);
+ writeTensorToHDF5File(tensor, out_node->getName(), cli::artifactDir);
#else
std::cout << "Result <" << out_node->getName()
<< "> wasn't saved, due to lack of HDF5" << std::endl;
#endif // NNC_HDF5_SUPPORTED
- if (is_several_outs)
- delete out_data;
}
- _out = is_several_outs ? nullptr : out_data;
-
- return _out;
+ return nullptr;
}
TensorVariant InterpreterPass::loadInput(const Shape& shape) {
#include "core/modelIR/operations/DepthwiseConv2DOp.h"
#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/operations/InputOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/operations/TransposeOp.h"
#include "core/modelIR/Operation.h"
#include "core/modelIR/Shape.h"
}
// set graph outputs
// TODO: it should be done with onnx graph outputs
- for (auto& output_idx : _graphOutputs)
- _graph->markOutput(output_idx.op);
+ for (auto output : _graphOutputs) {
+ _graph->create<mir::ops::OutputOp>(output.op->getName(), output);
+ output.op->setName("");
+ }
return _graph;
}
#include "core/modelIR/operations/GemmOp.h"
#include "core/modelIR/operations/InputOp.h"
#include "core/modelIR/operations/LeakyReluOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
// register constant tensor
// it's data is deserialized to described tensor by O(1) at runtime
node_output_tensor_id = declareTemporaryTensor();
- } else if (!op_name.empty() || op->getNextNodes().empty()) {
+ } else if (op->getType() == Operation::Type::output) {
// process output op
node_output_tensor_id = declarePersistentTensor(op_name);
+ } else if (!op_name.empty()) {
+ // process a named operation
+ node_output_tensor_id = declarePersistentTensor(op_name);
} else {
// process ordinary unnamed operation
node_output_tensor_id = declareTemporaryTensor();
}
void ModelAnalyzer::collectOutputs(const mir::Graph* g) {
- for (Operation* out_op: g->collectOutputs()) {
+ for (ops::OutputOp* out_op: g->getOutputs()) {
assert(dynamic_cast<const CallFunction*>(_opToDescr[out_op]));
auto op_call = static_cast<const CallFunction*>(_opToDescr[out_op]);
_outputs.insert(_outputs.end(), op_call->outputs.begin(), op_call->outputs.end());
// Set contains pointer to node if it is visited by DFS
set<Operation*> visited;
- // Collect all inputs and constants
- vector<Operation*> init_ops(g->collectInputs());
- auto constants = g->collectConstants();
- init_ops.insert(init_ops.end(), constants.begin(), constants.end());
+ vector<Operation*> init_ops;
+ for (Operation* op : g->getNodes()) {
+ if (op->getNumInputs() == 0) {
+ init_ops.emplace_back(op);
+ }
+ }
// Register temporary tensor for im2col buffer
_temp_tensor_id = declareTemporaryTensor();
// Walk all network inputs
for (Operation* in : init_ops) {
- assert(dynamic_cast<ops::InputOp*>(in) || dynamic_cast<ops::ConstantOp*>(in));
if (!visited.count(in)) {
visited.insert(in);
s.push({in, 0});
appendOperationToInference(&op, "leakyRelu");
}
+void ModelAnalyzer::visit(mir::ops::OutputOp& op) {
+ appendOperationToInference(&op, "out");
+}
+
} // namespace nnc
void visit(mir::ops::EluOp& op) override;
void visit(mir::ops::FullyConnectedOp& op) override;
void visit(mir::ops::GatherOp& op) override;
+ void visit(mir::ops::GemmOp& op) override;
void visit(mir::ops::InputOp& op) override;
void visit(mir::ops::LeakyReluOp& op) override;
- void visit(mir::ops::GemmOp& op) override;
+ void visit(mir::ops::OutputOp& op) override;
void visit(mir::ops::PadOp& op) override;
void visit(mir::ops::PoolOp& op) override;
void visit(mir::ops::ReduceFOp& op) override;
}
}
+void Serializer::visit(mir::ops::OutputOp& op) {
+ // no parameters to dump
+}
+
} // namespace nnc
void visit(mir::ops::ElementwiseOp& op) override;
void visit(mir::ops::EluOp& op) override;
void visit(mir::ops::FullyConnectedOp& op) override;
- void visit(mir::ops::InputOp& op) override;
- void visit(mir::ops::LeakyReluOp& op) override;
void visit(mir::ops::GatherOp& op) override;
void visit(mir::ops::GemmOp& op) override;
+ void visit(mir::ops::InputOp& op) override;
+ void visit(mir::ops::LeakyReluOp& op) override;
+ void visit(mir::ops::OutputOp& op) override;
void visit(mir::ops::PadOp& op) override;
void visit(mir::ops::PoolOp& op) override;
void visit(mir::ops::ReduceFOp& op) override;
void constant(Tensor& out, const char* params) {
out = deserializeTensor(params);
}
+
+void out(Tensor& out, const char* params, const Tensor& in) {
+ out = in;
+}
#include "schema_generated.h"
#include "tflite_importer.h"
+#include "core/modelIR/operations/ConstantOp.h"
#include "core/modelIR/operations/ElementwiseOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "tflite_op_creator.h"
#include "passes/common_frontend/op_creator_helper.h"
}
void TfliteImporter::setGraphOutputs() {
- // Marking nodes as output nodes.
- for (auto output_idx : _graphOutputs)
- _graph->markOutput(_tensorMap[output_idx].op);
+ for (auto output_idx : _graphOutputs) {
+ auto output = _tensorMap[output_idx];
+ _graph->create<mir::ops::OutputOp>(output.op->getName(), output);
+ output.op->setName("");
+ }
}
void TfliteImporter::setIrNodeNames() {
// Note: we change the computation graph, (for example, TFLite Conv2D
// turns into IR Conv2D->BiasAdd->ReLU), so not all of the nodes will have names.
for (auto iter : _tensorMap) {
- iter.second.op->setName((*_tensors)[iter.first]->name()->c_str());
+ const Tensor* tensor = (*_tensors)[iter.first];
+ iter.second.op->setName(tensor->name()->c_str());
}
}
#include "core/modelIR/operations/BiasAddOp.h"
#include "core/modelIR/operations/CappedReluOp.h"
#include "core/modelIR/operations/ConcatOp.h"
+#include "core/modelIR/operations/ConstantOp.h"
#include "core/modelIR/operations/Conv2DOp.h"
#include "core/modelIR/operations/Deconv2DOp.h"
#include "core/modelIR/operations/DepthwiseConv2DOp.h"
#include "core/modelIR/Graph.h"
#include "core/modelIR/Shape.h"
#include "core/modelIR/operations/InputOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/operations/ReluOp.h"
#include "passes/soft_backend/CPPGenerator.h"
// Creates simple graph with input and output
-void fillGraph(Graph &g)
-{
- Shape inputShape{1, 2, 3};
- Operation* inputOp = g.create<ops::InputOp>("in", inputShape);
- Operation* outputOp = g.create<ops::ReluOp>("out", inputOp->getOutput(0));
- g.markOutput(outputOp);
+static void fillGraph(Graph& g) {
+ Shape input_shape{1, 2, 3};
+ Operation* input_op = g.create<ops::InputOp>("in", input_shape);
+ Operation* relu_op = g.create<ops::ReluOp>("relu", input_op->getOutput(0));
+ Operation* output_op = g.create<ops::OutputOp>("out", relu_op->getOutput(0));
}
static void checkFileExists(const string &path)
#include "core/modelIR/operations/EluOp.h"
#include "core/modelIR/operations/FullyConnectedOp.h"
#include "core/modelIR/operations/InputOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
* @param input_shapes vector of network input shapes
* */
void fillGraph(Graph& g, const OpConstructor& op_constr, const vector<Shape>& input_shapes) {
- // Create inputs
+ // Create graph inputs.
vector<mir::IODescriptor> inputs;
- int num_inputs = input_shapes.size();
- for (int i = 0; i < num_inputs; ++i) {
- auto inputOp = g.create<ops::InputOp>("x" + to_string(i), input_shapes[i]);
- inputs.push_back(inputOp->getOutput(0));
+ for (std::size_t i = 0; i < input_shapes.size(); ++i) {
+ auto input_op = g.create<ops::InputOp>("x" + to_string(i), input_shapes[i]);
+ inputs.push_back(input_op->getOutput(0));
}
- // Create operation
+ // Create the operation.
Operation* op = op_constr(g, inputs);
- // Mark outputs
- g.markOutput(op);
+ // Create graph outputs.
+ for (std::size_t i = 0; i < op->getNumOutputs(); ++i)
+ g.create<ops::OutputOp>("y" + to_string(i), op->getOutput(i));
}
/**
#include "core/modelIR/Graph.h"
#include "core/modelIR/operations/ConcatOp.h"
#include "core/modelIR/operations/InputOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/operations/ReluOp.h"
namespace {
g->accept(&d);
auto str = ss.str();
- ASSERT_EQ(str, "iop4iop1rop2rop3cop5");
- delete g;
-};
-
-TEST(Graph, ReplaceOutputs) {
- //There is not much to test here as Graph::replaceOutputNodes simply calls Graph::markOutput
- // multiple times ( Graph::markOutput just places passed node into Graph::_outputs map )
-
- auto g = new Graph;
-
- auto n1 = g->create<ops::InputOp>("op1", Shape{1});
- auto n2 = g->create<ops::ReluOp>("op2", n1->getOutput(0));
- auto n3 = g->create<ops::ReluOp>("op3", n2->getOutput(0));
- auto n4 = g->create<ops::ReluOp>("op4", n2->getOutput(0));
- auto n5 = g->create<ops::ConcatOp>("op5",
- std::vector<IODescriptor>{n3->getOutput(0), n4->getOutput(0)},
- 0);
-
- g->replaceOutputNodes({"op3"});
-
- std::vector<Operation*> expectedOutputs{n3};
- ASSERT_EQ(g->collectOutputs(), expectedOutputs);
+ ASSERT_EQ(str, "iop1iop4rop2rop3cop5");
delete g;
};
auto n1 = g->create<ops::InputOp>("op1", Shape{});
auto n2 = g->create<ops::ReluOp>("op2", n1->getOutput(0));
-
- g->markOutput(n2);
+ auto n3 = g->create<ops::OutputOp>("op3", n2->getOutput(0));
auto in2 = g->replaceWithInputNode(n2);
- std::vector<Operation*> expectedInputs{in2, n1};
- ASSERT_EQ(g->collectInputs(), expectedInputs);
+ std::vector<ops::InputOp*> expectedInputs{dynamic_cast<ops::InputOp*>(n1), in2};
+ ASSERT_EQ(g->getInputs(), expectedInputs);
delete g;
}
#include "core/modelIR/operations/FullyConnectedOp.h"
#include "core/modelIR/operations/InputOp.h"
#include "core/modelIR/operations/LeakyReluOp.h"
+#include "core/modelIR/operations/OutputOp.h"
#include "core/modelIR/operations/PadOp.h"
#include "core/modelIR/operations/PoolOp.h"
#include "core/modelIR/operations/ReduceFOp.h"
fillGraph(mir::Graph& g,
const function<mir::Operation*(mir::Graph& g, vector<mir::IODescriptor>& inputs)>& op_gen,
const vector<unique_ptr<mir::TensorVariant>>& input_ntensors) {
- // Create inputs
+ // Create graph inputs.
std::vector<mir::IODescriptor> inputs;
- int num_inputs = input_ntensors.size();
- for (int i = 0; i < num_inputs; ++i) {
+ for (std::size_t i = 0; i < input_ntensors.size(); ++i) {
auto input_op =
g.create<mir::ops::InputOp>("x" + std::to_string(i), input_ntensors[i]->getShape());
inputs.push_back(input_op->getOutput(0));
}
- // Create operation
+ // Create the operation.
mir::Operation* op = op_gen(g, inputs);
- // Mark outputs
- g.markOutput(op);
+ // Create graph outputs.
+ assert(op->getNumOutputs() == 1);
+ g.create<mir::ops::OutputOp>(op->getName(), op->getOutput(0));
+ op->setName("");
return op;
}
/**
* @brief Run interpreter to get reference output data
*/
-mir::TensorVariant getReferenceTensor(mir::Graph &g,
- const vector<unique_ptr<mir::TensorVariant>> &input_ntensors,
- const string& output_name) {
+mir::TensorVariant
+getReferenceTensor(mir::Graph& g,
+ const vector<unique_ptr<mir::TensorVariant>>& input_ntensors) {
mir::NNInterpreter interpreter;
for (int i = 0; i < static_cast<int>(input_ntensors.size()); ++i)
interpreter.setInput("x" + to_string(i), *input_ntensors[i]);
g.accept(&interpreter);
- return interpreter.getResult(g.getOutput(output_name))[0];
+ return interpreter.getResult(g.getOutputs()[0]->getInput(0));
};
/**
serializer.serialize(inference_sequence);
assert(static_cast<sir::CallFunction*>(inference_sequence.front().get())->paramStartOffset == 0);
- const string& output_name = actual_operation->getName();
- mir::TensorVariant reference_output = getReferenceTensor(g, input_ntensors, output_name);
+ mir::TensorVariant reference_output = getReferenceTensor(g, input_ntensors);
Tensor test_output;
artifactOperation(test_output, serializer.getBuffer().data(), input_atensors...);