* Change `ExecEnv` to accept `Graph` instead of `Model`.
* Replace `ExecEnv::model` with `ExecEnv::graph`.
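A minimal sketch of the resulting interface, assembled from the hunks below
(namespace and unrelated members omitted):

    #include "graph/Graph.h"

    class ExecEnv
    {
    public:
      ExecEnv(void) = delete;
      // Keeps a reference rather than shared ownership, so the caller must
      // guarantee the Graph outlives the ExecEnv.
      explicit ExecEnv(const graph::Graph &graph) : _graph(graph)
      {
        // DO NOTHING
      }

      const graph::Graph &graph(void) const { return _graph; }

    private:
      const graph::Graph &_graph;
    };

Callers such as the interpreter now pass the graph directly,
e.g. `nnfw::cpp14::make_unique<ExecEnv>(_graph)`, instead of
`_graph.shareModel()`.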
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
#include <unordered_set>
-#include "model/Model.h"
+#include "graph/Graph.h"
#include "Tensor.h"
namespace neurun
ExecEnv(void) = delete;
/**
* @brief Construct a new ExecEnv object
- * @param[in] model Model to execute by interpreter
+ * @param[in] graph Graph to execute by interpreter
*/
- ExecEnv(const std::shared_ptr<const model::Model> &model) : _model{model}
+ explicit ExecEnv(const graph::Graph &graph) : _graph(graph)
{
// DO NOTHING
}
public:
/**
- * @brief Return model to execute
- * @return Model
+ * @brief Return graph to execute
+ * @return Graph
*/
- const model::Model &model(void) const { return *_model; }
+ const graph::Graph &graph(void) const { return _graph; }
/**
* @brief Assign a tensor which has an allocated or assigned buffer to the environment
* @param[in] index Tensor index
}
private:
- std::shared_ptr<const model::Model> _model;
+ const graph::Graph &_graph;
// Tensor map to use in interpreter
// It should map tensors that have an allocated or assigned buffer pointer
std::unordered_map<model::OperandIndex, std::shared_ptr<ITensor>> _tensors;
Execution environment will be assigned to the invoked interpreter instance
***********************************************************************/
- std::unique_ptr<ExecEnv> interp_env = nnfw::cpp14::make_unique<ExecEnv>(_graph.shareModel());
+ std::unique_ptr<ExecEnv> interp_env = nnfw::cpp14::make_unique<ExecEnv>(_graph);
// Assign input tensor into interpreter execution environment
for (auto index : _graph.getInputs())
void execute(const model::OperationIndex &idx)
{
- const auto nodeName = _env->model().operations.at(idx).name();
+ const auto nodeName = _env->graph().operations().at(idx).name();
VERBOSE(INTERPRETER) << "Prepare output operands and execute " << nodeName
<< " operation (id: " << idx.value() << ")" << std::endl;
- _env->model().operations.at(idx).accept(*this);
+ _env->graph().operations().at(idx).accept(*this);
}
private:
// but Use-Def cannot handle parameters (maybe constant, but not always)
// Note: If all model inputs are constant, it may not work (depends on the tensors' order).
// But that scenario may not exist
- for (auto ind : _env->model().inputs)
+ for (auto ind : _env->graph().getInputs())
{
VERBOSE(INTERPRETER) << "Input: Push to operand stack " << ind.value() << std::endl;
operand_stack.push(ind);
}
- _env->model().operands.iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ _env->graph().operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
if (obj.isConstant())
{
VERBOSE(INTERPRETER) << "Constant: Push to operand stack " << ind.value() << std::endl;
// Find prepared operations by scanning uses of the current operand
std::stack<model::OperationIndex> operation_stack;
- const auto use_operators = _env->model().operands.at(current_operand_index).getUses();
+ const auto use_operators = _env->graph().operands().at(current_operand_index).getUses();
for (auto use_operator : use_operators.list())
{
// Assumption: all parameters are ready to use
bool operator_ready = true;
- for (auto input_index : _env->model().operations.at(use_operator).getInputs())
+ for (auto input_index : _env->graph().operations().at(use_operator).getInputs())
{
if (ready_check.find(input_index) == ready_check.end())
{
const auto current_operation_index = operation_stack.top();
operation_stack.pop();
VERBOSE(INTERPRETER) << "Poped operation: " << current_operation_index.value() << "("
- << _env->model().operations.at(current_operation_index).name() << ")"
+ << _env->graph().operations().at(current_operation_index).name() << ")"
<< std::endl;
// execution
executed.insert(current_operation_index);
// 3. Push each output into operand stack
- const auto def_operands = _env->model().operations.at(current_operation_index).getOutputs();
+ const auto def_operands = _env->graph().operations().at(current_operation_index).getOutputs();
for (auto def_operand : def_operands)
{
VERBOSE(INTERPRETER) << "Buffer: Push to operand stack " << def_operand.value()
}
// 4. Free buffer operands used as inputs once their lifetime is finished
- for (auto input_index : _env->model().operations.at(current_operation_index).getInputs())
+ for (auto input_index : _env->graph().operations().at(current_operation_index).getInputs())
{
- const auto use_operators = _env->model().operands.at(input_index).getUses();
+ const auto use_operators = _env->graph().operands().at(input_index).getUses();
bool dead_buffer = true;
for (auto use_operator : use_operators.list())
{
assert(in_tensor->num_dimensions() == 4);
- const auto output_info = env->model().operands.at(out_index).info();
+ const auto output_info = env->graph().operands().at(out_index).info();
if (output_info.total_size() == 0)
{
// Handle unspecified output shape
UNUSED_RELEASE(kernel_tensor);
UNUSED_RELEASE(bias_tensor);
- const auto output_info = env->model().operands.at(out_index).info();
+ const auto output_info = env->graph().operands().at(out_index).info();
if (output_info.total_size() == 0)
{
// Handle unspecified output shape
// TODO handle unspecified output shape:
// calculate output shape using ifm shape, kernel shape, padding, stride
- const auto output_info = env->model().operands.at(out_index).info();
+ const auto output_info = env->graph().operands().at(out_index).info();
if (output_info.total_size() == 0)
{
// Handle unspecified output shape
assert(in_tensor->num_dimensions() == 4);
UNUSED_RELEASE(in_tensor);
- const auto output_info = env->model().operands.at(out_index).info();
+ const auto output_info = env->graph().operands().at(out_index).info();
if (output_info.total_size() == 0)
{
// Handle unspecified output shape
const auto out_index = node.getOutputs().at(0);
// Unspecified shape is not supported in the operation node spec for now
- const auto output_info = env->model().operands.at(out_index).info();
+ const auto output_info = env->graph().operands().at(out_index).info();
env->allocateAndShareIfNeeded(out_index, output_info, in_index);
- assert(output_info.total_size() == env->model().operands.at(in_index).info().total_size());
+ assert(output_info.total_size() == env->graph().operands().at(in_index).info().total_size());
}
void invoke(const ExecEnv *env, const model::Operation &node)
return;
}
- const auto output_info = env->model().operands.at(out_index).info();
+ const auto output_info = env->graph().operands().at(out_index).info();
memcpy(env->tensorAt(out_index)->buffer(), env->tensorAt(in_index)->bufferRO(),
output_info.total_size());
}
// Output shape should be the same as the input
// Output type is pre-defined in model
- const auto output_shape = env->model().operands.at(in_index).info().shape();
- const auto output_type = env->model().operands.at(out_index).info().typeInfo();
+ const auto output_shape = env->graph().operands().at(in_index).info().shape();
+ const auto output_type = env->graph().operands().at(out_index).info().typeInfo();
const model::OperandInfo output_info{output_shape, output_type};
env->allocateIfNeeded(out_index, output_info);