/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "InterpExecutor.h"

#include "ExecEnv.h"
#include "Interpreter.h"

#include "util/logging.h"

#include <memory>

namespace onert
{
namespace interp
{
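// execute() wires the caller-provided I/O description into an interpreter
// execution environment: input buffers are wrapped as read-only tensors,
// output buffers are registered as external buffers, constant operands are
// exposed as tensors, and the resulting ExecEnv is handed to an Interpreter.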
void InterpExecutor::execute(const exec::IODescription &desc)
{
  /************************************************************************
   * Prepare the execution model (submodel)
   *
   * A divided submodel may be executed here, but for now the whole model
   * inference is done by the interpreter.
   ***********************************************************************/
  ir::OperandIndexMap<std::shared_ptr<ITensor>> tensor_map;

  // Wrap each user-provided input buffer as a read-only tensor
  for (uint32_t n = 0; n < _graph.getInputs().size(); n++)
  {
    ir::IOIndex index{n};
    const auto input_index = _graph.getInputs().at(index);
    const auto input = desc.inputs.at(n).get();
    if (input == nullptr)
    {
      // Optional input that the caller did not provide
      continue;
    }

    auto input_tensor = std::make_shared<ROTensor>(input->info);
    input_tensor->setData(std::make_shared<const ir::ExternalData>(
        reinterpret_cast<const uint8_t *>(input->buffer), input->size));
    tensor_map[input_index] = input_tensor;
  }
  /************************************************************************
   * Prepare the execution environment
   *
   * The execution environment will be assigned to the invoked interpreter
   * instance.
   ***********************************************************************/

  std::unique_ptr<ExecEnv> interp_env = std::make_unique<ExecEnv>(_graph);

  // Assign input/output tensors to the interpreter execution environment
  for (auto index : _graph.getInputs())
  {
    if (tensor_map.find(index) != tensor_map.end())
    {
      VERBOSE(INTERPRETER) << "Assign input tensor. operand index:" << index << std::endl;
      interp_env->assignTensor(index, tensor_map.at(index));
    }
  }
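  // Outputs are not wrapped as tensors; instead the caller-owned output
  // buffer is registered directly so the interpreter writes results into it.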
  for (uint32_t n = 0; n < _graph.getOutputs().size(); n++)
  {
    ir::IOIndex index{n};
    const auto output_index = _graph.getOutputs().at(index);
    const auto output = desc.outputs.at(n).get();
    if (output == nullptr)
    {
      // Optional output that the caller did not provide
      continue;
    }

    VERBOSE(INTERPRETER) << "Set out buffer to ExecEnv. operand index:" << output_index.value()
                         << std::endl;

    interp_env->assignExternalBuffer(
        output_index,
        std::make_shared<ExternalBuffer>(reinterpret_cast<uint8_t *>(output->buffer), output->size));
  }
  // Allocate constant tensors
  _graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
    if (obj.isConstant())
    {
      VERBOSE(INTERPRETER) << "Allocate and assign constant tensor. operand index:" << ind
                           << std::endl;

      auto const_tensor = std::make_shared<ROTensor>(obj.info());
      // Assume that the interpreter's tensor layout is the same as the model's (NHWC)
      const_tensor->setData(
          std::make_shared<ir::ExternalData>(obj.data()->base(), obj.info().total_size()));
      interp_env->assignTensor(ind, const_tensor);
    }
  });
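  // At this point every input, output, and constant operand the interpreter
  // needs is backed either by caller-owned memory or by the model's own
  // constant data; no tensor data has been copied.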
  /*****************************************************************************
   * Invoke the interpreter
   ****************************************************************************/

  interp::Interpreter interp(std::move(interp_env));
  interp.run();
  /*****************************************************************************
   * The invoked interpreter run is finished
   ****************************************************************************/

  // If the interpreter executes a submodel:
  //  1. Get the submodel's output tensors into tensor_map to save the results
  //  2. Generate a new ExecEnv for the next interpretation
}
} // namespace interp
} // namespace onert
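// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this file): how a caller might
// drive the executor. `buildIODescription` is a hypothetical helper; in the
// real runtime exec::IODescription is filled in by the execution frontend.
//
//   exec::IODescription desc = buildIODescription(/* caller buffers */);
//   // desc.inputs[n]->buffer/size reference caller-owned input data and
//   // desc.outputs[n]->buffer/size reference caller-owned output storage.
//   InterpExecutor executor{graph}; // assuming a constructor taking the graph
//   executor.execute(desc);         // results land in the output buffers
// ---------------------------------------------------------------------------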