/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "exec/Execution.h"
19 #include "util/logging.h"
26 Execution::Execution(const std::shared_ptr<IExecutors> &executors) : _executors{executors}
28 assert(executors != nullptr);
29 assert(executors->entryExecutor() != nullptr);
30 _io_desc.inputs.resize(_executors->inputSize());
31 _io_desc.outputs.resize(_executors->outputSize());
34 void Execution::changeInputShape(const ir::IOIndex &index, const ir::Shape &new_shape)
36 // This will be used later to set input tensor dynamic
37 // Note that 'compiled' model will not be updated with new_shape
38 // but new_shape will change model input shape while 'running' the model
39 _io_desc.dynamic_input_shapes[index] = new_shape;
41 VERBOSE(Execution) << "Model input shape will be changed at the start of execute()"
42 << "(index: " << index << ")" << std::endl;
45 // TODO Remove default parameter
46 void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t length,
49 const auto info = _executors->inputInfo(index);
51 // TODO handle when (!buffer && length != 0) : setting the input as an optional tensor
53 // check if size enough for input is passed
54 // if input_shape_sig is set, input_shape_sig overrides shape in info
55 // note: input_shape_sig contains shape passed by nnfw_set_input_tensorinfo()
57 auto input_shape_sig = _io_desc.dynamic_input_shapes.find(index);
59 (input_shape_sig != _io_desc.dynamic_input_shapes.end())
60 ? input_shape_sig->second.num_elements() * onert::ir::sizeOfDataType(info.typeInfo().type())
63 if (length < size_required)
65 throw std::runtime_error{"Too small length"};
69 _io_desc.inputs.at(index.value()) = std::make_unique<InputDesc>(info, buffer, length, layout);
72 // TODO Remove default parameter
73 void Execution::setInput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape,
74 const void *buffer, size_t length, ir::Layout layout)
76 auto info = ir::OperandInfo::createStaticInfo(shape, type);
78 if (length < info.total_size())
80 throw std::runtime_error{"Too small length"};
83 _io_desc.inputs.at(index.value()) = std::make_unique<InputDesc>(info, buffer, length, layout);
86 // TODO Remove default parameter
87 void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout)
89 const auto info = _executors->outputInfo(index);
91 if (length < info.total_size())
93 throw std::runtime_error{"Too small length"};
96 _io_desc.outputs.at(index.value()) = std::make_unique<OutputDesc>(info, buffer, length, layout);
99 // TODO Remove default parameter
100 void Execution::setOutput(const ir::IOIndex &index, const ir::TypeInfo &type,
101 const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout)
103 auto info = ir::OperandInfo::createStaticInfo(shape, type);
105 if (length < info.total_size())
107 throw std::runtime_error{"Too small length"};
110 _io_desc.outputs.at(index.value()) = std::make_unique<OutputDesc>(info, buffer, length, layout);
113 void Execution::setInputLayout(const ir::IOIndex &index, ir::Layout layout)
115 const auto &input_desc = _io_desc.inputs.at(index.value());
116 _io_desc.inputs.at(index.value()) =
117 std::make_unique<InputDesc>(input_desc->info, input_desc->buffer, input_desc->size, layout);
120 void Execution::setOutputLayout(const ir::IOIndex &index, ir::Layout layout)
122 const auto &output_desc = _io_desc.outputs.at(index.value());
123 _io_desc.outputs.at(index.value()) =
124 std::make_unique<OutputDesc>(output_desc->info, output_desc->buffer, output_desc->size, layout);
127 void Execution::execute()
129 VERBOSE(Execution) << "Start execution" << std::endl;
131 _executors->execute(_io_desc);
134 VERBOSE(Execution) << "Execution finished" << std::endl;
137 void Execution::startExecute()
139 VERBOSE(Execution) << "Create asynchronous execution thread" << std::endl;
141 _exec_thread = std::make_unique<std::thread>(&Execution::execute, this);
144 void Execution::waitFinish()
146 VERBOSE(Execution) << "Wait to finish execution" << std::endl;
148 _exec_thread->join();
// Reports whether the most recent execution has completed.
bool Execution::isFinished(void) const { return finished; }
154 ir::Shape Execution::getInputShape(ir::IOIndex ind) const
156 auto itr = _io_desc.dynamic_input_shapes.find(ind);
157 if (itr == _io_desc.dynamic_input_shapes.end())
159 return _executors->inputInfo(ind).shape();
// NNAPI returns failure if ANeuralNetworksExecution_getOutputOperandRank or
// ANeuralNetworksExecution_getOutputOperandDimensions is called before execution.
// On the other hand, the NNFW API returns the static shape inference result if
// nnfw_output_tensorinfo is called before execution.
// To handle both cases, this method returns the static shape inference result before execution
// has finished, and the failure case is handled on the NNAPI frontend side.
173 ir::Shape Execution::getOutputShape(ir::IOIndex ind) const
176 return _executors->outputInfo(ind).shape();
178 const auto &output_desc = _io_desc.outputs.at(ind.value());
180 return output_desc->info.shape();