Imported Upstream version 1.25.0
[platform/core/ml/nnfw.git] / runtime / onert / core / src / exec / Execution.cc
1 /*
2  * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *    http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include "exec/Execution.h"
18
19 #include "train/TrainableExecutors.h"
20
21 #include "util/logging.h"
22
23 namespace onert
24 {
25 namespace exec
26 {
27
28 Execution::Execution(const std::shared_ptr<IExecutors> &executors) : _executors{executors}
29 {
30   assert(executors != nullptr);
31   assert(executors->entryExecutor() != nullptr);
32   _io_desc.inputs.resize(_executors->inputSize());
33   _io_desc.outputs.resize(_executors->outputSize());
34 }
35
36 void Execution::changeInputShape(const ir::IOIndex &index, const ir::Shape &new_shape)
37 {
38   // This will be used later to set input tensor dynamic
39   // Note that 'compiled' model will not be updated with new_shape
40   // but new_shape will change model input shape while 'running' the model
41   _io_desc.dynamic_input_shapes[index] = new_shape;
42
43   VERBOSE(Execution) << "Model input shape will be changed at the start of execute()"
44                      << "(index: " << index << ")" << std::endl;
45 }
46
47 // TODO Remove default parameter
48 void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t length,
49                          ir::Layout layout)
50 {
51   const auto info = _executors->inputInfo(index);
52
53   // TODO handle when (!buffer && length != 0) : setting the input as an optional tensor
54
55   // check if size enough for input is passed
56   // if input_shape_sig is set, input_shape_sig overrides shape in info
57   // note: input_shape_sig contains shape passed by nnfw_set_input_tensorinfo()
58   {
59     auto input_shape_sig = _io_desc.dynamic_input_shapes.find(index);
60     auto size_required =
61       (input_shape_sig != _io_desc.dynamic_input_shapes.end())
62         ? input_shape_sig->second.num_elements() * onert::ir::sizeOfDataType(info.typeInfo().type())
63         : info.total_size();
64
65     if (length < size_required)
66     {
67       throw std::runtime_error{"Too small length"};
68     }
69   }
70
71   _io_desc.inputs.at(index.value()) = std::make_unique<InputDesc>(info, buffer, length, layout);
72 }
73
74 // TODO Remove default parameter
75 void Execution::setInput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape,
76                          const void *buffer, size_t length, ir::Layout layout)
77 {
78   auto info = ir::OperandInfo::createStaticInfo(shape, type);
79
80   if (length < info.total_size())
81   {
82     throw std::runtime_error{"Too small length"};
83   }
84
85   _io_desc.inputs.at(index.value()) = std::make_unique<InputDesc>(info, buffer, length, layout);
86 }
87
88 // TODO Remove default parameter
89 void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout)
90 {
91   const auto info = _executors->outputInfo(index);
92
93   if (length < info.total_size())
94   {
95     throw std::runtime_error{"Too small length"};
96   }
97
98   _io_desc.outputs.at(index.value()) = std::make_unique<OutputDesc>(info, buffer, length, layout);
99 }
100
101 // TODO Remove default parameter
102 void Execution::setOutput(const ir::IOIndex &index, const ir::TypeInfo &type,
103                           const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout)
104 {
105   auto info = ir::OperandInfo::createStaticInfo(shape, type);
106
107   if (length < info.total_size())
108   {
109     throw std::runtime_error{"Too small length"};
110   }
111
112   _io_desc.outputs.at(index.value()) = std::make_unique<OutputDesc>(info, buffer, length, layout);
113 }
114
115 void Execution::setInputLayout(const ir::IOIndex &index, ir::Layout layout)
116 {
117   const auto &input_desc = _io_desc.inputs.at(index.value());
118   _io_desc.inputs.at(index.value()) =
119     std::make_unique<InputDesc>(input_desc->info, input_desc->buffer, input_desc->size, layout);
120 }
121
122 void Execution::setOutputLayout(const ir::IOIndex &index, ir::Layout layout)
123 {
124   const auto &output_desc = _io_desc.outputs.at(index.value());
125   _io_desc.outputs.at(index.value()) =
126     std::make_unique<OutputDesc>(output_desc->info, output_desc->buffer, output_desc->size, layout);
127 }
128
129 void Execution::execute()
130 {
131   VERBOSE(Execution) << "Start execution" << std::endl;
132
133   _executors->execute(_io_desc);
134   finished = true;
135
136   VERBOSE(Execution) << "Execution finished" << std::endl;
137 }
138
139 void Execution::startExecute()
140 {
141   VERBOSE(Execution) << "Create asynchronous execution thread" << std::endl;
142
143   _exec_thread = std::make_unique<std::thread>(&Execution::execute, this);
144 }
145
146 void Execution::waitFinish()
147 {
148   VERBOSE(Execution) << "Wait to finish execution" << std::endl;
149
150   _exec_thread->join();
151   finished = true;
152 }
153
154 bool Execution::isFinished(void) const { return finished; }
155
156 #ifdef ONERT_TRAIN
157 void Execution::train(uint32_t training_step)
158 {
159   auto execs = dynamic_cast<exec::train::TrainableExecutors *>(_executors.get());
160   if (!execs)
161   {
162     throw std::runtime_error{"Supported only TrainableExecutors"};
163   }
164
165   VERBOSE(Execution) << "Start training" << std::endl;
166
167   execs->train(_io_desc, training_step);
168   finished = true;
169
170   VERBOSE(Execution) << "training finished" << std::endl;
171 }
172
173 float Execution::getLoss(const ir::IOIndex &ind)
174 {
175   auto execs = dynamic_cast<exec::train::TrainableExecutors *>(_executors.get());
176   if (!execs)
177   {
178     throw std::runtime_error{"Supported only TrainableExecutors"};
179   }
180
181   return execs->getLoss(ind);
182 }
183 #endif // ONERT_TRAIN
184
185 ir::Shape Execution::getInputShape(ir::IOIndex ind) const
186 {
187   auto itr = _io_desc.dynamic_input_shapes.find(ind);
188   if (itr == _io_desc.dynamic_input_shapes.end())
189   {
190     return _executors->inputInfo(ind).shape();
191   }
192   else
193   {
194     return itr->second;
195   }
196 }
197
// NNAPI returns failure if ANeuralNetworksExecution_getOutputOperandRank or
// ANeuralNetworksExecution_getOutputOperandDimensions is called before execution.
// On the other hand, the NNFW API returns the static shape inference result if
// nnfw_output_tensorinfo is called before execution.
// To handle both cases, this method returns the static shape inference result before
// execution, and the failure case is handled on the NNAPI frontend.
204 ir::Shape Execution::getOutputShape(ir::IOIndex ind) const
205 {
206   if (!isFinished())
207     return _executors->outputInfo(ind).shape();
208
209   const auto &output_desc = _io_desc.outputs.at(ind.value());
210
211   return output_desc->info.shape();
212 }
213
214 size_t Execution::getInputTotalSize(ir::IOIndex ind) const
215 {
216   // TODO Support dynamic shape
217   return _executors->inputInfo(ind).total_size();
218 }
219
220 size_t Execution::getOutputTotalSize(ir::IOIndex ind) const
221 {
222   return _executors->outputInfo(ind).total_size();
223 }
224
225 } // namespace exec
226 } // namespace onert