/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "WhileLayer.h"

#include "PermuteLayer.h"
#include "../../../exec/ExecutorBase.h"

#include <misc/polymorphic_downcast.h>

#include <cassert>
#include <functional>
#include <memory>
#include <vector>
35 WhileLayer::WhileLayer(const std::vector<backend::IPortableTensor *> input_tensors,
36 const std::vector<backend::IPortableTensor *> output_tensors,
37 const ir::SubgraphIndex &cond_subg_index,
38 const ir::SubgraphIndex &body_subg_index, exec::IExecutors *executors,
39 const ir::ModelIndex &model_index,
40 basic::DynamicMemoryManager *dyn_memory_manager,
41 const std::shared_ptr<ExternalContext> &external_context)
42 : _cond_subg_index{cond_subg_index}, _body_subg_index{body_subg_index},
43 _input_tensors{input_tensors}, _output_tensors{output_tensors}, _executors{executors},
44 _model_index{model_index}, _dyn_memory_manager{dyn_memory_manager}, _external_context{
47 // At this point, executors may not have executors of cond subg and body subg
50 void WhileLayer::run()
52 // Copy "_input_tensors" -> "cond subg inputs"
54 // Start loop while output of cond subg is ture
55 // // Copy "_input_tensors" -> "body subg inputs" in the first iteration, then copy "body subg
56 // outputs" -> "body subg inputs" in the second or more iterations
58 // // Copy "body subg outputs" -> "cond subg inputs"
60 // If there is no loop copy "_input_tensors" -> "_dst_tensors", else copy "cond subg inputs" ->
62 auto cond_exec = _executors->at(_model_index, _cond_subg_index);
63 auto body_exec = _executors->at(_model_index, _body_subg_index);
65 // Need a temp tensor to hold the cond subgraph output
66 assert(cond_exec->getOutputTensors().size() == 1);
67 auto cond_output_tensor = [&]() {
68 auto cond_output = cond_exec->getOutputTensors().at(0);
69 auto tensor = std::make_unique<Tensor>(cond_output->orig_info(), cond_output->orig_layout(),
71 tensor->set_dynamic();
72 tensor->setBuffer(_dyn_memory_manager->allocate(tensor.get(), tensor->total_size()));
76 VERBOSE(While) << "Call to $" << _cond_subg_index << " (cond)" << std::endl;
77 cond_exec->execute(_input_tensors, {cond_output_tensor.get()});
78 VERBOSE(While) << "Return from $" << _cond_subg_index << std::endl;
80 auto getResultCond = [](backend::ITensor *tensor) -> bool {
82 tensor->access([&](ITensor &tensor) { ret = *reinterpret_cast<bool *>(tensor.buffer()); });
86 std::vector<ITensor *> op_inputs(_input_tensors.begin(), _input_tensors.end());
87 std::vector<ITensor *> op_outputs(_output_tensors.begin(), _output_tensors.end());
88 // Copying body inputs to outputs when the loop body is never executed
89 if (!getResultCond(cond_output_tensor.get()))
91 PermuteLayer copy_body_inputs_to_op_outputs{op_inputs, op_outputs, _external_context};
92 copy_body_inputs_to_op_outputs.run();
96 // Need some temp tensors to hold the body subgraph output
97 std::vector<std::unique_ptr<Tensor>> temp_outputs_o;
98 std::vector<IPortableTensor *> temp_outputs;
99 for (auto io_tensor : body_exec->getOutputTensors())
101 auto tensor = std::make_unique<Tensor>(io_tensor->orig_info(), io_tensor->orig_layout(),
102 _dyn_memory_manager);
103 tensor->set_dynamic();
104 tensor->setBuffer(_dyn_memory_manager->allocate(tensor.get(), tensor->total_size()));
105 temp_outputs.push_back(tensor.get());
106 temp_outputs_o.push_back(std::move(tensor));
109 std::vector<ITensor *> body_outputs(temp_outputs.begin(), temp_outputs.end());
110 PermuteLayer copy_body_outputs_to_op_outputs{body_outputs, op_outputs, _external_context};
112 const auto body_execute_with_op_inputs = [&]() {
113 VERBOSE(While) << "Call to $" << _body_subg_index << " (body)" << std::endl;
114 body_exec->execute(_input_tensors, temp_outputs);
115 VERBOSE(While) << "Return from $" << _body_subg_index << std::endl;
118 const auto body_execute_with_body_outputs = [&]() {
119 VERBOSE(While) << "Call to $" << _body_subg_index << " (body)" << std::endl;
120 body_exec->execute(_output_tensors, temp_outputs);
121 VERBOSE(While) << "Return from $" << _body_subg_index << std::endl;
124 std::function<void()> body_execute = body_execute_with_op_inputs;
125 const auto cond_execute = [&]() {
126 VERBOSE(While) << "Call to $" << _cond_subg_index << " (cond)" << std::endl;
127 cond_exec->execute(_output_tensors, {cond_output_tensor.get()});
128 VERBOSE(While) << "Return from $" << _cond_subg_index << std::endl;
131 // Loop while Cond subgraph's output is true
132 while (getResultCond(cond_output_tensor.get()))
135 copy_body_outputs_to_op_outputs.run();
137 body_execute = body_execute_with_body_outputs;
140 // Clean-up the temp tensors
141 _dyn_memory_manager->deallocate(cond_output_tensor.get());
142 for (auto tensor : temp_outputs)
144 _dyn_memory_manager->deallocate(tensor);
148 } // namespace kernel
149 } // namespace builtin
150 } // namespace backend