/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file  HEScheduler.h
 * @brief This file contains HEScheduler class to define and run task Heterogeneous Execution
 *        Scheduler
 */
#ifndef __ONERT_COMPILER_H_E_SCHEDULER_H_
#define __ONERT_COMPILER_H_E_SCHEDULER_H_
#include "compiler/IScheduler.h"
#include "compiler/BackendManager.h"
#include "compiler/Compiler.h"
#include "ir/Graph.h"
#include "exec/ExecTime.h"
#include "backend/Backend.h"
#include "ir/OperationIndexMap.h"

#include <algorithm>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>
namespace onert
{
namespace compiler
{

/**
 * @brief Class to schedule tasks
 */
class HEScheduler : IScheduler
{
public:
  /**
   * @brief     Construct a new Heterogeneous Execution Scheduler object
   * @param[in] backend_contexts backend contexts to schedule operations onto
   * @param[in] options          compiler options (executor kind and profiling mode are used)
   */
  HEScheduler(const backend::BackendContexts &backend_contexts, const CompilerOptions &options)
      : _is_supported{}, _backends_avail_time{}, _ops_eft{},
        _op_to_rank{std::make_shared<ir::OperationIndexMap<int64_t>>()},
        _is_profiling_mode{options.he_profiling_mode},
        _is_linear_exec{options.executor == "Linear"},
        _is_parallel_exec{options.executor == "Parallel"}
  {
    // Collect all backends except the controlflow backend
    for (auto &entry : backend_contexts)
    {
      if (entry.first->config()->id() == backend::controlflow::Config::ID)
        continue;
      _all_backends.push_back(entry.first);
    }
    _backend_resolver = std::make_unique<compiler::BackendResolver>();
    _exec_time = std::make_unique<exec::ExecTime>(_all_backends);

    // Find the "cpu" backend; HEScheduler requires it to be available
    auto cpu_backend_it = std::find_if(
        _all_backends.begin(), _all_backends.end(),
        [](const backend::Backend *backend) { return backend->config()->id() == "cpu"; });
    if (cpu_backend_it == _all_backends.end())
      throw std::runtime_error("HEScheduler could be used only if 'cpu' backend is available");
    _cpu_backend = *cpu_backend_it;
  }
public:
  /**
   * @brief Task scheduling
   *
   * @note  The main idea is taken from HSIP algo:
   *        https://www.hindawi.com/journals/sp/2016/3676149/
   */
  std::unique_ptr<compiler::BackendResolver> schedule(const ir::Graph &graph) final;
  std::shared_ptr<ir::OperationIndexMap<int64_t>> getIndexedRanks() { return _op_to_rank; }
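
  // Usage sketch (illustrative only): the scheduler is constructed with the available backend
  // contexts and the compiler options, then produces a BackendResolver for a graph. The names
  // `backend_contexts`, `options` and `graph` below are assumed to be provided by the caller.
  //
  //   HEScheduler scheduler{backend_contexts, options};
  //   auto backend_resolver = scheduler.schedule(graph);
  //   auto ranks = scheduler.getIndexedRanks();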
private:
  bool isNodeProfiled(const ir::Operation &);

  bool schedule(const ir::OperationIndex &, const backend::Backend *parent_backend);
  /**
   * @brief Get earliest starting time and execution time of an operation on a backend.
   *
   * @note  Returns the time when the operation's inputs are ready and the backend is available.
   *        It also returns the execution time; for the "cpu" backend the execution time is
   *        multiplied by CPU_DELAY.
   *
   * @param[in]  backend: backend, for which to return the time
   * @param[in]  index: index of an operation
   * @param[out] transfer_st_exec_time: est and exec time of data transfer operation
   *
   * @return earliest starting time and execution time
   */
  std::pair<int64_t, int64_t>
  ESTAndExecTime(const backend::Backend *backend, const ir::OperationIndex &index,
                 std::multimap<int64_t, int64_t> &transfer_st_exec_time);
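  // Illustrative example (hypothetical values): if the operation's inputs become ready at t=100,
  // the backend's earliest sufficiently long free slot starts at t=120 and the known execution
  // time is 40, the returned pair is {120, 40}; required data-transfer operations are reported
  // through `transfer_st_exec_time` as (starting time, execution time) entries.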
  /**
   * @brief Returns the latest finishing time of parents of a node.
   *
   * @param[in]  backend: backend, for which to return the time
   * @param[in]  node: node to get eft of parents
   * @param[out] transfer_st_exec_time: est and exec time of data transfer operation
   *
   * @return the latest earliest-finishing-time (EFT) among the parent nodes
   */
  int64_t predMaxEFT(const backend::Backend *backend, const ir::Operation &node,
                     std::multimap<int64_t, int64_t> &transfer_st_exec_time);
  // Computes the rank of the operation at `index` using depth-first search
  int64_t DFSMaxRank(const ir::OperationIndex &index);

  // Computes the maximum rank among the children of the operation at `index`
  int64_t DFSChildrenMaxRank(const ir::OperationIndex &index);
  /**
   * @brief Returns the time when the backend is available for at least the given amount of time.
   *
   * @note  Returns either a gap between two already scheduled operations,
   *        or the finishing time of the last scheduled operation.
   *
   * @param[in] backend backend, for which to return the time
   * @param[in] starting_time time from which to start looking for a gap
   * @param[in] time_amount amount of time the gap must provide
   *
   * @return time when the backend has at least time_amount of free time
   */
  int64_t backendAvailableTime(const backend::Backend *backend, const int64_t &starting_time,
                               const int64_t &time_amount);
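  // Illustrative example (hypothetical values): if the backend is already occupied during
  // [0, 10) and [20, 30), then asking for 8 units of time starting from t=5 yields t=10
  // (the gap [10, 20) is long enough), whereas asking for 15 units yields t=30 (the finishing
  // time of the last scheduled operation).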
  // Returns the execution time of the given operation on the given backend
  int64_t getOpTime(const backend::Backend *backend, const std::string &operation, bool quant,
                    uint32_t size);

  // Returns the time needed to transfer (permute) data of the given size between two backends
  int64_t getPermuteTime(const backend::Backend *src_backend, const backend::Backend *dst_backend,
                         bool quant, uint32_t size);
  void scheduleShufflingBackends();

  int64_t tryBackend(const ir::Operation &node, const backend::Backend *backend);
  /**
   * @brief Schedule a node and its successors until:
   *          1. a branch point or a merge point of multiple branches is reached, or
   *          2. for a subsequent node, a backend other than the predecessor's backend is preferred
   *
   * @param[in] index: index of an operation
   * @param[in] scheduled: a map to check if this node has already been scheduled
   *
   * @return N/A
   */
  void scheduleBranch(const ir::OperationIndex &index, ir::OperationIndexMap<bool> &scheduled);
private:
  // This variable stores backend/node pairs with unknown execution time, and hints the scheduler
  // whether it should assign these backends to these nodes:
  // * It stores false for unsupported nodes
  // * During rank calculation with profiling mode enabled, it stores true for supported nodes
  std::unordered_map<const backend::Backend *, std::unordered_map<std::string, bool>> _is_supported;
  // Finishing and starting time of each backend
  std::unordered_map<const backend::Backend *, std::map<int64_t, int64_t>> _backends_avail_time;
  // Earliest finishing time of each operation
  ir::OperationIndexMap<int64_t> _ops_eft;
  // Operations sorted by rank in descending order
  std::multimap<int64_t, ir::OperationIndex, std::greater<int64_t>> _rank_to_op;
  std::shared_ptr<ir::OperationIndexMap<int64_t>> _op_to_rank;
  std::unique_ptr<compiler::BackendResolver> _backend_resolver;
  std::unique_ptr<exec::ExecTime> _exec_time;
  const ir::Graph *_graph{nullptr};
  std::vector<const backend::Backend *> _all_backends;
  const backend::Backend *_cpu_backend{nullptr}; // TODO Change this to controlflow_backend
  bool _is_profiling_mode;
  bool _is_linear_exec;
  bool _is_parallel_exec;
};

} // namespace compiler

} // namespace onert

#endif // __ONERT_COMPILER_H_E_SCHEDULER_H_