/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef __ONERT_EXEC_FUNCTION_SEQUENCE_H__
18 #define __ONERT_EXEC_FUNCTION_SEQUENCE_H__
#include <functional>
#include <memory>
#include <vector>

#include "exec/IFunction.h"
#include "exec/DynamicShapeInferer.h"
#include "ir/Operations.h"
#include "backend/ITensorRegistry.h"
35 class FunctionSequence : public IFunction
38 template <typename... Args> FunctionSequence(Args &&... args) { initialize(std::move(args)...); }
43 // Template base case : do nothing
46 template <typename T, typename... Args> void initialize(std::unique_ptr<T> &&fn, Args &&... args)
48 _functions.emplace_back(std::move(fn));
49 initialize(std::move(args)...);
53 virtual ~FunctionSequence() = default;
56 void prepare() override;
59 * @brief Appends an IFunction object to the function sequence
61 * @param function IFunction object to be appended
63 void append(std::unique_ptr<IFunction> &&function);
65 void iterate(const std::function<void(IFunction &)> &fn);
67 template <typename T, typename... Args> void wrap(Args &&... args)
69 for (auto &&function : _functions)
71 function = std::make_unique<T>(std::move(function), args...);
75 public: // methods related to dynamic tensor
76 struct DynamicTensorCtx
78 const ir::IOperation *op = nullptr;
79 std::shared_ptr<exec::DynamicShapeInferer> dynamic_shape_inferer = nullptr;
83 * @brief Prepare to run FunctionSequence which "might" handle dynamic tensor
84 * @note Calling this does not mean that run() will handle dynamic tensor.
85 * enableDynamicShapeInferer(true) will make run() will handle dynamic tensor.
87 void dynamic_tensor_ctx(std::shared_ptr<DynamicTensorCtx> &dynamic_tensor_ctx)
89 _dynamic_tensor_ctx = dynamic_tensor_ctx;
92 std::shared_ptr<DynamicTensorCtx> &dynamic_tensor_ctx() { return _dynamic_tensor_ctx; }
95 * @brief Call this function by passing @c true if this FunctionSequence handles dynamic tensors
96 * and should run DynamicShapeInferer. This function can be called multiple times and
97 * if @c false is passed during multiple calls, DynamicShapeInfere will not be run.
98 * @note This must be called before run(). If not called, run() assumes that all tensors are
99 * dynamic and DynamicShapeInferer will be run.
101 void enableDynamicShapeInferer(bool enable)
103 _enable_dynamic_shape_inferer = _enable_dynamic_shape_inferer || enable;
107 * @brief Call this function to initialize vars before running
108 * @note When we run a model with static tensor input and then run with dynamic tensor input,
109 * _enable_dynamic_shape_inferer is set to @c false at first run.
110 * Once _enable_dynamic_shape_inferer is set to @c true it cannot be changed to @c false
111 * only with calling enableDynamicShapeInferer(). So initializing it to @c false is
113 * @todo This is a quick fix. Adding this will increase time for run(). Find way to optimize.
115 void initRunning() { _enable_dynamic_shape_inferer = false; }
118 std::vector<std::unique_ptr<IFunction>> _functions;
121 bool _enable_dynamic_shape_inferer = false;
123 std::shared_ptr<DynamicTensorCtx> _dynamic_tensor_ctx = nullptr;
129 #endif // __ONERT_EXEC_FUNCTION_SEQUENCE_H__