/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef __ONERT_BACKEND_CONTROLFLOW_TENSOR_BUILDER_H__
18 #define __ONERT_BACKEND_CONTROLFLOW_TENSOR_BUILDER_H__
20 #include <backend/cpu_common/StaticTensorManager.h>
21 #include <backend/cpu_common/TensorRegistry.h>
22 #include <backend/cpu_common/Tensor.h>
24 #include <backend/ITensorBuilder.h>
25 #include <ir/OperandIndexMap.h>
27 #include <unordered_map>
29 #include "DynamicTensorManager.h"
30 #include "UserTensorRegistry.h"
39 class TensorBuilder : public ITensorBuilder
42 TensorBuilder(const std::shared_ptr<TensorRegistry> &tensor_reg);
45 * @brief Register tensor information to allocate on CPU backend
46 * @param[in] ind Operand index
47 * @param[in] info Operand information
48 * @param[in] layout Operand data layout
50 void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
51 ir::Layout backend_layout) override;
53 void notifyFirstUse(const ir::OperandIndex &) override;
54 void notifyLastUse(const ir::OperandIndex &) override;
56 bool isRegistered(const ir::OperandIndex &) const override;
58 void prepare(void) override;
59 void allocate() override;
60 void postFunctionPrepare() override { /* DO NOTHING */}
62 std::unique_ptr<ITensorManager> releaseStaticTensorManager(void) override;
64 IDynamicTensorManager *dynamicTensorManager(void) override { return _dynamic_tensor_mgr.get(); }
66 std::unique_ptr<ITensorManager> releaseDynamicTensorManager(void) override;
69 * @brief Get tensor with a specific OperandIndex.
70 * @param ind OperandIndex for the tensor. There must exist a tensor with this ind.
71 * If not, program will crash with assert or exception.
72 * @return shared_ptr<operand::Tensor>
74 std::shared_ptr<cpu_common::Tensor> nativeOwnTensorAt(const ir::OperandIndex &ind);
75 void setNativeUserTensor(const ir::OperandIndex &ind, const std::shared_ptr<UserTensor> &tensor);
78 const std::shared_ptr<TensorRegistry> _tensor_reg;
79 std::unique_ptr<DynamicTensorManager> _dynamic_tensor_mgr;
80 std::unique_ptr<cpu_common::StaticTensorManager> _static_tensor_mgr;
81 ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
82 ir::OperandIndexMap<ir::Layout> _tensor_layout_map;
85 } // namespace controlflow
86 } // namespace backend
89 #endif // __ONERT_BACKEND_CONTROLFLOW_TENSOR_BUILDER_H__