/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef __ONERT_BACKEND_ICONSTANT_INITIALIZER_H__
18 #define __ONERT_BACKEND_ICONSTANT_INITIALIZER_H__
20 #include <unordered_map>
23 #include "ITensorBuilder.h"
24 #include "ir/Coordinates.h"
25 #include "ir/Layout.h"
26 #include "ir/Operand.h"
27 #include "ir/Operands.h"
28 #include "ir/OperationVisitor.h"
29 #include "ir/OpSequence.h"
30 #include "util/logging.h"
35 static void Init(const onert::ir::Operand &model_obj, onert::backend::ITensor &obj, const bool copy,
36 const onert::ir::Layout frontend_layout = onert::ir::Layout::UNKNOWN)
38 const auto shape = model_obj.shape();
39 assert(model_obj.data());
40 auto base = reinterpret_cast<const T *>(model_obj.data()->base());
42 obj.access([&](::onert::backend::ITensor &tensor) {
47 assert(model_obj.data()->size() == sizeof(T));
48 const auto value = *reinterpret_cast<const T *>(base);
49 T *into = reinterpret_cast<T *>(tensor.buffer());
55 auto vec_size = shape.dim(0);
56 for (int32_t n = 0; n < vec_size; ++n)
58 const T *from = reinterpret_cast<const T *>(base) + n;
59 const auto value = *from;
61 T *into = reinterpret_cast<T *>(tensor.buffer()) + n;
69 const int32_t copy_len = shape.dim(1);
71 for (auto i = 0; i < shape.dim(0); ++i)
73 ::onert::ir::Coordinates coords{i, 0};
74 memcpy(tensor.buffer() + tensor.calcOffset(coords), base + i * copy_len,
75 copy_len * sizeof(T));
81 const int32_t width = shape.dim(1);
82 const int32_t copy_len = shape.dim(2);
84 for (auto i = 0; i < shape.dim(0); ++i)
86 for (auto j = 0; j < shape.dim(1); ++j)
88 ::onert::ir::Coordinates coords{i, j, 0};
89 memcpy(tensor.buffer() + tensor.calcOffset(coords),
90 base + i * width * copy_len + j * copy_len, copy_len * sizeof(T));
97 const int32_t height = shape.dim(1);
98 const int32_t width = shape.dim(2);
99 const int32_t copy_len = shape.dim(3);
100 for (auto i = 0; i < shape.dim(0); ++i)
102 for (auto j = 0; j < shape.dim(1); ++j)
104 for (auto k = 0; k < shape.dim(2); ++k)
108 ::onert::ir::Coordinates coords{i, j, k, 0};
109 memcpy(tensor.buffer() + tensor.calcOffset(coords),
110 base + i * height * width * copy_len + j * width * copy_len + k * copy_len,
111 copy_len * sizeof(T));
115 for (auto l = 0; l < shape.dim(3); ++l)
117 const auto coords = ::onert::ir::convertCoordinates({i, j, k, l}, frontend_layout,
119 T *into = reinterpret_cast<T *>(tensor.buffer() + tensor.calcOffset(coords));
120 T value = *(base + i * height * width * copy_len + j * width * copy_len +
131 throw std::runtime_error{"Not yet supported"};
136 template <typename T>
137 void copyInit(const onert::ir::Operand &model_obj, onert::backend::ITensor &obj)
139 Init<T>(model_obj, obj, true);
142 template <typename T>
143 void permuteInit(const onert::ir::Operand &model_obj, onert::backend::ITensor &obj,
144 const onert::ir::Layout frontend_layout)
146 const bool copy = frontend_layout == obj.layout();
147 Init<T>(model_obj, obj, copy, frontend_layout);
157 class IConstantInitializer : public ir::OperationVisitor
160 virtual ~IConstantInitializer() = default;
165 assert(tensor_builder().get());
166 for (const auto &it : _init_map)
168 const auto &ind = it.first;
169 const auto &fn = it.second;
171 const auto &model_obj = _operands.at(ind);
172 auto tensor_obj = tensor_builder()->tensorAt(ind);
173 assert(tensor_obj != nullptr);
174 fn(model_obj, *tensor_obj);
175 VERBOSE(FillOperandData) << "Fill data for operand " << ind.value() << std::endl;
181 IConstantInitializer(const ir::Operands &operands)
182 : _operands{operands}, _current_op_seq_layout{ir::Layout::UNKNOWN}
187 using Initializer = std::function<void(const ir::Operand &, backend::ITensor &)>;
189 void setLayout(ir::Layout layout) { _current_op_seq_layout = layout; }
192 using OperationVisitor::visit;
195 virtual std::shared_ptr<ITensorBuilder> tensor_builder() const = 0;
198 virtual void registerDefaultInitializer(const ir::OperandIndex &index, const ir::Operand &obj)
200 registerPermuteInitializer(index, obj); // as default
204 void registerCopyInitializer(const ir::OperandIndex &index, const ir::Operand &obj);
205 void registerPermuteInitializer(const ir::OperandIndex &index, const ir::Operand &obj);
208 void registerCustomInitializer(const ir::OperandIndex &index, const ir::Operand &obj,
209 void (*customInit)(const onert::ir::Operand &model_obj,
210 onert::backend::ITensor &obj))
212 // For only CONSTANTS
213 // TODO Add to check if tensor has been allocated
214 if (!obj.isConstant())
217 using namespace std::placeholders;
218 _init_map[index] = std::bind(customInit, _1, _2);
222 bool exist(const ir::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); }
225 const ir::Operands &_operands;
226 std::unordered_map<ir::OperandIndex, Initializer> _init_map;
227 ir::Layout _current_op_seq_layout; // TODO Rename this to _current_layout
230 } // namespace backend
233 #endif // __ONERT_BACKEND_ICONSTANT_INITIALIZER_H__