/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __NEURUN_BACKEND_ICONSTANT_INITIALIZER_H__
#define __NEURUN_BACKEND_ICONSTANT_INITIALIZER_H__

#include <cassert>
#include <cstring>
#include <functional>
#include <memory>
#include <unordered_map>

#include "ITensorBuilder.h"
#include "ir/Layout.h"
#include "model/Operand.h"
#include "model/Operands.h"
#include "model/OperationVisitor.h"
#include "model/Subgraph.h"
#include "util/logging.h"
#include "util/Utils.h"
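
// IConstantInitializer records one Initializer function per constant operand
// (via registerCopyInitializer / registerPermuteInitializer) and, when run() is
// called, copies (or layout-permutes) each operand's raw data into the
// corresponding backend tensor.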

namespace
{

// Copy or permute a constant operand's data into the backend tensor `obj`.
template <typename T>
static void Init(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj,
                 const bool copy,
                 const neurun::ir::Layout frontend_layout = neurun::ir::Layout::UNKNOWN)
{
  const auto shape = model_obj.shape();
  auto base = reinterpret_cast<const T *>(model_obj.data().base());

  obj.access([&](::neurun::backend::operand::ITensor &tensor) {
    switch (shape.rank())
    {
      case 0:
      {
        // Scalar: a single element copy.
        assert(model_obj.data().size() == sizeof(T));
        const auto value = *reinterpret_cast<const T *>(base);
        T *into = reinterpret_cast<T *>(tensor.buffer());
        *into = value;
        break;
      }
      case 1:
      {
        // Rank-1: element-wise copy of a vector.
        auto vec_size = shape.dim(0);
        for (int32_t n = 0; n < vec_size; ++n)
        {
          const T *from = reinterpret_cast<const T *>(base) + n;
          const auto value = *from;

          T *into = reinterpret_cast<T *>(tensor.buffer()) + n;
          *into = value;
        }
        break;
      }
      case 2:
      {
        // Rank-2: copy one contiguous row at a time.
        const int32_t copy_len = shape.dim(1);

        for (auto i = 0; i < shape.dim(0); ++i)
        {
          neurun::util::Coordinates coords{i, 0};
          memcpy(tensor.buffer() + tensor.calcOffset(coords), base + i * copy_len,
                 copy_len * sizeof(T));
        }
        break;
      }
      case 3:
      {
        // Rank-3: copy the innermost dimension row by row.
        const int32_t width = shape.dim(1);
        const int32_t copy_len = shape.dim(2);

        for (auto i = 0; i < shape.dim(0); ++i)
        {
          for (auto j = 0; j < shape.dim(1); ++j)
          {
            neurun::util::Coordinates coords{i, j, 0};
            memcpy(tensor.buffer() + tensor.calcOffset(coords),
                   base + i * width * copy_len + j * copy_len, copy_len * sizeof(T));
          }
        }
        break;
      }
      case 4:
      {
        // Rank-4: bulk-copy rows when layouts match, otherwise permute element-wise.
        const int32_t height = shape.dim(1);
        const int32_t width = shape.dim(2);
        const int32_t copy_len = shape.dim(3);
        for (auto i = 0; i < shape.dim(0); ++i)
        {
          for (auto j = 0; j < shape.dim(1); ++j)
          {
            for (auto k = 0; k < shape.dim(2); ++k)
            {
              if (copy)
              {
                neurun::util::Coordinates coords{i, j, k, 0};
                memcpy(tensor.buffer() + tensor.calcOffset(coords),
                       base + i * height * width * copy_len + j * width * copy_len + k * copy_len,
                       copy_len * sizeof(T));
              }
              else
              {
                for (auto l = 0; l < shape.dim(3); ++l)
                {
                  const auto coords = neurun::util::convertCoordinates(
                      {i, j, k, l}, frontend_layout, tensor.layout());
                  T *into = reinterpret_cast<T *>(tensor.buffer() + tensor.calcOffset(coords));
                  T value = *(base + i * height * width * copy_len + j * width * copy_len +
                              k * copy_len + l);
                  *into = value;
                }
              }
            }
          }
        }
        break;
      }
      default:
        throw std::runtime_error{"Not yet supported"};
    }
  });
}
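
// Note on Init's rank dispatch: ranks 0-3 are unconditional copies, while rank 4
// honors the `copy` flag: a bulk memcpy per innermost row when the layouts agree,
// otherwise an element-wise copy through convertCoordinates.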

template <typename T>
void copyInit(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj)
{
  Init<T>(model_obj, obj, true);
}

template <typename T>
void permuteInit(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj,
                 const neurun::ir::Layout frontend_layout)
{
  const bool copy = frontend_layout == obj.layout();
  Init<T>(model_obj, obj, copy, frontend_layout);
}
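
// permuteInit degrades to a plain copy whenever the frontend layout already matches
// the backend tensor's layout; for example, an NHWC constant bound to an NHWC backend
// tensor is row-copied via memcpy, while binding it to an NCHW tensor rewrites every
// element through convertCoordinates.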

} // namespace

namespace neurun
{
namespace backend
{

class IConstantInitializer : model::OperationVisitor
{
public:
  virtual ~IConstantInitializer() = default;

public:
  // Execute every registered initializer; tensors must already be allocated.
  void run()
  {
    assert(tensor_builder().get());
    for (const auto &it : _init_map)
    {
      const auto &ind = it.first;
      const auto &fn = it.second;

      const auto &model_obj = operands().at(ind);
      auto tensor_obj = tensor_builder()->tensorAt(ind);
      fn(model_obj, *tensor_obj);
      VERBOSE(FillOperandData) << "Fill data for operand " << ind.value() << std::endl;
    }
  }

public:
  using Initializer = std::function<void(const model::Operand &, backend::operand::ITensor &)>;

  void generate(const model::Subgraph &subg, const model::Operands &operands)
  {
    _current_subg_layout = subg.getLayout();
    subg.accept(*this);
    // Register a layout-aware initializer for every constant input not already covered.
    for (const auto &e : subg.operations())
    {
      for (const auto &ind : e.node->getInputs())
      {
        const auto &obj = operands.at(ind);
        if (obj.isConstant() && !exist(ind))
        {
          registerPermuteInitializer(ind, obj);
        }
      }
    }
  }

protected:
#define OP(InternalName) \
  virtual void visit(const model::operation::InternalName &) override { /* DO NOTHING */ }
#include "model/Operations.lst"
#undef OP
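
// The OP() X-macro above expands "model/Operations.lst" into one empty visit()
// override per operation, so a concrete backend initializer only needs to override
// the visits for operations whose constant inputs require special handling.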

protected:
  virtual const model::Operands &operands() const = 0;
  virtual std::shared_ptr<ITensorBuilder> tensor_builder() const = 0;

public:
  void registerCopyInitializer(const model::OperandIndex &index, const model::Operand &obj)
  {
    // Applies to constant operands only
    // TODO Check whether the tensor has already been allocated
    if (!obj.isConstant())
      return;

    const auto type = obj.typeInfo().type();
    using neurun::model::DataType;

    switch (type)
    {
      case DataType::FLOAT32:
        _init_map[index] = copyInit<float>;
        break;
      case DataType::INT32:
        _init_map[index] = copyInit<int32_t>;
        break;
      case DataType::UINT32:
        _init_map[index] = copyInit<uint32_t>;
        break;
      case DataType::BOOL8:
      case DataType::QUANT8_ASYMM:
        // Both are 1-byte element types, so a byte-wise copy covers either.
        _init_map[index] = copyInit<uint8_t>;
        break;
      default:
        throw std::runtime_error("Not supported, yet");
    }
  }

public:
  void registerPermuteInitializer(const model::OperandIndex &index, const model::Operand &obj)
  {
    // Applies to constant operands only
    // TODO Check whether the tensor has already been allocated
    if (!obj.isConstant())
      return;

    const auto type = obj.typeInfo().type();
    using neurun::model::DataType;
    using namespace std::placeholders;

    switch (type)
    {
      case DataType::FLOAT32:
        _init_map[index] = std::bind(permuteInit<float>, _1, _2, _current_subg_layout);
        break;
      case DataType::INT32:
        _init_map[index] = std::bind(permuteInit<int32_t>, _1, _2, _current_subg_layout);
        break;
      case DataType::UINT32:
        _init_map[index] = std::bind(permuteInit<uint32_t>, _1, _2, _current_subg_layout);
        break;
      case DataType::BOOL8:
      case DataType::QUANT8_ASYMM:
        _init_map[index] = std::bind(permuteInit<uint8_t>, _1, _2, _current_subg_layout);
        break;
      default:
        throw std::runtime_error("Not supported, yet");
    }
  }

public:
  bool exist(const model::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); }

protected:
  std::unordered_map<model::OperandIndex, Initializer> _init_map;
  ir::Layout _current_subg_layout;
};
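
// A minimal sketch of a backend subclass, assuming hypothetical _operands /
// _tensor_builder members (the names are illustrative, not part of this header):
//
//   class MyConstantInitializer : public IConstantInitializer
//   {
//   public:
//     MyConstantInitializer(const model::Operands &operands,
//                           const std::shared_ptr<ITensorBuilder> &tensor_builder)
//         : _operands{operands}, _tensor_builder{tensor_builder}
//     {
//     }
//
//   private:
//     const model::Operands &operands() const override { return _operands; }
//     std::shared_ptr<ITensorBuilder> tensor_builder() const override
//     {
//       return _tensor_builder;
//     }
//
//     const model::Operands &_operands;
//     std::shared_ptr<ITensorBuilder> _tensor_builder;
//   };
//
// Typical flow: generate(subg, operands) registers an initializer for every constant
// input; run() then fills the backend tensors once they have been allocated.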

} // namespace backend
} // namespace neurun

#endif // __NEURUN_BACKEND_ICONSTANT_INITIALIZER_H__