/*
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __BASE_LOADER_BASE_LOADER_H__
#define __BASE_LOADER_BASE_LOADER_H__

#include "ir/Graph.h"
#include "ir/Shape.h"
#include "ir/Operations.Include.h"

#include "flatbuffers/flexbuffers.h"

#include <cassert>
#include <cerrno>
#include <cstring>
#include <map>
#include <memory>
#include <fstream>
#include <limits>
#include <string>
#include <unordered_map>
#include <vector>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <util/logging.h>

namespace onert
{
namespace base_loader
{

template <typename LoaderDomain> class BaseLoader
{
protected:
  using Verifier = typename LoaderDomain::Verifier;
  using ActivationFunctionType = typename LoaderDomain::ActivationFunctionType;
  using Buffer = typename LoaderDomain::Buffer;
  using BuiltinOperator = typename LoaderDomain::BuiltinOperator;
  using CustomOptionsFormat = typename LoaderDomain::CustomOptionsFormat;
  using Model = typename LoaderDomain::Model;
  using Operator = typename LoaderDomain::Operator;
  using Padding = typename LoaderDomain::Padding;
  using Pool2DOptions = typename LoaderDomain::Pool2DOptions;
  using SubGraph = typename LoaderDomain::SubGraph;
  using Tensor = typename LoaderDomain::Tensor;
  using TensorType = typename LoaderDomain::TensorType;
  using DimensionType = typename LoaderDomain::DimensionType;
  using SparseIndexVector = typename LoaderDomain::SparseIndexVector;

protected:
  bool isOptionalInputTensor(std::int32_t idx) { return idx == -1; }
  virtual bool allowOptionalInputTensor(BuiltinOperator) = 0;

public:
  /**
   * @brief Construct a new Loader object
   *
   * @param subgs Reference to the container that will hold the loaded subgraphs
   */
  explicit BaseLoader(std::unique_ptr<ir::Subgraphs> &subgs)
    : _base{nullptr}, _pagesize(getpagesize()), _fd(-1), _subgraphs(subgs), _model{nullptr},
      _tensor_names(std::make_shared<std::unordered_map<ir::OperandIndex, std::string>>())
  {
    _use_mmaped_data = util::getConfigBool(util::config::USE_MMAPED_DATA);
  }

  /**
   * @brief Load a model from a file
   *
   * @param file_path Path to the model file
   */
  void loadFromFile(const std::string &file_path);
  /**
   * @brief Load a model from a buffer
   *
   * @param buffer buffer pointer
   * @param size buffer size
   */
  void loadFromBuffer(uint8_t *buffer, size_t size);
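  // Typical usage (sketch): a format-specific frontend derives from this class,
  // implements loadSubgraph() and allowOptionalInputTensor(), and then drives loading
  // through one of the entry points above. The derived-class name below is hypothetical
  // and only illustrates the call sequence:
  //
  //   auto subgraphs = std::make_unique<ir::Subgraphs>();
  //   MyFormatLoader loader{subgraphs};  // : public BaseLoader<MyDomain>
  //   loader.loadFromFile("model.bin");  // or loader.loadFromBuffer(ptr, size)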

protected:
  ~BaseLoader() = default;
  void loadModel();

  // Helper functions
  ir::Activation convertActivation(ActivationFunctionType type);
  ir::DataType tensorTypeToDataType(TensorType type);
  ir::OperandIndex tensorIdxToOperandIdx(int32_t tensorIdx);
  // Create operands from tflite::Tensor
  ir::OperandIndex loadOperand(const Tensor *tensor, ir::Graph &subg);
  void loadQuantization(const Tensor *tensor, ir::TypeInfo &typeInfo);
  void loadSparsity(const Tensor *tensor, ir::TypeInfo &typeInfo);
  void loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs,
                       ir::OperandIndexSequence &outputs);
  // Create operations from Operator
  void loadOperation(const Operator *op, ir::Graph &subg);
  // Load Strides and Paddings from options to param
  template <typename Param, typename OptionsType>
  void loadStridesAndPaddings(Param &param, const OptionsType *options);
  // Load Pool2D param
  template <typename Param> void loadPool2DOptions(Param &param, const Pool2DOptions *options);

private:
  virtual std::unique_ptr<ir::Graph> loadSubgraph(const SubGraph *subg) = 0;
  // Operations
  template <typename OpIR, typename... Args>
  const OpIR *loadOperationTo(const Operator *op, ir::Graph &subg, Args &&... args);

  void loadAddV2(const Operator *op, ir::Graph &subg);
  void loadArgMinMax(const Operator *op, ir::Graph &subg, bool is_argmax);
  void loadBatchMatMul(const Operator *op, ir::Graph &subg);
  void loadBinaryArithmetic(const Operator *op, ir::Graph &subg,
                            ir::operation::BinaryArithmetic::ArithmeticType op_type);
  void loadComparison(const Operator *op, ir::Graph &subg);
  void loadConcatenation(const Operator *op, ir::Graph &subg);
  void loadConv2D(const Operator *op, ir::Graph &subg);
  void loadCustom(const Operator *op, ir::Graph &subg);
  void loadDepthToSpace(const Operator *op, ir::Graph &subg);
  void loadDepthwiseConv2D(const Operator *op, ir::Graph &subg);
  void loadEinsum(const Operator *op, ir::Graph &subg);
  void loadElementwiseActivation(const Operator *op, ir::Graph &subg,
                                 ir::operation::ElementwiseActivation::Type op_type,
                                 float alpha = 0.f, float beta = 0.f);
  void loadElementwiseBinary(const Operator *op, ir::Graph &subg,
                             ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type);
  void loadElementwiseUnary(const Operator *op, ir::Graph &subg,
                            ir::operation::ElementwiseUnary::Type op_type);
  void loadFC(const Operator *op, ir::Graph &subg);
  void loadFusedBatchNorm(const Operator *op, ir::Graph &subg);
  void loadGather(const Operator *op, ir::Graph &subg);
  void loadIf(const Operator *op, ir::Graph &subg);
  void loadLeakyRelu(const Operator *op, ir::Graph &subg);
  void loadLogSoftmax(const Operator *op, ir::Graph &subg);
  void loadDetectionPostProcess(const Operator *op, ir::Graph &subg);
  void loadOneHot(const Operator *op, ir::Graph &subg);
  void loadPack(const Operator *op, ir::Graph &subg);
  void loadPool2D(const Operator *op, ir::Graph &subg, ir::operation::Pool2D::PoolType op_type);
  void loadReduce(const Operator *op, ir::Graph &subg,
                  ir::operation::Reduce::ReduceType reduce_type);
  void loadReduceAll(const Operator *op, ir::Graph &subg);
  void loadReshape(const Operator *op, ir::Graph &subg);
  void loadResizeBilinear(const Operator *op, ir::Graph &subg);
  void loadResizeNearestNeighbor(const Operator *op, ir::Graph &subg);
  void loadSoftmax(const Operator *op, ir::Graph &subg);
  void loadSpaceToDepth(const Operator *op, ir::Graph &subg);
  void loadSplit(const Operator *op, ir::Graph &subg);
  void loadSplitV(const Operator *op, ir::Graph &subg);
  void loadSqueeze(const Operator *op, ir::Graph &subg);
  void loadStridedSlice(const Operator *op, ir::Graph &subg);
  void loadTransposeConv(const Operator *op, ir::Graph &subg);
  void loadUnidirectionalSequenceLSTM(const Operator *op, ir::Graph &subg);
  void loadUnpack(const Operator *op, ir::Graph &subg);
  void loadWhile(const Operator *op, ir::Graph &subg);

  void verifySubgraphIndex(int subg_index)
  {
    const auto num_subgraphs = _model->subgraphs()->size();
    if (subg_index < 0 || subg_index >= static_cast<int32_t>(num_subgraphs))
      throw std::runtime_error{std::string{"Invalid subgraph index - "} +
                               std::to_string(subg_index)};
  }

protected:
  // Base address for mapped region for loading (if needed)
  uint8_t *_base;
  // Memory page size
  int32_t _pagesize;
  // Loaded file descriptor
  int _fd;
  // Reference to the loadable subgraphs
  std::unique_ptr<ir::Subgraphs> &_subgraphs;
  const Model *_model;
  // Maps Tensor indices to onert Operands.
  std::vector<ir::OperandIndex> _tensor_to_operand;
  std::shared_ptr<std::unordered_map<ir::OperandIndex, std::string>> _tensor_names;
  // Verifier
  std::unique_ptr<Verifier> _verifier;
  // Boolean flag to use MMAPED_DATA
  bool _use_mmaped_data = false;

  std::unordered_map<uint32_t /* Buffer Index in circle file */, std::shared_ptr<ir::Data>>
    _buf_to_data;
};

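// The model file is kept mapped only while loadModel() runs: constant tensors are either
// copied into CachedData or re-mapped as MMapedData inside loadOperand(), so the whole-file
// mapping and the file descriptor can be released right after loading.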
template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::BaseLoader::loadFromFile(const std::string &file_path)
{
  _fd = open(file_path.c_str(), O_RDONLY);
  if (_fd < 0)
  {
    throw std::runtime_error("Failed to open file " + file_path);
  }

  struct stat file_stat;
  if (fstat(_fd, &file_stat) != 0)
  {
    throw std::runtime_error("Fstat failed or file " + file_path + " is not a regular file");
  }
  int size = file_stat.st_size;

  // Map model file into memory region
  _base = static_cast<uint8_t *>(mmap(NULL, size, PROT_READ, MAP_PRIVATE, _fd, 0));
  if (_base == MAP_FAILED)
  {
    close(_fd);
    throw std::runtime_error("mmap failed - " + std::string(strerror(errno)));
  }

  _verifier = std::make_unique<Verifier>(reinterpret_cast<const std::uint8_t *>(_base), size);

  loadModel();
  munmap(_base, size);

  close(_fd);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::BaseLoader::loadFromBuffer(uint8_t *buffer, size_t size)
{
  _base = buffer;
  _verifier = std::make_unique<Verifier>(reinterpret_cast<const std::uint8_t *>(_base), size);
  loadModel();
}

template <typename LoaderDomain>
ir::Activation
BaseLoader<LoaderDomain>::BaseLoader::convertActivation(const ActivationFunctionType type)
{
  switch (type)
  {
    case ActivationFunctionType::ActivationFunctionType_NONE:
      return ir::Activation::NONE;
    case ActivationFunctionType::ActivationFunctionType_RELU:
      return ir::Activation::RELU;
    case ActivationFunctionType::ActivationFunctionType_RELU_N1_TO_1:
      return ir::Activation::RELU1;
    case ActivationFunctionType::ActivationFunctionType_RELU6:
      return ir::Activation::RELU6;
    case ActivationFunctionType::ActivationFunctionType_TANH:
      return ir::Activation::TANH;
    default:
      throw std::runtime_error(std::string("Unsupported or invalid activation type: ") +
                               std::to_string(static_cast<int>(type)));
  }
}

template <typename LoaderDomain>
ir::DataType BaseLoader<LoaderDomain>::BaseLoader::tensorTypeToDataType(const TensorType type)
{
  switch (type)
  {
    case TensorType::TensorType_FLOAT32:
      return ir::DataType::FLOAT32;
    case TensorType::TensorType_FLOAT16:
      return ir::DataType::FLOAT16;
    case TensorType::TensorType_INT32:
      return ir::DataType::INT32;
    case TensorType::TensorType_UINT8:
      return ir::DataType::QUANT_UINT8_ASYMM;
    case TensorType::TensorType_INT64:
      return ir::DataType::INT64;
    // case TensorType::TensorType_STRING:
    case TensorType::TensorType_BOOL:
      return ir::DataType::BOOL8;
    case TensorType::TensorType_INT16:
      return ir::DataType::QUANT_INT16_ASYMM;
    // case TensorType::TensorType_COMPLEX64
    case TensorType::TensorType_INT8:
      return ir::DataType::QUANT_INT8_ASYMM;
    // case TensorType::TensorType_FLOAT64
    default:
      throw std::runtime_error(
        std::string("Unsupported tensor type: ").append(EnumNameTensorType(type)));
  }
}

template <typename LoaderDomain>
ir::OperandIndex BaseLoader<LoaderDomain>::BaseLoader::tensorIdxToOperandIdx(int32_t tensorIdx)
{
  return isOptionalInputTensor(tensorIdx) ? ir::OperandIndex() : _tensor_to_operand[tensorIdx];
}

/* The following Copy() helper is adapted from TensorFlow Lite */
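// Note: index values are narrowed to uint16_t here, which matches the storage type used
// for the segment/index vectors handed to ir::Sparsity below.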
template <typename T> bool Copy(const T *data_ptr, std::vector<uint16_t> &arr)
{
  if (data_ptr->values() == nullptr)
  {
    return false;
  }

  int size = data_ptr->values()->size();
  arr.reserve(size);
  for (int i = 0; i < size; i++)
  {
    arr.emplace_back(static_cast<uint16_t>(data_ptr->values()->Get(i)));
  }
  return true;
}

template <typename LoaderDomain>
ir::OperandIndex BaseLoader<LoaderDomain>::loadOperand(const Tensor *tensor, ir::Graph &subg)
{
  ir::Shape shape;
  // Shape
  const auto *tensor_shape = tensor->shape();
  if (tensor_shape != nullptr)
  {
    for (const auto &dim : *tensor_shape)
    {
      shape.append(dim);
    }
  }

  // Note on tensor->shape_signature():
  // Shape signatures are not handled here. Instead:
  //   - If shape_signature[k] == -1, tensor->shape()[k] == 1 is used as-is.
  //   - If the application wants to change the input shape, it can call
  //     nnfw_apply_input_tensorinfo().

  // TypeInfo
  ir::TypeInfo type_info(tensorTypeToDataType(tensor->type()));
  loadQuantization(tensor, type_info);
  loadSparsity(tensor, type_info);

  // Create operand
  const auto operand_index = subg.addOperand(shape, type_info);

  // Constant tensors are indicated by non-empty data.
  const auto *data = _model->buffers()->Get(tensor->buffer())->data();
  if (data != nullptr)
  {
    using std::ptrdiff_t;
    std::shared_ptr<ir::Data> data_obj;

    if (_fd == -1) // Model is from memory
    {
      data_obj = std::make_shared<ir::ExternalData>(data->data(), data->size());
    }
    else // Model is loaded(mmap'd) from a file
    {
      size_t data_size = data->size();
      ptrdiff_t unaligned_offset_start = data->data() - _base;
      ptrdiff_t offset_end = unaligned_offset_start + data_size;

      // Calculate the aligned offset from the base address of the mapped region;
      // mmap requires the file offset to be a multiple of the page size.
      ptrdiff_t aligned_offset_start = (unaligned_offset_start / _pagesize) * _pagesize;
      size_t mmap_size = offset_end - aligned_offset_start;
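      // For example, with a 4096-byte page size and unaligned_offset_start == 0x1234,
      // aligned_offset_start is 0x1000 and the tensor data begins 0x234 bytes into the
      // re-mapped (or copied) region below.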

      uint32_t buf_idx = tensor->buffer();
      auto buffer_found = _buf_to_data.find(buf_idx);

      if (buffer_found != _buf_to_data.end())
      {
        // Another tensor points to this buffer and its matching Data (either CachedData
        // or MMapedData) was already created, so reuse that Data object.
        data_obj = buffer_found->second;
      }
      else if (_use_mmaped_data)
      {
        data_obj = std::make_shared<ir::MMapedData>(_fd, aligned_offset_start, mmap_size,
                                                    unaligned_offset_start, data_size);
        _buf_to_data[buf_idx] = data_obj;
      }
      else
      {
        size_t offset = unaligned_offset_start - aligned_offset_start;
        uint8_t *mmap_base = static_cast<uint8_t *>(
          mmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, _fd, aligned_offset_start));

        data_obj = std::make_shared<ir::CachedData>(mmap_base + offset, data_size);
        _buf_to_data[buf_idx] = data_obj;

        munmap(mmap_base, mmap_size);
      }
    }
    subg.setOperandValue(operand_index, std::move(data_obj));
  }

  _tensor_names->emplace(operand_index, tensor->name()->str());

  // Variable
  if (tensor->is_variable())
  {
    if (data != nullptr)
      throw std::runtime_error("Variable tensor with buffer is not supported!");

    subg.operands().at(operand_index).info().setAsVariable();
  }

  return operand_index;
}

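// Quantization parameters: a missing or empty scale array marks the tensor as not
// quantized (scale 0, zero-point 0). Otherwise every scale/zero-point pair is copied
// into ir::TypeInfo (multiple pairs appear for per-channel quantized tensors), with
// zero points range-checked against int32_t.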
template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadQuantization(const Tensor *tensor, ir::TypeInfo &typeInfo)
{
  auto q_params = tensor->quantization();
  if (q_params == nullptr || q_params->scale() == nullptr || q_params->scale()->size() == 0)
  {
    typeInfo.quantization(0., 0);
    return;
  }
  if (q_params->zero_point() == nullptr)
  {
    throw std::runtime_error("Quantization params: scale is not null, but zero_point is null.");
  }
  const size_t num_scales = q_params->scale()->size();
  if (num_scales != q_params->zero_point()->size())
  {
    throw std::runtime_error("Quantization params: scale size != zero_point size");
  }
  std::vector<float> scales;
  std::vector<int32_t> zero_points;
  scales.resize(num_scales);
  zero_points.resize(num_scales);
  for (size_t i = 0; i < num_scales; ++i)
  {
    scales[i] = q_params->scale()->Get(i);
    // zero_point is defined as int64 in the schema while TypeInfo's zero_point is int32_t.
    // int64_t is used instead of long because long is only 4 bytes on most 32-bit architectures.
    int64_t zero_point = q_params->zero_point()->Get(i);
    if (zero_point < std::numeric_limits<int32_t>::min() ||
        zero_point > std::numeric_limits<int32_t>::max())
      throw std::runtime_error("Zero_point is out of int32 range.");
    zero_points[i] = static_cast<int32_t>(zero_point);
  }
  auto details = q_params->details_as_CustomQuantization();
  if (details != nullptr)
    throw std::runtime_error("Custom Quantization is not supported");
  typeInfo.quantization(std::move(scales), std::move(zero_points));
}

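// Sparsity is accepted only in two layouts that match the checks below: a plain 2D sparse
// tensor (DENSE dim[0] + SPARSE_CSR dim[1]) or a 2D tensor with 16x1-style block sparsity
// (the same two dims plus two trailing DENSE block dimensions).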
template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadSparsity(const Tensor *tensor, ir::TypeInfo &typeInfo)
{
  auto src_sparsity = tensor->sparsity();
  if (src_sparsity != nullptr)
  {
    std::vector<uint16_t> w1_segments;
    std::vector<uint16_t> w1_indices;
    // check traversal_order
    if (src_sparsity->traversal_order())
    {
      const int traversal_order_size = src_sparsity->traversal_order()->size();
      for (int i = 0; i < traversal_order_size; ++i)
      {
        if (i != src_sparsity->traversal_order()->Get(i))
          throw std::runtime_error("traversal_order [0, 1, ..., n-1] is only supported.");
      }
    }
    // check block_map
    int block_rank = 0;
    if (src_sparsity->block_map())
    {
      block_rank = src_sparsity->block_map()->size();
      for (int i = 0; i < block_rank; ++i)
      {
        if (i != src_sparsity->block_map()->Get(i))
          throw std::runtime_error("block_map [0, 1, ..., n-1] is only supported.");
      }
    }
    // load metadata
    const auto dim_metadata_size = src_sparsity->dim_metadata()->size();
    const auto dense_rank = tensor->shape() ? tensor->shape()->size() : 0;
    if (dense_rank + block_rank != dim_metadata_size)
      throw std::runtime_error("sparsity dim_metadata length is wrong.");
    bool random_sparsity = dim_metadata_size == 2 && block_rank == 0;
    bool block2D_sparsity = dim_metadata_size == 4 && block_rank == 2;
    if (!random_sparsity && !block2D_sparsity)
      throw std::runtime_error(
        "sparsity is supported only for 2D tensor with random or 16x1 block sparsity.");

    const auto *src_metadata = src_sparsity->dim_metadata()->Get(0);
    if (src_metadata->format() != DimensionType::DimensionType_DENSE)
      throw std::runtime_error("sparse tensor dim[0] is not DENSE");
    src_metadata = src_sparsity->dim_metadata()->Get(1);
    if (src_metadata->format() != DimensionType::DimensionType_SPARSE_CSR)
      throw std::runtime_error("sparse tensor dim[1] is not SPARSE_CSR");
    auto ParseSparseIndexVector = [src_metadata, &w1_segments, &w1_indices]() {
      if (src_metadata->array_segments() == nullptr || src_metadata->array_indices() == nullptr)
        return false;
      bool status = true;
      switch (src_metadata->array_segments_type())
      {
        case SparseIndexVector::SparseIndexVector_Int32Vector:
          status = Copy(src_metadata->array_segments_as_Int32Vector(), w1_segments);
          break;
        case SparseIndexVector::SparseIndexVector_Uint16Vector:
          status = Copy(src_metadata->array_segments_as_Uint16Vector(), w1_segments);
          break;
        case SparseIndexVector::SparseIndexVector_Uint8Vector:
          status = Copy(src_metadata->array_segments_as_Uint8Vector(), w1_segments);
          break;
        default:
          return false;
      }
      if (status != true)
        return false;
      switch (src_metadata->array_indices_type())
      {
        case SparseIndexVector::SparseIndexVector_Int32Vector:
          return Copy(src_metadata->array_indices_as_Int32Vector(), w1_indices);
        case SparseIndexVector::SparseIndexVector_Uint16Vector:
          return Copy(src_metadata->array_indices_as_Uint16Vector(), w1_indices);
        case SparseIndexVector::SparseIndexVector_Uint8Vector:
          return Copy(src_metadata->array_indices_as_Uint8Vector(), w1_indices);
        default:
          break;
      }
      return false;
    };
    if (ParseSparseIndexVector() == false)
      throw std::runtime_error("Error during parsing sparsity index information");
    // Get block size
    std::vector<int32_t> block_size;
    for (int i = 0; i < block_rank; ++i)
    {
      auto block_metadata = src_sparsity->dim_metadata()->Get(dense_rank + i);
      if (block_metadata->format() != DimensionType::DimensionType_DENSE)
        throw std::runtime_error("block dimension must be DENSE.");
      block_size.push_back(block_metadata->dense_size());
    }
    typeInfo.sparsity(std::make_shared<ir::Sparsity>(std::move(w1_segments), std::move(w1_indices),
                                                     std::move(block_size)));
  }
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs,
                                               ir::OperandIndexSequence &outputs)
{
  for (const std::int32_t idx : *op->inputs())
  {
    // Optional tensors are not supported yet except for FULLY_CONNECTED and BCQ_FULLY_CONNECTED
    auto check_optional_input = [&]() {
      auto builtin_code = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
      if (isOptionalInputTensor(idx) && !allowOptionalInputTensor(builtin_code))
        throw std::runtime_error(
          std::string("loader doesn't support optional input tensor yet for ")
            .append(EnumNameBuiltinOperator(builtin_code)));
    };
    check_optional_input();
    inputs.append(tensorIdxToOperandIdx(idx));
  }

  for (const std::int32_t idx : *op->outputs())
  {
    outputs.append(tensorIdxToOperandIdx(idx));
  }
}

template <typename LoaderDomain>
template <typename Param, typename OptionsType>
void BaseLoader<LoaderDomain>::loadStridesAndPaddings(Param &param, const OptionsType *options)
{
  // Strides
  param.stride.vertical = options->stride_h();
  param.stride.horizontal = options->stride_w();
  // Paddings
  switch (options->padding())
  {
    case Padding::Padding_SAME:
      param.padding.type = ir::PaddingType::SAME;
      break;
    case Padding::Padding_VALID:
      param.padding.type = ir::PaddingType::VALID;
      break;
    default:
      throw std::runtime_error{"Invalid padding type"};
  }
  // The explicit padding values in param are left unused here.
}

template <typename LoaderDomain>
template <typename Param>
void BaseLoader<LoaderDomain>::loadPool2DOptions(Param &param, const Pool2DOptions *options)
{
  // Strides and Paddings
  if (options->stride_h() <= 0 || options->stride_w() <= 0)
    throw std::runtime_error{"Invalid stride vertical or horizontal - both must be bigger than 0"};
  loadStridesAndPaddings(param, options);
  // Filter width and height
  if (options->filter_width() <= 0 || options->filter_height() <= 0)
    throw std::runtime_error{"Invalid filter width or height - both must be bigger than 0"};
  param.kw = options->filter_width();
  param.kh = options->filter_height();
  // Activation
  param.activation = convertActivation(options->fused_activation_function());
}

template <typename LoaderDomain>
template <typename OpIR, typename... Args>
const OpIR *BaseLoader<LoaderDomain>::loadOperationTo(const Operator *op, ir::Graph &subg,
                                                      Args &&... args)
{
  static_assert(sizeof...(args) <= 1, "You can't have more than 1 argument!");
  ir::OperandIndexSequence inputs;
  ir::OperandIndexSequence outputs;

  loadOperationIO(op, inputs, outputs);

  std::unique_ptr<OpIR> new_op(new OpIR(inputs, outputs, std::forward<Args>(args)...));
  auto ret = new_op.get();
  subg.addOperation(std::move(new_op));

  return ret;
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadConv2D(const Operator *op, ir::Graph &subg)
{
  ir::operation::Conv2D::Param param;
  const auto *options = op->builtin_options_as_Conv2DOptions();
  param.activation = convertActivation(options->fused_activation_function());
  loadStridesAndPaddings(param, options);
  param.dilation.width_factor = options->dilation_w_factor();
  param.dilation.height_factor = options->dilation_h_factor();

  loadOperationTo<ir::operation::Conv2D>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadDepthwiseConv2D(const Operator *op, ir::Graph &subg)
{
  ir::operation::DepthwiseConv2D::Param param;
  const auto *options = op->builtin_options_as_DepthwiseConv2DOptions();
  param.activation = convertActivation(options->fused_activation_function());
  loadStridesAndPaddings(param, options);
  param.multiplier = options->depth_multiplier();
  param.dilation.width_factor = options->dilation_w_factor();
  param.dilation.height_factor = options->dilation_h_factor();

  loadOperationTo<ir::operation::DepthwiseConv2D>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadTransposeConv(const Operator *op, ir::Graph &subg)
{
  ir::operation::TransposeConv::Param param;
  const auto *options = op->builtin_options_as_TransposeConvOptions();
  loadStridesAndPaddings(param, options);

  loadOperationTo<ir::operation::TransposeConv>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadPool2D(const Operator *op, ir::Graph &subg,
                                          ir::operation::Pool2D::PoolType op_type)
{
  ir::operation::Pool2D::Param param;
  param.op_type = op_type;
  const auto *options = op->builtin_options_as_Pool2DOptions();

  loadPool2DOptions(param, options);

  loadOperationTo<ir::operation::Pool2D>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadReshape(const Operator *op, ir::Graph &subg)
{
  ir::operation::Reshape::Param param{};
  const auto *options = op->builtin_options_as_ReshapeOptions();
  if (options != nullptr)
  {
    const auto *new_shape = options->new_shape();
    if (new_shape)
    {
      for (uint32_t i = 0; i < new_shape->size(); ++i)
      {
        param.new_shape.push_back(new_shape->Get(i));
      }
    }
  }

  loadOperationTo<ir::operation::Reshape>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadSoftmax(const Operator *op, ir::Graph &subg)
{
  ir::operation::Softmax::Param param;
  const auto *options = op->builtin_options_as_SoftmaxOptions();
  // Beta
  param.beta = options->beta();

  loadOperationTo<ir::operation::Softmax>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadConcatenation(const Operator *op, ir::Graph &subg)
{
  ir::operation::Concat::Param param;
  const auto *options = op->builtin_options_as_ConcatenationOptions();
  // Axis
  param.axis = options->axis();
  // activation unused

  loadOperationTo<ir::operation::Concat>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadFC(const Operator *op, ir::Graph &subg)
{
  ir::operation::FullyConnected::Param param;
  const auto *options = op->builtin_options_as_FullyConnectedOptions();

  param.activation = convertActivation(options->fused_activation_function());
  param.weights_format = static_cast<ir::FullyConnectedWeightsFormat>(options->weights_format());

  const auto fc = loadOperationTo<ir::operation::FullyConnected>(op, subg, param);

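  // Hybrid FullyConnected case (float input with quantized weights): the weights operand
  // is re-tagged as symmetric int8 below, apparently because hybrid FC kernels expect
  // symmetric int8 weights even when the model declares an asymmetric quantized type.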
  const auto &input_operand =
    subg.operands().at(fc->getInputs().at(ir::operation::FullyConnected::INPUT));
  auto &weights_operand =
    subg.operands().at(fc->getInputs().at(ir::operation::FullyConnected::WEIGHT));
  if (input_operand.typeInfo().type() == ir::DataType::FLOAT32 &&
      ((weights_operand.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM) ||
       weights_operand.typeInfo().type() == ir::DataType::QUANT_INT8_ASYMM))
  {
    weights_operand.type(ir::DataType::QUANT_INT8_SYMM);
  }
}

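// AddV2 arrives as a TensorFlow custom op rather than the ADD builtin, so its fused
// activation (if any) is read from the flexbuffer-encoded custom options instead of the
// builtin AddOptions.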
template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadAddV2(const Operator *op, ir::Graph &subg)
{
  ir::operation::BinaryArithmetic::Param param;
  param.arithmetic_type = ir::operation::BinaryArithmetic::ArithmeticType::ADD;

  if (op->custom_options() == nullptr)
  {
    param.activation = ir::Activation::NONE;
  }
  else
  {
    size_t custom_op_data_size = op->custom_options()->size();
    auto custom_op_data = op->custom_options()->Data();
    auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
    auto attr_map = data_root.AsMap();
    const auto fused_activation_func = static_cast<typename LoaderDomain::ActivationFunctionType>(
      attr_map["fused_activation_function"].AsInt8());
    param.activation = convertActivation(fused_activation_func);
  }

  loadOperationTo<ir::operation::BinaryArithmetic>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadDepthToSpace(const Operator *op, ir::Graph &subg)
{
  ir::operation::DepthToSpace::Param param;
  const auto *options = op->builtin_options_as_DepthToSpaceOptions();
  param.block_size = options->block_size();

  loadOperationTo<ir::operation::DepthToSpace>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadBinaryArithmetic(
  const Operator *op, ir::Graph &subg, ir::operation::BinaryArithmetic::ArithmeticType op_type)
{
  ir::operation::BinaryArithmetic::Param param;
  param.arithmetic_type = op_type;
  switch (op_type)
  {
    case ir::operation::BinaryArithmetic::ArithmeticType::ADD:
    {
      const auto *add_options = op->builtin_options_as_AddOptions();
      param.activation = convertActivation(add_options->fused_activation_function());
      break;
    }
    case ir::operation::BinaryArithmetic::ArithmeticType::SUB:
    {
      const auto *sub_options = op->builtin_options_as_SubOptions();
      param.activation = convertActivation(sub_options->fused_activation_function());
      break;
    }
    case ir::operation::BinaryArithmetic::ArithmeticType::MUL:
    {
      const auto *mul_options = op->builtin_options_as_MulOptions();
      param.activation = convertActivation(mul_options->fused_activation_function());
      break;
    }
    case ir::operation::BinaryArithmetic::ArithmeticType::DIV:
    {
      const auto *div_options = op->builtin_options_as_DivOptions();
      param.activation = convertActivation(div_options->fused_activation_function());
      break;
    }
    default:
      assert(false &&
             "The function 'loadBinaryArithmetic' supports only BinaryArithmetic operations");
      break;
  }

  loadOperationTo<ir::operation::BinaryArithmetic>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadPack(const Operator *op, ir::Graph &subg)
{
  ir::operation::Pack::Param param;
  const auto *options = op->builtin_options_as_PackOptions();
  param.num = options->values_count();
  param.axis = options->axis();

  loadOperationTo<ir::operation::Pack>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadElementwiseActivation(
  const Operator *op, ir::Graph &subg, ir::operation::ElementwiseActivation::Type op_type,
  float alpha, float beta)
{
  ir::operation::ElementwiseActivation::Param param;
  param.op_type = op_type;
  param.alpha = alpha;
  param.beta = beta;

  loadOperationTo<ir::operation::ElementwiseActivation>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadResizeBilinear(const Operator *op, ir::Graph &subg)
{
  ir::operation::ResizeBilinear::Param param;
  param.align_corners = op->builtin_options_as_ResizeBilinearOptions()->align_corners();
  param.half_pixel_centers = op->builtin_options_as_ResizeBilinearOptions()->half_pixel_centers();

  loadOperationTo<ir::operation::ResizeBilinear>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadResizeNearestNeighbor(const Operator *op, ir::Graph &subg)
{
  ir::operation::ResizeNearestNeighbor::Param param;
  param.align_corners = op->builtin_options_as_ResizeNearestNeighborOptions()->align_corners();

  loadOperationTo<ir::operation::ResizeNearestNeighbor>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadReduce(const Operator *op, ir::Graph &subg,
                                          ir::operation::Reduce::ReduceType reduce_type)
{
  ir::operation::Reduce::Param param;
  param.reduce_type = reduce_type;
  param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims();

  loadOperationTo<ir::operation::Reduce>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadReduceAll(const Operator *op, ir::Graph &subg)
{
  ir::operation::Reduce::Param param;
  param.reduce_type = ir::operation::Reduce::ReduceType::ALL;
  if (op->custom_options() == nullptr)
  {
    param.keep_dims = false;
  }
  else
  {
    size_t custom_op_data_size = op->custom_options()->size();
    auto custom_op_data = op->custom_options()->Data();
    auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
    auto attr_map = data_root.AsMap();
    param.keep_dims = attr_map["keep_dims"].AsBool();
  }

  loadOperationTo<ir::operation::Reduce>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadElementwiseBinary(
  const Operator *op, ir::Graph &subg,
  ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type)
{
  ir::operation::ElementwiseBinary::Param param;
  param.op_type = op_type;

  loadOperationTo<ir::operation::ElementwiseBinary>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadElementwiseUnary(const Operator *op, ir::Graph &subg,
                                                    ir::operation::ElementwiseUnary::Type op_type)
{
  ir::operation::ElementwiseUnary::Param param;
  param.op_type = op_type;

  const auto eu = loadOperationTo<ir::operation::ElementwiseUnary>(op, subg, param);
  if (op_type == ir::operation::ElementwiseUnary::Type::CAST)
  {
    auto qasymm8ToUint8 = [](ir::Operand &operand) {
      if (operand.typeInfo().type() == ir::DataType::QUANT_UINT8_ASYMM)
      {
        operand.type(ir::DataType::UINT8);
      }
    };
    qasymm8ToUint8(
      subg.operands().at(eu->getInputs().at(ir::operation::ElementwiseUnary::Input::INPUT)));
    qasymm8ToUint8(subg.operands().at(eu->getOutputs().at(0)));
  }
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadGather(const Operator *op, ir::Graph &subg)
{
  ir::operation::Gather::Param param;
  param.axis = op->builtin_options_as_GatherOptions()->axis();

  loadOperationTo<ir::operation::Gather>(op, subg, param);
}

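// TFLite_Detection_PostProcess is a custom op; all of its parameters are read from the
// flexbuffer attribute map, and missing optional keys fall back to the defaults chosen
// below (100 boxes per class, fast non-regular NMS).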
template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadDetectionPostProcess(const Operator *op, ir::Graph &subg)
{
  const flexbuffers::Map &m =
    flexbuffers::GetRoot(op->custom_options()->data(), op->custom_options()->size()).AsMap();

  ir::operation::DetectionPostProcess::Param param;

  param.max_detections = m["max_detections"].AsInt32();

  // TODO fixme
  param.max_classes_per_detection = m["max_classes_per_detection"].AsInt32();
  if (m["detections_per_class"].IsNull())
    param.max_boxes_per_class = 100;
  else
    param.max_boxes_per_class = m["detections_per_class"].AsInt32();

  if (m["use_regular_nms"].IsNull())
    param.do_fast_eval = true;
  else
    param.do_fast_eval = !m["use_regular_nms"].AsBool();

  param.score_threshold = m["nms_score_threshold"].AsFloat();
  param.iou_threshold = m["nms_iou_threshold"].AsFloat();

  // TODO add num classes support
  param.num_classes = m["num_classes"].AsInt32();

  param.scale.y_scale = m["y_scale"].AsFloat();
  param.scale.x_scale = m["x_scale"].AsFloat();
  param.scale.h_scale = m["h_scale"].AsFloat();
  param.scale.w_scale = m["w_scale"].AsFloat();

  // TODO depends on input model framework
  param.center_size_boxes = true;

  loadOperationTo<ir::operation::DetectionPostProcess>(op, subg, param);
}

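// BatchMatMul can reach the loader either as the builtin op or as the "BatchMatMulV2"
// custom op; in the custom case the adjoint flags come from the flexbuffer attribute map
// (defaulting to false when no custom options are attached).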
template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadBatchMatMul(const Operator *op, ir::Graph &subg)
{
  ir::operation::BatchMatMul::Param param;

  const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();

  switch (builtin_op)
  {
    case BuiltinOperator::BuiltinOperator_BATCH_MATMUL:
      param.adj_x = op->builtin_options_as_BatchMatMulOptions()->adjoint_lhs();
      param.adj_y = op->builtin_options_as_BatchMatMulOptions()->adjoint_rhs();
      break;
    case BuiltinOperator::BuiltinOperator_CUSTOM:
      if (op->custom_options() == nullptr)
      {
        param.adj_x = false;
        param.adj_y = false;
      }
      else
      {
        size_t custom_op_data_size = op->custom_options()->size();
        auto custom_op_data = op->custom_options()->Data();
        auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
        auto attr_map = data_root.AsMap();
        param.adj_x = attr_map["adj_x"].AsBool();
        param.adj_y = attr_map["adj_y"].AsBool();
      }
      break;
    default:
      throw std::runtime_error(
        std::string("Wrong loaded operation: ").append(EnumNameBuiltinOperator(builtin_op)) +
        " as " + EnumNameBuiltinOperator(BuiltinOperator::BuiltinOperator_BATCH_MATMUL));
  }

  loadOperationTo<ir::operation::BatchMatMul>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadSpaceToDepth(const Operator *op, ir::Graph &subg)
{
  ir::operation::SpaceToDepth::Param param;
  const auto *options = op->builtin_options_as_SpaceToDepthOptions();
  param.block_size = options->block_size();

  loadOperationTo<ir::operation::SpaceToDepth>(op, subg, param);
}

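// Custom operations are dispatched by name: names found in the map below are routed to
// dedicated loaders, while anything unknown (std::map::at throws) falls through to the
// catch block and is loaded as a generic ir::operation::Custom carrying the raw flexbuffer
// payload as userdata.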
template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadCustom(const Operator *op, ir::Graph &subg)
{
  ir::OperandIndexSequence inputs;
  ir::OperandIndexSequence outputs;

  assert(op->custom_options_format() == CustomOptionsFormat::CustomOptionsFormat_FLEXBUFFERS &&
         "Unsupported custom operation options format");

  auto *op_code = _model->operator_codes()->Get(op->opcode_index());
  auto custom_op_name = op_code->custom_code()->str();

  enum class BuiltinOP
  {
    AddV2,
    ReduceAll,
    MatrixBandPart,
    BatchMatMul,
    Einsum,
    BroadcastTo,
    FusedBatchNorm,
    StatelessRandomUniform,
    Erf,
    DetectionPostProcess
  };

  // Mapping from custom op name string to BuiltinOP enum
  std::map<std::string, BuiltinOP> builtin_map = {
    {"AddV2", BuiltinOP::AddV2},
    {"All", BuiltinOP::ReduceAll},
    {"MatrixBandPart", BuiltinOP::MatrixBandPart},
    {"BatchMatMulV2", BuiltinOP::BatchMatMul},
    {"Einsum", BuiltinOP::Einsum},
    {"FusedBatchNormV3", BuiltinOP::FusedBatchNorm},
    {"BroadcastTo", BuiltinOP::BroadcastTo},
    {"StatelessRandomUniform", BuiltinOP::StatelessRandomUniform},
    {"Erf", BuiltinOP::Erf},
    {"TFLite_Detection_PostProcess", BuiltinOP::DetectionPostProcess},
  };

  try
  {
    // Throw out_of_range if it is unknown custom op
    auto custom_op_id = builtin_map.at(custom_op_name);
    switch (custom_op_id)
    {
      case BuiltinOP::AddV2:
        loadAddV2(op, subg);
        break;
      case BuiltinOP::ReduceAll:
        loadReduceAll(op, subg);
        break;
      case BuiltinOP::MatrixBandPart:
        loadOperationTo<ir::operation::MatrixBandPart>(op, subg);
        break;
      case BuiltinOP::BatchMatMul:
        loadBatchMatMul(op, subg);
        break;
      case BuiltinOP::Einsum:
        loadEinsum(op, subg);
        break;
      case BuiltinOP::BroadcastTo:
        loadOperationTo<ir::operation::BroadcastTo>(op, subg);
        break;
      case BuiltinOP::FusedBatchNorm:
        loadFusedBatchNorm(op, subg);
        break;
      case BuiltinOP::StatelessRandomUniform:
        loadOperationTo<ir::operation::StatelessRandomUniform>(op, subg);
        break;
      case BuiltinOP::Erf:
        loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ERF);
        break;
      case BuiltinOP::DetectionPostProcess:
        loadDetectionPostProcess(op, subg);
        break;
      default:
        throw std::runtime_error{
          "Loader: Custom OP map is defined but operation loader function is not defined"};
    }

    return;
  }
  catch (...)
  {
    loadOperationIO(op, inputs, outputs);

    auto constraint = ir::OperandConstraint::createExact(inputs.size());

    size_t custom_op_data_size = op->custom_options()->size();
    auto custom_op_data = new char[custom_op_data_size];
    std::copy(op->custom_options()->begin(), op->custom_options()->end(), custom_op_data);

    ir::operation::Custom::Userdata userdata{};
    userdata.data = custom_op_data;
    userdata.size = custom_op_data_size;

    auto new_op = std::make_unique<ir::operation::Custom>(constraint, inputs, outputs,
                                                          custom_op_name, userdata);

    subg.addOperation(std::move(new_op));
  }
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadSqueeze(const Operator *op, ir::Graph &subg)
{
  ir::operation::Squeeze::Param param;
  const auto *options = op->builtin_options_as_SqueezeOptions();
  const auto *dims = options->squeeze_dims();
  if (dims)
  {
    if (dims->size() > sizeof(param.dims) / sizeof(param.dims[0]))
      throw std::runtime_error("Squeeze: too many squeeze dimensions for 'param.dims'.");
    param.ndim = dims->size();
    for (int i = 0; i < param.ndim; ++i)
      param.dims[i] = dims->Get(i);
  }

  loadOperationTo<ir::operation::Squeeze>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadSplit(const Operator *op, ir::Graph &subg)
{
  ir::operation::Split::Param param;
  const auto *options = op->builtin_options_as_SplitOptions();
  param.num_splits = options->num_splits();

  loadOperationTo<ir::operation::Split>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadSplitV(const Operator *op, ir::Graph &subg)
{
  ir::operation::SplitV::Param param;
  const auto *options = op->builtin_options_as_SplitVOptions();
  param.num_splits = options->num_splits();

  loadOperationTo<ir::operation::SplitV>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadStridedSlice(const Operator *op, ir::Graph &subg)
{
  ir::operation::StridedSlice::Param param;
  const auto *options = op->builtin_options_as_StridedSliceOptions();
  param.begin_mask = options->begin_mask();
  param.end_mask = options->end_mask();
  param.shrink_axis_mask = options->shrink_axis_mask();

  loadOperationTo<ir::operation::StridedSlice>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadUnpack(const Operator *op, ir::Graph &subg)
{
  ir::operation::Unpack::Param param;
  const auto *options = op->builtin_options_as_UnpackOptions();
  param.num = options->num();
  param.axis = options->axis();

  loadOperationTo<ir::operation::Unpack>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadComparison(const Operator *op, ir::Graph &subg)
{
  ir::operation::Comparison::Param param;
  const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();

  switch (builtin_op)
  {
    case BuiltinOperator::BuiltinOperator_EQUAL:
      param.comparison_type = ir::operation::Comparison::ComparisonType::Equal;
      break;
    case BuiltinOperator::BuiltinOperator_NOT_EQUAL:
      param.comparison_type = ir::operation::Comparison::ComparisonType::NotEqual;
      break;
    case BuiltinOperator::BuiltinOperator_GREATER_EQUAL:
      param.comparison_type = ir::operation::Comparison::ComparisonType::GreaterEqual;
      break;
    case BuiltinOperator::BuiltinOperator_GREATER:
      param.comparison_type = ir::operation::Comparison::ComparisonType::Greater;
      break;
    case BuiltinOperator::BuiltinOperator_LESS_EQUAL:
      param.comparison_type = ir::operation::Comparison::ComparisonType::LessEqual;
      break;
    case BuiltinOperator::BuiltinOperator_LESS:
      param.comparison_type = ir::operation::Comparison::ComparisonType::Less;
      break;
    default:
      throw std::runtime_error(
        std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op)));
  }

  loadOperationTo<ir::operation::Comparison>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadEinsum(const Operator *op, ir::Graph &subg)
{
  ir::operation::Einsum::Param param;
  if (op->custom_options() == nullptr)
  {
    throw std::runtime_error{"Einsum: empty equation"};
  }
  else
  {
    size_t custom_op_data_size = op->custom_options()->size();
    auto custom_op_data = op->custom_options()->Data();
    auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
    auto attr_map = data_root.AsMap();
    param.equation = attr_map["equation"].ToString();
  }

  const auto es = loadOperationTo<ir::operation::Einsum>(op, subg, param);
  if (es->getInputs().size() != 2)
  {
    throw std::runtime_error{"Einsum: NYI input - only support two inputs"};
  }
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadFusedBatchNorm(const Operator *op, ir::Graph &subg)
{
  ir::operation::FusedBatchNorm::Param param;
  if (op->custom_options() == nullptr)
  {
    throw std::runtime_error{"FusedBatchNorm: empty option"};
  }
  else
  {
    size_t custom_op_data_size = op->custom_options()->size();
    auto custom_op_data = op->custom_options()->Data();
    auto data_root = flexbuffers::GetRoot(custom_op_data, custom_op_data_size);
    auto attr_map = data_root.AsMap();
    param.is_training = attr_map["is_training"].AsBool();
    param.epsilon = attr_map["epsilon"].AsFloat();
    param.data_format = attr_map["data_format"].ToString();
  }

  const auto fbn = loadOperationTo<ir::operation::FusedBatchNorm>(op, subg, param);

  if (fbn->getInputs().size() != 5)
  {
    throw std::runtime_error{"FusedBatchNorm: NYI input - only support five inputs"};
  }
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadOneHot(const Operator *op, ir::Graph &subg)
{
  if (op->inputs()->size() != 4 || op->outputs()->size() != 1)
    throw std::runtime_error("OneHot Op has wrong number of input or output tensors.");

  // Set parameter
  ir::operation::OneHot::Param param;
  param.axis = op->builtin_options_as_OneHotOptions()->axis();

  loadOperationTo<ir::operation::OneHot>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadIf(const Operator *op, ir::Graph &subg)
{
  const auto *options = op->builtin_options_as_IfOptions();
  const int32_t then_index = options->then_subgraph_index();
  const int32_t else_index = options->else_subgraph_index();

  verifySubgraphIndex(then_index);
  verifySubgraphIndex(else_index);

  ir::operation::If::Param param;
  param.then_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(then_index)};
  param.else_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(else_index)};

  loadOperationTo<ir::operation::If>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadWhile(const Operator *op, ir::Graph &subg)
{
  const auto *options = op->builtin_options_as_WhileOptions();
  const int32_t cond_index = options->cond_subgraph_index();
  const int32_t body_index = options->body_subgraph_index();

  verifySubgraphIndex(cond_index);
  verifySubgraphIndex(body_index);

  ir::operation::While::Param param;
  param.cond_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(cond_index)};
  param.body_subg_index = ir::SubgraphIndex{static_cast<uint32_t>(body_index)};

  loadOperationTo<ir::operation::While>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadArgMinMax(const Operator *op, ir::Graph &subg, bool is_argmax)
{
  ir::operation::ArgMinMax::Param param;
  const auto output_type = is_argmax ? op->builtin_options_as_ArgMaxOptions()->output_type()
                                     : op->builtin_options_as_ArgMinOptions()->output_type();
  param.output_type = tensorTypeToDataType(output_type);
  param.is_arg_max = is_argmax;

  loadOperationTo<ir::operation::ArgMinMax>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadLogSoftmax(const Operator *op, ir::Graph &subg)
{
  ir::operation::LogSoftmax::Param param;
  // In tflite, beta is fixed to 1.0 and axis is fixed to -1.
  param.beta = 1.0f;
  param.axis = -1;

  loadOperationTo<ir::operation::LogSoftmax>(op, subg, param);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadLeakyRelu(const Operator *op, ir::Graph &subg)
{
  float alpha = op->builtin_options_as_LeakyReluOptions()->alpha();
  loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::LEAKY_RELU, alpha,
                            1.f);
}

template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadUnidirectionalSequenceLSTM(const Operator *op, ir::Graph &subg)
{
  ir::operation::LSTM::Param param;
  const auto *options = op->builtin_options_as_UnidirectionalSequenceLSTMOptions();
  param.activation = convertActivation(options->fused_activation_function());
  param.cell_threshold = options->cell_clip();
  param.projection_threshold = options->proj_clip();
  param.time_major = options->time_major();
  // The asymmetric_quantize_inputs option is unused yet

  ir::OperandIndexSequence inputs;
  for (const std::int32_t idx : *op->inputs())
  {
    inputs.append(tensorIdxToOperandIdx(idx));
  }

  ir::OperandIndexSequence outputs;
  // loader doesn't support optional output tensor yet
  if (op->outputs()->size() != 1)
  {
    auto builtin_code = _model->operator_codes()->Get(op->opcode_index())->builtin_code();
    throw std::runtime_error(std::string("loader doesn't support optional output tensor yet for ")
                               .append(EnumNameBuiltinOperator(builtin_code)));
  }
  for (size_t i = 0; i < ir::operation::LSTM::Output::OUTPUT; ++i)
  {
    // Add optional outputs
    outputs.append(ir::OperandIndex());
  }
  outputs.append(tensorIdxToOperandIdx(op->outputs()->Get(0)));

  std::unique_ptr<ir::operation::LSTM> new_op(new ir::operation::LSTM(inputs, outputs, param));
  subg.addOperation(std::move(new_op));
}

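// Builtin-operator dispatch. Note that RELU, RELU6 and RELU_N1_TO_1 are all lowered to
// ElementwiseActivation with type RELU, using (alpha, beta) as the upper/lower clipping
// bounds of the activation range.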
template <typename LoaderDomain>
void BaseLoader<LoaderDomain>::loadOperation(const Operator *op, ir::Graph &subg)
{
  const auto builtin_op = _model->operator_codes()->Get(op->opcode_index())->builtin_code();

  switch (builtin_op)
  {
    case BuiltinOperator::BuiltinOperator_ADD_N:
      loadOperationTo<ir::operation::AddN>(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_CONV_2D:
      loadConv2D(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_AVERAGE_POOL_2D:
      loadPool2D(op, subg, ir::operation::Pool2D::PoolType::AVG);
      return;
    case BuiltinOperator::BuiltinOperator_DEPTHWISE_CONV_2D:
      loadDepthwiseConv2D(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_TRANSPOSE_CONV:
      loadTransposeConv(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_RESHAPE:
      loadReshape(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_SOFTMAX:
      loadSoftmax(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_MAX_POOL_2D:
      loadPool2D(op, subg, ir::operation::Pool2D::PoolType::MAX);
      return;
    case BuiltinOperator::BuiltinOperator_CONCATENATION:
      loadConcatenation(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_FLOOR:
      loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::FLOOR);
      return;
    case BuiltinOperator::BuiltinOperator_FULLY_CONNECTED:
      loadFC(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_ADD:
      loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::ADD);
      return;
    case BuiltinOperator::BuiltinOperator_SUB:
      loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::SUB);
      return;
    case BuiltinOperator::BuiltinOperator_MUL:
      loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::MUL);
      return;
    case BuiltinOperator::BuiltinOperator_DIV:
      loadBinaryArithmetic(op, subg, ir::operation::BinaryArithmetic::ArithmeticType::DIV);
      return;
    case BuiltinOperator::BuiltinOperator_PACK:
      loadPack(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_ELU:
      loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::ELU);
      return;
    case BuiltinOperator::BuiltinOperator_RELU:
      loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU,
                                ir::operation::ElementwiseActivation::infinity, 0.f);
      return;
    case BuiltinOperator::BuiltinOperator_RELU_N1_TO_1:
      loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU, 1.f,
                                -1.f);
      return;
    case BuiltinOperator::BuiltinOperator_RELU6:
      loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::RELU, 6.f,
                                0.f);
      return;
    case BuiltinOperator::BuiltinOperator_RESIZE_BILINEAR:
      loadResizeBilinear(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
      loadResizeNearestNeighbor(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_RSQRT:
      loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::RSQRT);
      return;
    case BuiltinOperator::BuiltinOperator_SELECT:
    case BuiltinOperator::BuiltinOperator_SELECT_V2:
      loadOperationTo<ir::operation::Select>(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_SQRT:
      loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SQRT);
      return;
    case BuiltinOperator::BuiltinOperator_SQUARE:
      loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SQUARE);
      return;
    case BuiltinOperator::BuiltinOperator_SQUARED_DIFFERENCE:
      loadOperationTo<ir::operation::SquaredDifference>(op, subg);
      return;
    case BuiltinOperator::BuiltinOperator_TANH:
      loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::TANH, 1.f,
                                1.f);
      return;
1477     case BuiltinOperator::BuiltinOperator_TRANSPOSE:
1478       loadOperationTo<ir::operation::Transpose>(op, subg);
1479       return;
1480     case BuiltinOperator::BuiltinOperator_MEAN:
1481       loadReduce(op, subg, ir::operation::Reduce::ReduceType::MEAN);
1482       return;
1483     case BuiltinOperator::BuiltinOperator_REDUCE_ANY:
1484       loadReduce(op, subg, ir::operation::Reduce::ReduceType::ANY);
1485       return;
1486     case BuiltinOperator::BuiltinOperator_REDUCE_MAX:
1487       loadReduce(op, subg, ir::operation::Reduce::ReduceType::MAX);
1488       return;
1489     case BuiltinOperator::BuiltinOperator_REVERSE_V2:
1490       loadOperationTo<ir::operation::Reverse>(op, subg);
1491       return;
1492     case BuiltinOperator::BuiltinOperator_PAD:
1493     case BuiltinOperator::BuiltinOperator_PADV2:
1494       loadOperationTo<ir::operation::Pad>(op, subg);
1495       return;
1496     case BuiltinOperator::BuiltinOperator_LOGISTIC:
1497       loadElementwiseActivation(op, subg, ir::operation::ElementwiseActivation::Type::LOGISTIC);
1498       return;
1499     case BuiltinOperator::BuiltinOperator_EXP:
1500       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::EXP);
1501       return;
1502     case BuiltinOperator::BuiltinOperator_EXPAND_DIMS:
1503       loadOperationTo<ir::operation::ExpandDims>(op, subg);
1504       return;
1505     case BuiltinOperator::BuiltinOperator_GATHER:
1506       loadGather(op, subg);
1507       return;
1508     case BuiltinOperator::BuiltinOperator_SPACE_TO_BATCH_ND:
1509       loadOperationTo<ir::operation::SpaceToBatchND>(op, subg);
1510       return;
1511     case BuiltinOperator::BuiltinOperator_BATCH_TO_SPACE_ND:
1512       loadOperationTo<ir::operation::BatchToSpaceND>(op, subg);
1513       return;
1514     case BuiltinOperator::BuiltinOperator_SUM:
1515       loadReduce(op, subg, ir::operation::Reduce::ReduceType::SUM);
1516       return;
1517     case BuiltinOperator::BuiltinOperator_CUSTOM:
1518       loadCustom(op, subg);
1519       return;
1520     case BuiltinOperator::BuiltinOperator_SQUEEZE:
1521       loadSqueeze(op, subg);
1522       return;
1523     case BuiltinOperator::BuiltinOperator_PRELU:
1524       loadOperationTo<ir::operation::PReLU>(op, subg);
1525       return;
1526     case BuiltinOperator::BuiltinOperator_SPLIT:
1527       loadSplit(op, subg);
1528       return;
1529     case BuiltinOperator::BuiltinOperator_SPLIT_V:
1530       loadSplitV(op, subg);
1531       return;
1532     case BuiltinOperator::BuiltinOperator_SLICE:
1533       loadOperationTo<ir::operation::Slice>(op, subg);
1534       return;
1535     case BuiltinOperator::BuiltinOperator_STRIDED_SLICE:
1536       loadStridedSlice(op, subg);
1537       return;
1538     case BuiltinOperator::BuiltinOperator_UNPACK:
1539       loadUnpack(op, subg);
1540       return;
1541     case BuiltinOperator::BuiltinOperator_FLOOR_DIV:
1542       loadElementwiseBinary(op, subg,
1543                             ir::operation::ElementwiseBinary::ElementwiseBinaryType::FLOOR_DIV);
1544       return;
1545     case BuiltinOperator::BuiltinOperator_MINIMUM:
1546       loadElementwiseBinary(op, subg, ir::operation::ElementwiseBinary::ElementwiseBinaryType::MIN);
1547       return;
1548     case BuiltinOperator::BuiltinOperator_MAXIMUM:
1549       loadElementwiseBinary(op, subg, ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX);
1550       return;
1551     case BuiltinOperator::BuiltinOperator_CAST:
1552       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::CAST);
1553       return;
1554     case BuiltinOperator::BuiltinOperator_EQUAL:
1555     case BuiltinOperator::BuiltinOperator_NOT_EQUAL:
1556     case BuiltinOperator::BuiltinOperator_GREATER_EQUAL:
1557     case BuiltinOperator::BuiltinOperator_GREATER:
1558     case BuiltinOperator::BuiltinOperator_LESS_EQUAL:
1559     case BuiltinOperator::BuiltinOperator_LESS:
1560       loadComparison(op, subg);
1561       return;
1562     case BuiltinOperator::BuiltinOperator_ONE_HOT:
1563       loadOneHot(op, subg);
1564       return;
1565     case BuiltinOperator::BuiltinOperator_ABS:
1566       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ABS);
1567       return;
1568     case BuiltinOperator::BuiltinOperator_COS:
1569       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::COS);
1570       return;
1571     case BuiltinOperator::BuiltinOperator_SIN:
1572       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::SIN);
1573       return;
1574     case BuiltinOperator::BuiltinOperator_SHAPE:
1575       loadOperationTo<ir::operation::Shape>(op, subg);
1576       return;
1577     case BuiltinOperator::BuiltinOperator_REDUCE_PROD:
1578       loadReduce(op, subg, ir::operation::Reduce::ReduceType::PROD);
1579       return;
1580     case BuiltinOperator::BuiltinOperator_IF:
1581       loadIf(op, subg);
1582       return;
1583     case BuiltinOperator::BuiltinOperator_WHILE:
1584       loadWhile(op, subg);
1585       return;
1586     case BuiltinOperator::BuiltinOperator_NEG:
1587       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::NEG);
1588       return;
1589     case BuiltinOperator::BuiltinOperator_ARG_MAX:
1590       loadArgMinMax(op, subg, true);
1591       return;
1592     case BuiltinOperator::BuiltinOperator_ARG_MIN:
1593       loadArgMinMax(op, subg, false);
1594       return;
1595     case BuiltinOperator::BuiltinOperator_LOG:
1596       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOG);
1597       return;
1598     case BuiltinOperator::BuiltinOperator_ROUND:
1599       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ROUND);
1600       return;
1601     case BuiltinOperator::BuiltinOperator_POW:
1602       loadOperationTo<ir::operation::Pow>(op, subg);
1603       return;
1604     case BuiltinOperator::BuiltinOperator_LOGICAL_NOT:
1605       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOGICAL_NOT);
1606       return;
1607     case BuiltinOperator::BuiltinOperator_LOGICAL_AND:
1608       loadElementwiseBinary(op, subg,
1609                             ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND);
1610       return;
1611     case BuiltinOperator::BuiltinOperator_LOGICAL_OR:
1612       loadElementwiseBinary(op, subg,
1613                             ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR);
1614       return;
1615     case BuiltinOperator::BuiltinOperator_FILL:
1616       loadOperationTo<ir::operation::Fill>(op, subg);
1617       return;
1618     case BuiltinOperator::BuiltinOperator_ZEROS_LIKE:
1619       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::ZEROS_LIKE);
1620       return;
1621     case BuiltinOperator::BuiltinOperator_TILE:
1622       loadOperationTo<ir::operation::Tile>(op, subg);
1623       return;
1624     case BuiltinOperator::BuiltinOperator_RANGE:
1625       loadOperationTo<ir::operation::Range>(op, subg);
1626       return;
1627     case BuiltinOperator::BuiltinOperator_BATCH_MATMUL:
1628       loadBatchMatMul(op, subg);
1629       return;
1630     case BuiltinOperator::BuiltinOperator_LOG_SOFTMAX:
1631       loadLogSoftmax(op, subg);
1632       return;
1633     case BuiltinOperator::BuiltinOperator_QUANTIZE:
1634       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::QUANTIZE);
1635       return;
1636     case BuiltinOperator::BuiltinOperator_DEQUANTIZE:
1637       loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::DEQUANTIZE);
1638       return;
1639     case BuiltinOperator::BuiltinOperator_SPACE_TO_DEPTH:
1640       loadSpaceToDepth(op, subg);
1641       return;
1642     case BuiltinOperator::BuiltinOperator_L2_NORMALIZATION:
1643       loadOperationTo<ir::operation::L2Normalization>(op, subg);
1644       return;
1645     case BuiltinOperator::BuiltinOperator_LEAKY_RELU:
1646       loadLeakyRelu(op, subg);
1647       return;
1648     case BuiltinOperator::BuiltinOperator_RANK:
1649       loadOperationTo<ir::operation::Rank>(op, subg);
1650       return;
1651     case BuiltinOperator::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM:
1652       loadUnidirectionalSequenceLSTM(op, subg);
1653       return;
1654     case BuiltinOperator::BuiltinOperator_DEPTH_TO_SPACE:
1655       loadDepthToSpace(op, subg);
1656       return;
1657     default:
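      // Reject any builtin operator that is not handled above.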
1658       throw std::runtime_error(
1659         std::string("Unsupported operation: ").append(EnumNameBuiltinOperator(builtin_op)));
1660   }
1661 }
1662
1663 template <typename LoaderDomain> void BaseLoader<LoaderDomain>::loadModel()
1664 {
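  // Verify the flatbuffer and read the root Model table from the loaded buffer.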
1665   LoaderDomain::VerifyModelBuffer(*_verifier.get());
1666   _model = LoaderDomain::GetModel(_base);
1667   // Version unused
1668   // const auto version = _model->version();
1669   // Description unused
1670   // const auto *description = _model->description();
1671   // Metadata buffer unused
1672   // const auto *metadata_buffer = _model->metadata_buffer();
1673   // Load subgraphs and map operations onto each subgraph
1674   const auto domain_subgraphs = _model->subgraphs();
1675   auto subgraphs = std::make_unique<ir::Subgraphs>();
1676   for (uint32_t subgraph_index = 0; subgraph_index < domain_subgraphs->size(); ++subgraph_index)
1677   {
1678     auto subg = loadSubgraph((*domain_subgraphs)[subgraph_index]);
1679     subgraphs->push(ir::SubgraphIndex{subgraph_index}, std::move(subg));
1680   }
1681   _subgraphs = std::move(subgraphs);
1682 }
1683
1684 } // namespace base_loader
1685 } // namespace onert
1686
1687 #endif //__BASE_LOADER_BASE_LOADER_H__