2 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include "OperationFactory.h"
18 #include "NNAPIConvert.h"
20 #include <ir/Operations.Include.h>
25 using namespace onert::ir;
27 void replaceDataType(Operands &operands, const OperandIndex &index, const DataType type)
29 assert(operands.exist(index));
30 operands.at(index).type(type);
33 ExplicitPadding makeExplicitPadding(Operands &operands, const OperandIndex &left_index,
34 const OperandIndex &right_index, const OperandIndex &top_index,
35 const OperandIndex &bottom_index)
37 auto left = operands.at(left_index).asScalar<int32_t>();
38 auto right = operands.at(right_index).asScalar<int32_t>();
39 auto top = operands.at(top_index).asScalar<int32_t>();
40 auto bottom = operands.at(bottom_index).asScalar<int32_t>();
42 if (left < 0 || right < 0 || top < 0 || bottom < 0)
44 throw std::runtime_error{"Cannot handle negative explicit padding value"};
47 ExplicitPadding param;
48 param.left = static_cast<uint32_t>(left);
49 param.right = static_cast<uint32_t>(right);
50 param.top = static_cast<uint32_t>(top);
51 param.bottom = static_cast<uint32_t>(bottom);
56 Stride makeStride(Operands &operands, const OperandIndex &horizontal_index,
57 const OperandIndex &vertical_index)
59 auto horizontal = operands.at(horizontal_index).asScalar<int32_t>();
60 auto vertical = operands.at(vertical_index).asScalar<int32_t>();
62 if (vertical < 0 || horizontal < 0)
64 throw std::runtime_error{"Cannot handle negative stride value"};
68 stride.horizontal = static_cast<uint32_t>(horizontal);
69 stride.vertical = static_cast<uint32_t>(vertical);
74 uint32_t getUint32Scalar(Operands &operands, const OperandIndex index)
76 auto int32_value = operands.at(index).asScalar<int32_t>();
79 throw std::runtime_error{"Cannot handle negative value"};
82 return static_cast<uint32_t>(int32_value);
85 OperationFactory::Generator
86 getReduceGenerator(const onert::ir::operation::Reduce::ReduceType reduce_type)
88 return [reduce_type](const OperationFactory::Param &init_param, Operands &operands) {
89 assert(init_param.input_count == 3);
90 assert(init_param.output_count == 1);
92 // Each input should be interpreted as follows:
94 // 0 -> Input Tensor Index
95 // 1 -> Reduced Axes Tensor Index
96 // 2 -> keep_dims Index
98 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
99 OperandIndexSequence outputs{init_param.outputs[0]};
101 operation::Reduce::Param param;
102 param.reduce_type = reduce_type;
103 param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0;
105 return new operation::Reduce{inputs, outputs, param};
109 template <typename T>
110 Operation *CreateSimpleUnaryOp(const OperationFactory::Param &init_param, Operands &)
112 assert(init_param.input_count == 1 && init_param.output_count == 1);
114 OperandIndexSequence outputs{init_param.outputs[0]};
116 // Each input should be interpreted as follows:
118 // 0 -> Input Tensor Index
119 OperandIndexSequence inputs{init_param.inputs[0]};
121 return new T{inputs, outputs};
124 // A generator function for binary ops with no params
125 template <typename T>
126 Operation *createSimpleBinaryOp(const OperationFactory::Param &init_param, Operands &)
128 assert(init_param.input_count == 2 && init_param.output_count == 1);
130 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
131 OperandIndexSequence outputs{init_param.outputs[0]};
133 return new T{inputs, outputs};
136 // A generator function for binary ops with no params
137 template <typename T>
138 Operation *createPool2DOp(const OperationFactory::Param &init_param, Operands &operands)
140 assert(init_param.input_count == 7 || init_param.input_count == 10);
141 assert(init_param.output_count == 1);
144 // 0 -> IFM Tensor Index
145 OperandIndexSequence inputs{init_param.inputs[0]};
146 OperandIndexSequence outputs{init_param.outputs[0]};
148 typename T::Param param;
149 if (init_param.input_count == 7) // support implicit padding
151 // Each input should be interpreted as follows:
153 // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
154 // 2 -> Horizontal (over width) Stride Index
155 // 3 -> Vertial (over height) Stride Index
156 // 4 -> Filter Width Index
157 // 5 -> Filter Height Index
158 // 6 -> FuseCode (activation) Index
160 const auto padding_index = OperandIndex{init_param.inputs[1]};
161 const auto hstride_index = OperandIndex{init_param.inputs[2]};
162 const auto vstride_index = OperandIndex{init_param.inputs[3]};
163 const auto kw_index = OperandIndex{init_param.inputs[4]};
164 const auto kh_index = OperandIndex{init_param.inputs[5]};
165 const auto activation_index = OperandIndex{init_param.inputs[6]};
168 NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
169 param.stride = makeStride(operands, hstride_index, vstride_index);
170 param.kw = getUint32Scalar(operands, kw_index);
171 param.kh = operands.at(kh_index).asScalar<uint32_t>();
173 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
175 else // support explicit padding
177 // Each input should be interpreted as follows:
179 // 1 -> Padding_left index
180 // 2 -> Padding_right index
181 // 3 -> Padding_top index
182 // 4 -> Padding_bottom index
183 // 5 -> Horizontal (over width) Stride Index
184 // 6 -> Vertial (over height) Stride Index
185 // 7 -> Filter Width Index
186 // 8 -> Filter Height Index
187 // 9 -> FuseCode (activation) Index
189 const auto padding_left_index = OperandIndex{init_param.inputs[1]};
190 const auto padding_right_index = OperandIndex{init_param.inputs[2]};
191 const auto padding_top_index = OperandIndex{init_param.inputs[3]};
192 const auto padding_bottom_index = OperandIndex{init_param.inputs[4]};
193 const auto hstride_index = OperandIndex{init_param.inputs[5]};
194 const auto vstride_index = OperandIndex{init_param.inputs[6]};
195 const auto kw_index = OperandIndex{init_param.inputs[7]};
196 const auto kh_index = OperandIndex{init_param.inputs[8]};
197 const auto activation_index = OperandIndex{init_param.inputs[9]};
199 param.padding.type = PaddingType::EXPLICIT;
200 param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
201 padding_top_index, padding_bottom_index);
202 param.stride = makeStride(operands, hstride_index, vstride_index);
203 param.kw = getUint32Scalar(operands, kw_index);
204 param.kh = getUint32Scalar(operands, kh_index);
206 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
208 return new T{inputs, outputs, param};
213 OperationFactory &OperationFactory::get()
215 static OperationFactory factory;
219 OperationFactory::OperationFactory()
221 // Each input should be interpreted as follows:
222 // 0 -> Input Tensor Index
223 // 1 -> Block size Index
224 _map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = createSimpleBinaryOp<operation::BatchToSpaceND>;
226 _map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param,
227 Operands &operands) {
228 assert((init_param.input_count == 8 || init_param.input_count == 11) &&
229 init_param.output_count == 1);
232 // 0 -> IFM Tensor Index
233 // 1 -> Kernel Tensor Index
234 // 2 -> Bias Tensor Index
235 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
236 OperandIndexSequence outputs{init_param.outputs[0]};
238 operation::DepthwiseConv2D::Param param;
239 if (init_param.input_count == 8)
241 // Imlicit Padding case
242 // Each input should be interpreted as follows:
244 // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
245 // 4 -> Stride (width) Index
246 // 5 -> Stride (height) INdex
247 // 6 -> Depthwise multiplier
248 // 7 -> Activation Index
250 const auto padding_index = OperandIndex{init_param.inputs[3]};
251 const auto hstride_index = OperandIndex{init_param.inputs[4]};
252 const auto vstride_index = OperandIndex{init_param.inputs[5]};
253 const auto multiplier_index = OperandIndex{init_param.inputs[6]};
254 const auto activation_index = OperandIndex{init_param.inputs[7]};
257 NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
258 param.stride = makeStride(operands, hstride_index, vstride_index);
259 param.multiplier = getUint32Scalar(operands, multiplier_index);
261 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
265 // Explicit Padding case
266 // Each input should be interpreted as follows:
268 // 3 -> Padding On the Left
269 // 4 -> Padding On the Right
270 // 5 -> Padding On the Top
271 // 6 -> Padding On the Bottom
272 // 7 -> Stride (width) Index
273 // 8 -> Stride (height) Index
274 // 9 -> Depthwise multiplier
275 // 10-> Activation Index
277 const auto padding_left_index = OperandIndex{init_param.inputs[3]};
278 const auto padding_right_index = OperandIndex{init_param.inputs[4]};
279 const auto padding_top_index = OperandIndex{init_param.inputs[5]};
280 const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
281 const auto hstride_index = OperandIndex{init_param.inputs[7]};
282 const auto vstride_index = OperandIndex{init_param.inputs[8]};
283 const auto multiplier_index = OperandIndex{init_param.inputs[9]};
284 const auto activation_index = OperandIndex{init_param.inputs[10]};
286 param.padding.type = PaddingType::EXPLICIT;
287 param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
288 padding_top_index, padding_bottom_index);
289 param.stride = makeStride(operands, hstride_index, vstride_index);
290 param.multiplier = getUint32Scalar(operands, multiplier_index);
292 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
295 return new operation::DepthwiseConv2D{inputs, outputs, param};
298 _map[ANEURALNETWORKS_MAX_POOL_2D] = createPool2DOp<operation::MaxPool2D>;
300 _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = createPool2DOp<operation::AvgPool2D>;
302 _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param,
303 Operands &operands) {
304 assert(init_param.input_count >= 2); // At least one one input tensor and axis
305 assert(init_param.output_count == 1);
307 // When there are N + 1 inputs, each input should be interpreted as follows:
309 // [0, N) -> Input tensors
313 OperandIndexSequence inputs;
314 for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
316 inputs.append(OperandIndex{init_param.inputs[n]});
318 OperandIndexSequence outputs{init_param.outputs[0]};
320 operation::Concat::Param param;
321 const OperandIndex axis_index{init_param.inputs[init_param.input_count - 1]};
322 param.axis = operands.at(axis_index).asScalar<int32_t>();
324 return new operation::Concat{inputs, outputs, param};
327 _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param, Operands &) {
328 assert(init_param.input_count == 2 && init_param.output_count == 1);
330 // Each input should be interpreted as follows:
332 // 0 -> A tensor, specifying the tensor to be reshaped.
333 // 1 -> A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32, defining the shape of the output
336 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
337 OperandIndexSequence outputs{init_param.outputs[0]};
339 operation::Reshape::Param param{};
341 return new operation::Reshape{inputs, outputs, param};
344 _map[ANEURALNETWORKS_FULLY_CONNECTED] = [](const OperationFactory::Param &init_param,
345 Operands &operands) {
346 assert(init_param.input_count == 4 && init_param.output_count == 1);
348 // Each input should be interpreted as follows:
350 // 0 -> A tensor, specifying the input.
351 // 1 -> A 2-D tensor, specifying the weights
352 // 2 -> A 1-D tensor, specifying the bias
353 // 3 -> An INT32 value, and has to be one of the FuseCode values
355 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
356 OperandIndexSequence outputs{init_param.outputs[0]};
358 operation::FullyConnected::Param param;
359 const auto activation_index = OperandIndex{init_param.inputs[3]};
361 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
363 return new operation::FullyConnected{inputs, outputs, param};
366 _map[ANEURALNETWORKS_SOFTMAX] = [](const OperationFactory::Param &init_param,
367 Operands &operands) {
368 assert(init_param.input_count == 2 && init_param.output_count == 1);
370 // Each input should be interpreted as follows:
372 // 0 -> A 2-D or 4-D tensor, specifying the tensor to be reshaped.
373 // 1 -> FLOAT32 value, specifying the positive scaling factor for the exponent, beta.
375 OperandIndexSequence inputs{init_param.inputs[0]};
376 OperandIndexSequence outputs{init_param.outputs[0]};
378 const auto beta_index = OperandIndex{init_param.inputs[1]};
380 operation::Softmax::Param param;
381 param.beta = operands.at(beta_index).asScalar<float>();
383 return new operation::Softmax{inputs, outputs, param};
386 _map[ANEURALNETWORKS_CAST] = [](const OperationFactory::Param &init_param, Operands &operands) {
387 assert(init_param.input_count == 1 && init_param.output_count == 1);
389 OperandIndexSequence outputs{init_param.outputs[0]};
391 // Each input should be interpreted as follows:
392 // 0 -> input Tensor Index
393 OperandIndexSequence inputs{init_param.inputs[0]};
395 // NNAPI uses QUANT_UINT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's input/output
396 if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
398 replaceDataType(operands, inputs.at(0), DataType::UINT8);
400 if (operands.at(outputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
402 replaceDataType(operands, outputs.at(0), DataType::UINT8);
405 return new operation::Cast{inputs, outputs};
408 // ANEURALNETWORKS_CAST_EX is deprecated
409 // TODO Remove ANEURALNETWORKS_CAST_EX
410 _map[ANEURALNETWORKS_CAST_EX] = _map[ANEURALNETWORKS_CAST];
412 _map[ANEURALNETWORKS_CONV_2D] = [](const OperationFactory::Param &init_param,
413 Operands &operands) {
414 using operation::Conv2D;
416 // inputCount is either 7 or 10 acccording to NN API specification.
417 // - Padding is implicit when inputCount is 7
418 // - Padding is explicit when inputCount is 10
419 assert(init_param.input_count == 7 || init_param.input_count == 10);
420 assert(init_param.output_count == 1);
422 // 0 -> IFM Tensor Index
423 // 1 -> Kernel Tensor Index
424 // 2 -> Bias Tensor Index
426 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
427 OperandIndexSequence outputs{init_param.outputs[0]};
431 if (init_param.input_count == 7) // support implicit padding
433 // Each input should be interpreted as follows:
435 // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
436 // 4 -> Stride (width) Index
437 // 5 -> Stride (height) INdex
438 // 6 -> Activation Index
440 const auto padding_index = OperandIndex{init_param.inputs[3]};
441 const auto hstride_index = OperandIndex{init_param.inputs[4]};
442 const auto vstride_index = OperandIndex{init_param.inputs[5]};
443 const auto activation_index = OperandIndex{init_param.inputs[6]};
446 NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
447 param.stride = makeStride(operands, hstride_index, vstride_index);
449 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
451 else if (init_param.input_count == 10) // support explicit padding
453 // Each input should be interpreted as follows:
455 // 3 -> Padding_left index
456 // 4 -> Padding_right index
457 // 5 -> Padding_top index
458 // 6 -> Padding_bottom index
459 // 7 -> Stride (width) Index
460 // 8 -> Stride (height) INdex
461 // 9 -> Activation Index
463 const auto padding_left_index = OperandIndex{init_param.inputs[3]};
464 const auto padding_right_index = OperandIndex{init_param.inputs[4]};
465 const auto padding_top_index = OperandIndex{init_param.inputs[5]};
466 const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
467 const auto hstride_index = OperandIndex{init_param.inputs[7]};
468 const auto vstride_index = OperandIndex{init_param.inputs[8]};
469 const auto activation_index = OperandIndex{init_param.inputs[9]};
471 param.padding.type = PaddingType::EXPLICIT;
472 param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
473 padding_top_index, padding_bottom_index);
474 param.stride = makeStride(operands, hstride_index, vstride_index);
476 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
479 return new Conv2D{inputs, outputs, param};
482 _map[ANEURALNETWORKS_ADD] = [](const OperationFactory::Param &init_param, Operands &operands) {
483 assert(init_param.input_count == 3);
484 assert(init_param.output_count == 1);
486 // Each input should be interpreted as follows:
488 // 0 -> Lefthand side operand
489 // 1 -> Righthand side operand
491 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
492 OperandIndexSequence outputs{init_param.outputs[0]};
494 operation::Add::Param param;
496 const auto activation_index = OperandIndex{init_param.inputs[2]};
498 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
500 return new operation::Add{inputs, outputs, param};
503 _map[ANEURALNETWORKS_ADDV2_EX] = _map[ANEURALNETWORKS_ADD];
505 _map[ANEURALNETWORKS_REDUCE_SUM] =
506 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::SUM);
508 // ANEURALNETWORKS_REDUCE_SUM_EX is deprecated
509 // TODO Remove ANEURALNETWORKS_REDUCE_SUM_EX
510 _map[ANEURALNETWORKS_REDUCE_SUM_EX] = _map[ANEURALNETWORKS_REDUCE_SUM];
512 _map[ANEURALNETWORKS_SUB] = [](const OperationFactory::Param &init_param, Operands &operands) {
513 assert(init_param.input_count == 3);
514 assert(init_param.output_count == 1);
516 // Each input should be interpreted as follows:
518 // 0 -> Lefthand side operand
519 // 1 -> Righthand side operand
521 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
522 OperandIndexSequence outputs{init_param.outputs[0]};
524 operation::Sub::Param param;
526 const auto activation_index = OperandIndex{init_param.inputs[2]};
528 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
530 return new operation::Sub{inputs, outputs, param};
533 _map[ANEURALNETWORKS_SLICE] = [](const OperationFactory::Param &init_param, Operands &) {
534 assert(init_param.input_count == 3 && init_param.output_count == 1);
536 OperandIndexSequence outputs{init_param.outputs[0]};
538 // Each input should be interpreted as follows:
540 // 0 -> Input Tensor Index
541 // 1 -> Begins Tensor Index
542 // 2 -> Sizes Tensor Index
543 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
545 return new operation::Slice{inputs, outputs};
548 _map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param,
549 Operands &operands) {
550 assert(init_param.input_count == 7 && init_param.output_count == 1);
552 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2],
553 init_param.inputs[3]};
554 OperandIndexSequence outputs{init_param.outputs[0]};
556 // Each input should be interpreted as follows:
558 // 1 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the starts of
559 // the dimensions of the input tensor to be sliced. The length must be
561 // 2 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the ends of
562 // the dimensions of the input tensor to be sliced. The length must be
564 // 3 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the strides of
565 // the dimensions of the input tensor to be sliced. The length must be
567 // 4 -> An {@link ANEURALNETWORKS_INT32} scalar, begin_mask. If the ith bit
568 // of begin_mask is set, begin[i] is ignored and the fullest possible
569 // range in that dimension is used instead.
570 // 5 -> An {@link ANEURALNETWORKS_INT32} scalar, end_mask. If the ith bit of
571 // end_mask is set, end[i] is ignored and the fullest possible range in
572 // that dimension is used instead.
573 // 6 -> An {@link ANEURALNETWORKS_INT32} scalar, shrink_axis_mask. An int32
574 // mask. If the ith bit of shrink_axis_mask is set, it implies that the
575 // ith specification shrinks the dimensionality by 1. A slice of size 1
576 // starting from begin[i] in the dimension must be preserved.
578 operation::StridedSlice::Param param;
580 param.begin_mask = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>();
581 param.end_mask = operands.at(OperandIndex{init_param.inputs[5]}).asScalar<std::int32_t>();
582 param.shrink_axis_mask =
583 operands.at(OperandIndex{init_param.inputs[6]}).asScalar<std::int32_t>();
585 return new operation::StridedSlice{inputs, outputs, param};
588 _map[ANEURALNETWORKS_TRANSPOSE] = [](const OperationFactory::Param &init_param,
589 Operands &operands) {
590 // TODO make this work with init_param.input_count == 1 (when permutation vector is optional)
593 // 0: An n-D tensor, specifying the tensor to be transposed.
594 // 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
595 // the permutation of the dimensions of the input tensor.
596 // The returned tensor's dimension i corresponds to the input dimension
597 // perm[i]. If perm is not given, it is set to (n-1...0), where n is the
598 // rank of the input tensor. Hence by default, this operation performs a
599 // regular matrix transpose on 2-D input Tensors.
600 assert(init_param.input_count == 2);
601 assert(init_param.output_count == 1);
603 OperandIndexSequence inputs{init_param.inputs[0]};
604 OperandIndexSequence outputs{init_param.outputs[0]};
605 std::vector<std::int32_t> perm =
606 operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
608 operation::Transpose::Param param;
609 param.perm.assign(perm.cbegin(), perm.cend());
611 return new operation::Transpose{inputs, outputs, param};
614 _map[ANEURALNETWORKS_MUL] = [](const OperationFactory::Param &init_param, Operands &operands) {
615 assert(init_param.input_count == 3 && init_param.output_count == 1);
617 OperandIndexSequence outputs{init_param.outputs[0]};
619 // Each input should be interpreted as follows:
621 // 0 -> LHS Tensor Index
622 // 1 -> RHS Tensor Index
623 // 2 -> Activation Index
625 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
627 operation::Mul::Param param;
629 const auto activation_index = OperandIndex{init_param.inputs[2]};
631 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
633 return new operation::Mul{inputs, outputs, param};
636 _map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param,
637 Operands &operands) {
638 assert(init_param.input_count == 1 || init_param.input_count == 2);
639 assert(init_param.output_count == 1);
641 OperandIndexSequence outputs{init_param.outputs[0]};
643 // Each input should be interpreted as follows:
645 // 0 -> An n-D tensor, the tensor to be squeezed.
646 // 1 -> An optional 1-D tensor of ANEURALNETWORKS_TENSOR_INT32. The dimensions to squeeze.
647 // If specified only squeezes the dimensions listed. Otherwise, squeezes all dimensions.
648 // The dimension index starts at 0. An error must be reported if squeezing a dimension that
651 // Add mandatory input index
652 OperandIndexSequence inputs{init_param.inputs[0]};
654 // Add dims index if specified
655 operation::Squeeze::Param param{};
656 if (init_param.input_count == 2)
658 auto squeeze_dims_idx = OperandIndex{init_param.inputs[1]};
659 assert(operands.at(squeeze_dims_idx).shape().rank() == 1);
660 assert(operands.at(squeeze_dims_idx).shape().dim(0) >= 0);
661 assert(static_cast<uint32_t>(operands.at(squeeze_dims_idx).shape().dim(0)) <=
663 param.ndim = operands.at(squeeze_dims_idx).shape().dim(0);
666 assert(operands.at(squeeze_dims_idx).data());
667 memcpy(param.dims, operands.at(squeeze_dims_idx).data()->base(),
668 param.ndim * sizeof(param.dims[0]));
672 return new operation::Squeeze{inputs, outputs, param};
675 _map[ANEURALNETWORKS_TANH] = CreateSimpleUnaryOp<operation::Tanh>;
677 _map[ANEURALNETWORKS_LOG] = CreateSimpleUnaryOp<operation::Log>;
679 _map[ANEURALNETWORKS_LOGISTIC] = CreateSimpleUnaryOp<operation::Logistic>;
681 _map[ANEURALNETWORKS_DIV] = [](const OperationFactory::Param &init_param, Operands &operands) {
682 assert(init_param.input_count == 3 && init_param.output_count == 1);
684 OperandIndexSequence outputs{init_param.outputs[0]};
686 // Each input should be interpreted as follows:
688 // 0 -> LHS Tensor Index
689 // 1 -> RHS Tensor Index
690 // 2 -> Activation Index
691 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
693 operation::Div::Param param;
695 const auto activation_index = OperandIndex{init_param.inputs[2]};
697 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
699 return new operation::Div{inputs, outputs, param};
702 _map[ANEURALNETWORKS_EXP] = CreateSimpleUnaryOp<operation::Exp>;
704 // ANEURALNETWORKS_EXP_EX is deprecated
705 // TODO Remove ANEURALNETWORKS_EXP_EX
706 _map[ANEURALNETWORKS_EXP_EX] = _map[ANEURALNETWORKS_EXP];
708 // Each input should be interpreted as follows:
709 // 0 -> Input Tensor Index
710 // 1 -> Axis Tensor Index
711 _map[ANEURALNETWORKS_EXPAND_DIMS] = createSimpleBinaryOp<operation::ExpandDims>;
713 _map[ANEURALNETWORKS_GREATER] = [](const OperationFactory::Param &init_param, Operands &) {
714 assert(init_param.input_count == 2 && init_param.output_count == 1);
716 OperandIndexSequence outputs{init_param.outputs[0]};
718 // Each input should be interpreted as follows:
720 // 0 -> input0 Tensor Index
721 // 1 -> input1 Tensor Index
722 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
724 operation::Comparison::Param param;
725 param.comparison_type = operation::Comparison::ComparisonType::Greater;
727 return new operation::Comparison{inputs, outputs, param};
730 _map[ANEURALNETWORKS_GREATER_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) {
731 assert(init_param.input_count == 2 && init_param.output_count == 1);
733 OperandIndexSequence outputs{init_param.outputs[0]};
735 // Each input should be interpreted as follows:
737 // 0 -> input0 Tensor Index
738 // 1 -> input1 Tensor Index
739 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
741 operation::Comparison::Param param;
742 param.comparison_type = operation::Comparison::ComparisonType::GreaterEqual;
744 return new operation::Comparison{inputs, outputs, param};
747 // ANEURALNETWORKS_GREATER_EQUAL_EX is deprecated
748 // TODO Remove ANEURALNETWORKS_GREATER_EQUAL_EX
749 _map[ANEURALNETWORKS_GREATER_EQUAL_EX] = [](const OperationFactory::Param &init_param,
750 Operands &operands) {
751 assert(init_param.input_count == 2 && init_param.output_count == 1);
753 OperandIndexSequence outputs{init_param.outputs[0]};
755 // Each input should be interpreted as follows:
757 // 0 -> input0 Tensor Index
758 // 1 -> input1 Tensor Index
759 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
761 operation::Comparison::Param param;
762 param.comparison_type = operation::Comparison::ComparisonType::GreaterEqual;
764 // Output operand type must be boolean
765 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
767 return new operation::Comparison{inputs, outputs, param};
770 _map[ANEURALNETWORKS_LESS] = [](const OperationFactory::Param &init_param, Operands &) {
771 assert(init_param.input_count == 2 && init_param.output_count == 1);
773 OperandIndexSequence outputs{init_param.outputs[0]};
775 // Each input should be interpreted as follows:
777 // 0 -> input0 Tensor Index
778 // 1 -> input1 Tensor Index
779 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
781 operation::Comparison::Param param;
782 param.comparison_type = operation::Comparison::ComparisonType::Less;
784 return new operation::Comparison{inputs, outputs, param};
787 _map[ANEURALNETWORKS_LESS_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) {
788 assert(init_param.input_count == 2 && init_param.output_count == 1);
790 OperandIndexSequence outputs{init_param.outputs[0]};
792 // Each input should be interpreted as follows:
794 // 0 -> input0 Tensor Index
795 // 1 -> input1 Tensor Index
796 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
798 operation::Comparison::Param param;
799 param.comparison_type = operation::Comparison::ComparisonType::LessEqual;
801 return new operation::Comparison{inputs, outputs, param};
804 // ANEURALNETWORKS_LESS_EX is deprecated
805 // TODO Remove ANEURALNETWORKS_LESS_EX
806 _map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param,
807 Operands &operands) {
808 assert(init_param.input_count == 2 && init_param.output_count == 1);
810 OperandIndexSequence outputs{init_param.outputs[0]};
812 // Each input should be interpreted as follows:
814 // 0 -> input0 Tensor Index
815 // 1 -> input1 Tensor Index
816 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
818 operation::Comparison::Param param;
819 param.comparison_type = operation::Comparison::ComparisonType::Less;
821 // Output operand type must be boolean
822 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
824 return new operation::Comparison{inputs, outputs, param};
827 _map[ANEURALNETWORKS_REDUCE_ALL] =
828 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::ALL);
830 _map[ANEURALNETWORKS_REDUCE_ANY] =
831 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::ANY);
833 _map[ANEURALNETWORKS_REDUCE_MAX] =
834 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::MAX);
836 // ANEURALNETWORKS_REDUCE_MAX_EX is deprecated
837 // TODO Remove ANEURALNETWORKS_REDUCE_MAX_EX
838 _map[ANEURALNETWORKS_REDUCE_MAX_EX] = _map[ANEURALNETWORKS_REDUCE_MAX];
840 _map[ANEURALNETWORKS_NOT_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) {
841 assert(init_param.input_count == 2 && init_param.output_count == 1);
843 OperandIndexSequence outputs{init_param.outputs[0]};
845 // Each input should be interpreted as follows:
847 // 0 -> input1 Tensor Index
848 // 1 -> input2 Tensor Index
849 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
851 operation::Comparison::Param param;
852 param.comparison_type = operation::Comparison::ComparisonType::NotEqual;
854 return new operation::Comparison{inputs, outputs, param};
857 // ANEURALNETWORKS_NOT_EQUAL_EX is deprecated
858 // TODO Remove ANEURALNETWORKS_NOT_EQUAL_EX
859 _map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param,
860 Operands &operands) {
861 assert(init_param.input_count == 2 && init_param.output_count == 1);
863 OperandIndexSequence outputs{init_param.outputs[0]};
865 // Each input should be interpreted as follows:
867 // 0 -> input1 Tensor Index
868 // 1 -> input2 Tensor Index
869 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
871 operation::Comparison::Param param;
872 param.comparison_type = operation::Comparison::ComparisonType::NotEqual;
874 // Output operand type must be boolean
875 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
877 return new operation::Comparison{inputs, outputs, param};
880 _map[ANEURALNETWORKS_LOGICAL_AND] = createSimpleBinaryOp<operation::LogicalAnd>;
882 // ANEURALNETWORKS_LOGICAL_AND_EX is deprecated
883 // TODO Remove ANEURALNETWORKS_LOGICAL_AND_EX
884 _map[ANEURALNETWORKS_LOGICAL_AND_EX] = [](const OperationFactory::Param &init_param,
885 Operands &operands) {
886 assert(init_param.input_count == 2 && init_param.output_count == 1);
888 OperandIndexSequence outputs{init_param.outputs[0]};
890 // Each input should be interpreted as follows:
892 // 0 -> input0 Tensor Index
893 // 1 -> input1 Tensor Index
894 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
896 // This operation's operands must be boolean type.
897 replaceDataType(operands, inputs.at(0), DataType::BOOL8);
898 replaceDataType(operands, inputs.at(1), DataType::BOOL8);
899 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
901 return new operation::LogicalAnd{inputs, outputs};
904 _map[ANEURALNETWORKS_RSQRT] = CreateSimpleUnaryOp<operation::RSQRT>;
906 _map[ANEURALNETWORKS_SELECT] = [](const OperationFactory::Param &init_param, Operands &) {
907 assert(init_param.input_count == 3 && init_param.output_count == 1);
909 OperandIndexSequence outputs{init_param.outputs[0]};
911 // Each input should be interpreted as follows:
913 // 0 -> Condition Tensor Index
914 // 1 -> Input X(true) Tensor Index
915 // 2 -> Input Y(false) Tensor Index
916 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
918 return new operation::Select{inputs, outputs};
921 _map[ANEURALNETWORKS_SELECT_V2_EX] = [](const OperationFactory::Param &init_param, Operands &) {
922 assert(init_param.input_count == 3 && init_param.output_count == 1);
924 OperandIndexSequence outputs{init_param.outputs[0]};
926 // Each input should be interpreted as follows:
928 // 0 -> Condition Tensor Index
929 // 1 -> Input X(true) Tensor Index
930 // 2 -> Input Y(false) Tensor Index
931 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
933 return new operation::Select{inputs, outputs};
936 // ANEURALNETWORKS_RSQRT_EX is deprecated
937 // TODO Remove ANEURALNETWORKS_RSQRT_EX
938 _map[ANEURALNETWORKS_RSQRT_EX] = _map[ANEURALNETWORKS_RSQRT];
940 _map[ANEURALNETWORKS_RELU] = CreateSimpleUnaryOp<operation::ReLU>;
942 _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param,
943 Operands &operands) {
944 assert(init_param.input_count == 3 && init_param.output_count == 1);
946 OperandIndexSequence outputs{init_param.outputs[0]};
948 // Each input should be interpreted as follows:
953 OperandIndexSequence inputs{init_param.inputs[0]};
955 operation::ResizeBilinear::Param param;
956 param.height_out = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
957 param.width_out = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>();
958 param.align_corners = false;
959 param.half_pixel_centers = false;
960 return new operation::ResizeBilinear{inputs, outputs, param};
963 _map[ANEURALNETWORKS_RELU1] = CreateSimpleUnaryOp<operation::ReLU1>;
965 _map[ANEURALNETWORKS_RELU6] = CreateSimpleUnaryOp<operation::ReLU6>;
967 _map[ANEURALNETWORKS_REVERSE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
968 assert(init_param.input_count == 2 && init_param.output_count == 1);
970 // Each input should be interpreted as follows:
972 // 0 -> Input Tensor Index
973 // 1 -> Axis Tensor Index
975 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
976 OperandIndexSequence outputs{init_param.outputs[0]};
978 return new operation::Reverse{inputs, outputs};
981 _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, Operands &operands) {
982 assert(init_param.input_count == 6 && init_param.output_count == 2);
984 // Each input should be interpreted as follows:
986 // 0 -> Input Tensor Index
987 // 1 -> Weights Tensor Index
988 // 2 -> Recurrent Weights Tensor Index
989 // 3 -> Bias Tensor Index
990 // 4 -> Hidden state (in) Index
991 // 5 -> Activation Index
993 OperandIndexSequence inputs;
994 for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
996 inputs.append(OperandIndex{init_param.inputs[n]});
998 OperandIndexSequence outputs;
999 for (uint32_t n = 0; n < init_param.output_count; ++n)
1001 outputs.append(OperandIndex{init_param.outputs[n]});
1004 operation::RNN::Param param;
1005 const auto activation_index = OperandIndex{init_param.inputs[5]};
1007 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
1009 return new operation::RNN{inputs, outputs, param};
1012 _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param, Operands &) {
// Element-wise floor: one input tensor, one output tensor, no parameters.
1013 assert(init_param.input_count == 1 && init_param.output_count == 1);
1015 OperandIndexSequence outputs{init_param.outputs[0]};
1017 // Each input should be interpreted as follows:
1018 // 0 -> input Tensor Index
1019 OperandIndexSequence inputs{init_param.inputs[0]};
1021 return new operation::Floor{inputs, outputs};
1024 _map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param,
1026 assert(init_param.input_count == 3 && init_param.output_count == 1);
1028 OperandIndexSequence outputs{init_param.outputs[0]};
1030 // Each input should be interpreted as follows:
1032 // 0 -> Input Tensor Index
1033 // 1 -> Block size Index
1034 // 2 -> Paddings Index
1035 OperandIndexSequence inputs;
1036 for (uint32_t n = 0; n < init_param.input_count; ++n)
1038 inputs.append(OperandIndex{init_param.inputs[n]});
1041 return new operation::SpaceToBatchND{inputs, outputs};
1044 _map[ANEURALNETWORKS_SPACE_TO_DEPTH] = [](const OperationFactory::Param &init_param,
1045 Operands &operands) {
1046 assert(init_param.input_count == 2 && init_param.output_count == 1);
1048 OperandIndexSequence outputs{init_param.outputs[0]};
1050 // Each input should be interpreted as follows:
1052 // 0 -> Input Tensor Index
1053 // 1 -> Block size Index
1054 OperandIndexSequence inputs{init_param.inputs[0]};
1056 operation::SpaceToDepth::Param param;
1057 param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1059 return new operation::SpaceToDepth{inputs, outputs, param};
1062 _map[ANEURALNETWORKS_L2_POOL_2D] = createPool2DOp<operation::L2Pool2D>;
1064 _map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param,
1066 assert(init_param.input_count == 2 && init_param.output_count == 1);
1068 OperandIndexSequence outputs{init_param.outputs[0]};
1070 // Each input should be interpreted as follows:
1072 // 0 -> Lookups Index
1073 // 1 -> Values Index
1074 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1076 return new operation::EmbeddingLookup{inputs, outputs};
1079 _map[ANEURALNETWORKS_L2_NORMALIZATION] = [](const OperationFactory::Param &init_param,
1081 assert(init_param.input_count == 1 && init_param.output_count == 1);
1083 OperandIndexSequence outputs{init_param.outputs[0]};
1085 // Each input should be interpreted as follows:
1086 // 0 -> input Tensor Index
1087 OperandIndexSequence inputs{init_param.inputs[0]};
1089 return new operation::L2Normalization{inputs, outputs};
1092 _map[ANEURALNETWORKS_HASHTABLE_LOOKUP] = [](const OperationFactory::Param &init_param,
1094 assert(init_param.input_count == 3 && init_param.output_count == 2);
1096 // Each output should be interpreted as follows:
1098 // 0 -> Output Index
1100 OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]};
1102 // Each input should be interpreted as follows:
1104 // 0 -> Lookups Index
1106 // 2 -> Values Index
1107 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
1109 return new operation::HashtableLookup{inputs, outputs};
1112 _map[ANEURALNETWORKS_PRELU] = [](const OperationFactory::Param &init_param, Operands &) {
1113 assert(init_param.input_count == 2 && init_param.output_count == 1);
1115 OperandIndexSequence outputs{init_param.outputs[0]};
1117 // Each input should be interpreted as follows:
1119 // 0 -> input Tensor Index
1120 // 1 -> alpha Tensor Index
1121 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1123 return new operation::PReLU{inputs, outputs};
1126 // ANEURALNETWORKS_PRELU_EX is deprecated
1127 // TODO Remove ANEURALNETWORKS_PRELU_EX
1128 _map[ANEURALNETWORKS_PRELU_EX] = _map[ANEURALNETWORKS_PRELU];
1130 _map[ANEURALNETWORKS_TRANSPOSE_CONV_EX] = [](const OperationFactory::Param &init_param,
1131 Operands &operands) {
1132 assert(init_param.input_count == 6 && init_param.output_count == 1);
1134 OperandIndexSequence outputs{init_param.outputs[0]};
1136 // Each input should be interpreted as follows:
1138 // 0 -> Output Shape Index
1139 // 1 -> Weights Index
1140 // 2 -> Input Tensor Index
1141 // 3 -> Padding Type
1142 // 4 -> Stride width
1143 // 5 -> Stride height
1145 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
1147 operation::TransposeConv::Param param;
1149 const auto padding_index = OperandIndex{init_param.inputs[3]};
1150 const auto hstride_index = OperandIndex{init_param.inputs[4]};
1151 const auto vstride_index = OperandIndex{init_param.inputs[5]};
1153 param.padding.type =
1154 NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
1155 param.stride = makeStride(operands, hstride_index, vstride_index);
1157 return new operation::TransposeConv{inputs, outputs, param};
1160 _map[ANEURALNETWORKS_SQRT] = [](const OperationFactory::Param &init_param, Operands &) {
// Element-wise square root: one input tensor, one output tensor.
1161 assert(init_param.input_count == 1 && init_param.output_count == 1);
1163 OperandIndexSequence outputs{init_param.outputs[0]};
1165 // Each input should be interpreted as follows:
1166 // 0 -> input Tensor Index
1168 OperandIndexSequence inputs{init_param.inputs[0]};
1169 return new operation::SQRT{inputs, outputs};
1172 // ANEURALNETWORKS_SQRT_EX is deprecated
1173 // TODO Remove ANEURALNETWORKS_SQRT_EX
// The deprecated _EX code reuses the exact same generator as the standard op.
1174 _map[ANEURALNETWORKS_SQRT_EX] = _map[ANEURALNETWORKS_SQRT];
1176 _map[ANEURALNETWORKS_LOGICAL_OR] = [](const OperationFactory::Param &init_param, Operands &) {
1177 assert(init_param.input_count == 2 && init_param.output_count == 1);
1179 OperandIndexSequence outputs{init_param.outputs[0]};
1181 // Each input should be interpreted as follows:
1183 // 0 -> input0 Tensor Index
1184 // 1 -> input1 Tensor Index
1185 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1187 return new operation::LogicalOr{inputs, outputs};
1190 // ANEURALNETWORKS_LOGICAL_OR_EX is deprecated
1191 // TODO Remove ANEURALNETWORKS_LOGICAL_OR_EX
1192 _map[ANEURALNETWORKS_LOGICAL_OR_EX] = [](const OperationFactory::Param &init_param,
1193 Operands &operands) {
1194 assert(init_param.input_count == 2 && init_param.output_count == 1);
1196 OperandIndexSequence outputs{init_param.outputs[0]};
1198 // Each input should be interpreted as follows:
1200 // 0 -> input0 Tensor Index
1201 // 1 -> input1 Tensor Index
1202 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1204 // This operation's operands must be boolean type.
1205 replaceDataType(operands, inputs.at(0), DataType::BOOL8);
1206 replaceDataType(operands, inputs.at(1), DataType::BOOL8);
1207 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
1209 return new operation::LogicalOr{inputs, outputs};
1212 _map[ANEURALNETWORKS_LOGICAL_NOT] = CreateSimpleUnaryOp<operation::LogicalNot>;
1214 // ANEURALNETWORKS_LOGICAL_NOT_EX is deprecated
1215 // TODO Remove ANEURALNETWORKS_LOGICAL_NOT_EX
1216 _map[ANEURALNETWORKS_LOGICAL_NOT_EX] = [](const OperationFactory::Param &init_param,
1217 Operands &operands) {
1218 assert(init_param.input_count == 1 && init_param.output_count == 1);
1220 OperandIndexSequence outputs{init_param.outputs[0]};
1222 // Each input should be interpreted as follows:
1224 // 0 -> input Tensor Index
1225 OperandIndexSequence inputs{init_param.inputs[0]};
1227 // This operation's operands must be boolean type.
1228 replaceDataType(operands, inputs.at(0), DataType::BOOL8);
1229 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
1231 return new operation::LogicalNot{inputs, outputs};
1234 _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) {
1235 assert(init_param.input_count == 23 && init_param.output_count == 4);
1237 // Each input should be interpreted as follows:
1239 // 0 -> Input Tensor Index
1240 // 1 -> Input to Input Tensor Index
1241 // 2 -> Input to Forget Tensor Index
1242 // 3 -> Input to Cell Tensor Index
1243 // 4 -> Input to Output Tensor Index
1244 // 5 -> Recurrent to Input Weights Tensor Index
1245 // 6 -> Recurrent to Forget Weights Tensor Index
1246 // 7 -> Recurrent to Cell Weights Tensor Index
1247 // 8 -> Recurrent to Output Weights Tensor Index
1248 // 9 -> Cell to Input Weights Tensor Index
1249 // 10 -> Cell to Forget Weights Tensor Index
1250 // 11 -> Cell to Output Weights Tensor Index
1251 // 12 -> Input Gate Bias Tensor Index
1252 // 13 -> Forget Gate Bias Tensor Index
1253 // 14 -> Cell Bias Tensor Index
1254 // 15 -> Output Gate Bias Tensor Index
1255 // 16 -> Projection Weights Tensor Index
1256 // 17 -> Projection Bias Tensor Index
1257 // 18 -> Output State In Tensor Index
1258 // 19 -> Cell State In Tensor Index
1259 OperandIndexSequence inputs;
1260 for (uint32_t n = 0; n < init_param.input_count - 3; ++n)
1262 inputs.append(OperandIndex{init_param.inputs[n]});
1265 // Each output should be interpreted as follows:
1267 // 0 -> Scratch Buffer Tensor Index
1268 // 1 -> Output State Out Tensor Index
1269 // 2 -> Cell State Out Tensor Index
1270 // 3 -> Output Tensor Index
1271 OperandIndexSequence outputs;
1272 for (uint32_t n = 0; n < init_param.output_count; ++n)
1274 outputs.append(OperandIndex{init_param.outputs[n]});
1277 operation::LSTM::Param param;
1278 const auto activation_index = OperandIndex{init_param.inputs[20]};
1279 switch (operands.at(activation_index).asScalar<int32_t>())
1282 param.activation = Activation::NONE;
1285 param.activation = Activation::RELU;
1288 param.activation = Activation::RELU1;
1291 param.activation = Activation::RELU6;
1294 param.activation = Activation::TANH;
1297 param.activation = Activation::SIGMOID;
1300 throw std::runtime_error("Unsupported activation type");
1303 param.cell_threshold = operands.at(OperandIndex{init_param.inputs[21]}).asScalar<float>();
1304 param.projection_threshold = operands.at(OperandIndex{init_param.inputs[22]}).asScalar<float>();
1306 return new operation::LSTM{inputs, outputs, param};
1309 _map[ANEURALNETWORKS_EQUAL] = [](const OperationFactory::Param &init_param, Operands &) {
// Builds a Comparison operation with Equal semantics. The output type is
// left as declared by the model; only the deprecated _EX variant below
// forces the output operand to BOOL8.
1310 assert(init_param.input_count == 2 && init_param.output_count == 1);
1312 OperandIndexSequence outputs{init_param.outputs[0]};
1314 // Each input should be interpreted as follows:
1316 // 0 -> input0 Tensor Index
1317 // 1 -> input1 Tensor Index
1318 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1320 operation::Comparison::Param param;
1321 param.comparison_type = operation::Comparison::ComparisonType::Equal;
1323 return new operation::Comparison{inputs, outputs, param};
1326 // ANEURALNETWORKS_EQUAL_EX is deprecated
1327 // TODO Remove ANEURALNETWORKS_EQUAL_EX
1328 _map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param,
1329 Operands &operands) {
1330 assert(init_param.input_count == 2 && init_param.output_count == 1);
1332 OperandIndexSequence outputs{init_param.outputs[0]};
1334 // Each input should be interpreted as follows:
1336 // 0 -> input0 Tensor Index
1337 // 1 -> input1 Tensor Index
1338 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1340 operation::Comparison::Param param;
1341 param.comparison_type = operation::Comparison::ComparisonType::Equal;
1343 // Output operand type must be boolean
1344 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
1346 return new operation::Comparison{inputs, outputs, param};
1349 _map[ANEURALNETWORKS_SQUARED_DIFFERENCE_EX] = [](const OperationFactory::Param &init_param,
1351 assert(init_param.input_count == 2 && init_param.output_count == 1);
1353 OperandIndexSequence outputs{init_param.outputs[0]};
1355 // Each input should be interpreted as follows:
1357 // 0 -> LHS Tensor Index
1358 // 1 -> RHS Tensor Index
1359 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1361 return new operation::SquaredDifference{inputs, outputs};
1364 _map[ANEURALNETWORKS_TOPK_V2] = [](const OperationFactory::Param &init_param,
1365 Operands &operands) {
// TopK: input[1] (k) is a compile-time scalar consumed into the Param here,
// so only input[0] is forwarded as an IR input.
1366 assert(init_param.input_count == 2 && init_param.output_count == 2);
1368 // Each output should be interpreted as follows:
1370 // 0 -> Index for Output Values
1371 // 1 -> Index for Output Indices
1372 OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]};
1374 // Each input should be interpreted as follows:
1376 // 0 -> Index for Input Data
1378 OperandIndexSequence inputs{init_param.inputs[0]};
1380 operation::TopKV2::Param param;
1381 param.k = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1383 return new operation::TopKV2{inputs, outputs, param};
1386 // ANEURALNETWORKS_TOPK_V2_EX is deprecated
1387 // TODO Remove ANEURALNETWORKS_TOPK_V2_EX
1388 _map[ANEURALNETWORKS_TOPK_V2_EX] = _map[ANEURALNETWORKS_TOPK_V2];
1390 _map[ANEURALNETWORKS_GATHER] = [](const OperationFactory::Param &init_param, Operands &operands) {
1391 assert(init_param.input_count == 3 && init_param.output_count == 1);
1393 OperandIndexSequence outputs{init_param.outputs[0]};
1395 // Each input should be interpreted as follows:
1397 // 0 -> input Tensor Index
1399 // 2 -> indices Tensor Index
1400 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[2]};
1402 operation::Gather::Param param;
1403 param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
1405 return new operation::Gather{inputs, outputs, param};
1408 // ANEURALNETWORKS_GATHER_EX is deprecated
1409 // TODO Remove ANEURALNETWORKS_GATHER_EX
1410 _map[ANEURALNETWORKS_GATHER_EX] = _map[ANEURALNETWORKS_GATHER];
1412 _map[ANEURALNETWORKS_NEG] = CreateSimpleUnaryOp<operation::Neg>;
1414 // ANEURALNETWORKS_NEG_EX is deprecated
1415 // TODO Remove ANEURALNETWORKS_NEG_EX
1416 _map[ANEURALNETWORKS_NEG_EX] = _map[ANEURALNETWORKS_NEG];
1418 _map[ANEURALNETWORKS_ABS] = CreateSimpleUnaryOp<operation::Abs>;
1420 // ANEURALNETWORKS_ABS_EX is deprecated
1421 // TODO Remove ANEURALNETWORKS_ABS_EX
1422 _map[ANEURALNETWORKS_ABS_EX] = _map[ANEURALNETWORKS_ABS];
1424 _map[ANEURALNETWORKS_ARGMAX] = [](const OperationFactory::Param &init_param, Operands &operands) {
// ArgMax: the axis operand (input[1]) is read as a scalar into the Param,
// so only input[0] is forwarded as an IR input.
1425 assert(init_param.input_count == 2 && init_param.output_count == 1);
1427 OperandIndexSequence outputs{init_param.outputs[0]};
1429 // Each input should be interpreted as follows:
1431 // 0 -> Input Tensor Index
1432 // 1 -> Axis Tensor Index
1433 OperandIndexSequence inputs{init_param.inputs[0]};
1435 operation::ArgMax::Param param;
1436 param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1438 return new operation::ArgMax{inputs, outputs, param};
1441 // ANEURALNETWORKS_ARGMAX_EX is deprecated
1442 // TODO Remove ANEURALNETWORKS_ARGMAX_EX
1443 _map[ANEURALNETWORKS_ARGMAX_EX] = _map[ANEURALNETWORKS_ARGMAX];
1445 _map[ANEURALNETWORKS_DEQUANTIZE] = CreateSimpleUnaryOp<operation::Dequantize>;
1447 _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) {
// MEAN is lowered to the generic Reduce operation with ReduceType::MEAN.
// keep_dims (input[2]) is consumed here as a scalar flag, so only the ifm
// and axis operands are forwarded as IR inputs.
1448 assert(init_param.input_count == 3 && init_param.output_count == 1);
1450 OperandIndexSequence outputs{init_param.outputs[0]};
1452 // Each input should be interpreted as follows:
1454 // 0 -> ifm Tensor Index
1455 // 1 -> axis Tensor Index
1456 // 2 -> keep_dims Index
1457 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1459 operation::Reduce::Param param;
1460 param.reduce_type = operation::Reduce::ReduceType::MEAN;
// Any non-zero scalar means "keep reduced dimensions".
1461 param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>() != 0;
1463 return new operation::Reduce{inputs, outputs, param};
1466 _map[ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION] = [](const OperationFactory::Param &init_param,
1467 Operands &operands) {
1468 assert(init_param.input_count == 5 && init_param.output_count == 1);
1470 OperandIndexSequence outputs{init_param.outputs[0]};
1472 OperandIndexSequence inputs{init_param.inputs[0]};
1474 operation::LocalResponseNormalization::Param param;
1475 param.radius = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1476 param.bias = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<float>();
1477 param.alpha = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<float>();
1478 param.beta = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<float>();
1480 return new operation::LocalResponseNormalization{inputs, outputs, param};
1483 _map[ANEURALNETWORKS_DEPTH_TO_SPACE] = [](const OperationFactory::Param &init_param,
1484 Operands &operands) {
1485 assert(init_param.input_count == 2 && init_param.output_count == 1);
1487 OperandIndexSequence outputs{init_param.outputs[0]};
1489 // Each input should be interpreted as follows:
1491 // 0 -> Input Tensor Index
1492 // 1 -> Block size Index
1493 OperandIndexSequence inputs{init_param.inputs[0]};
1495 operation::DepthToSpace::Param param;
1496 param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1498 return new operation::DepthToSpace{inputs, outputs, param};
1501 _map[ANEURALNETWORKS_PACK_EX] = [](const OperationFactory::Param &init_param,
1502 Operands &operands) {
// Pack: the first (input_count - 2) operands are the tensors to pack;
// the last two operands are the scalar `num` and `axis` parameters.
1503 assert(init_param.input_count >= 3 && init_param.output_count == 1);
1505 OperandIndexSequence outputs{init_param.outputs[0]};
1506 OperandIndexSequence inputs;
1507 for (uint32_t n = 0; n < init_param.input_count - 2; ++n)
1509 inputs.append(OperandIndex{init_param.inputs[n]});
1512 operation::Pack::Param param;
1513 const auto num_index = OperandIndex{init_param.inputs[init_param.input_count - 2]};
1514 const auto axis_index = OperandIndex{init_param.inputs[init_param.input_count - 1]};
1515 param.num = operands.at(num_index).asScalar<int32_t>();
1516 param.axis = operands.at(axis_index).asScalar<int32_t>();
1518 return new operation::Pack{inputs, outputs, param};
1521 _map[ANEURALNETWORKS_REDUCE_MIN] =
1522 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::MIN);
1524 // ANEURALNETWORKS_REDUCE_MIN_EX is deprecated
1525 // TODO Remove ANEURALNETWORKS_REDUCE_MIN_EX
1526 _map[ANEURALNETWORKS_REDUCE_MIN_EX] = _map[ANEURALNETWORKS_REDUCE_MIN];
1528 _map[ANEURALNETWORKS_SPLIT] = [](const OperationFactory::Param &init_param, Operands &operands) {
1529 assert(init_param.input_count == 3);
1530 assert(init_param.output_count >= 1); // At least one output tensor and axis
1532 OperandIndexSequence inputs{init_param.inputs[0]};
1533 OperandIndexSequence outputs;
1534 for (uint32_t n = 0; n < init_param.output_count; ++n)
1536 outputs.append(OperandIndex{init_param.outputs[n]});
1539 operation::Split::Param param;
1540 param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1541 param.num_splits = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<std::int32_t>();
1543 return new operation::Split{inputs, outputs, param};
1546 _map[ANEURALNETWORKS_SPLIT_V_EX] = [](const OperationFactory::Param &init_param,
1547 Operands &operands) {
1548 assert(init_param.input_count == 4);
1549 assert(init_param.output_count >= 1); // At least one output tensor and axis
1551 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
1552 OperandIndexSequence outputs;
1553 for (uint32_t n = 0; n < init_param.output_count; ++n)
1555 outputs.append(OperandIndex{init_param.outputs[n]});
1558 operation::SplitV::Param param;
1559 param.num_splits = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<std::int32_t>();
1560 return new operation::SplitV{inputs, outputs, param};
1563 // ANEURALNETWORKS_SPLIT_EX is deprecated
1564 // TODO Remove ANEURALNETWORKS_SPLIT_EX
1565 _map[ANEURALNETWORKS_SPLIT_EX] = _map[ANEURALNETWORKS_SPLIT];
1567 _map[ANEURALNETWORKS_UNPACK_EX] = [](const OperationFactory::Param &init_param,
1568 Operands &operands) {
1569 assert(init_param.input_count == 3 && init_param.output_count >= 1);
1571 OperandIndexSequence inputs{init_param.inputs[0]};
1572 OperandIndexSequence outputs;
1573 for (uint32_t n = 0; n < init_param.output_count; ++n)
1575 outputs.append(OperandIndex{init_param.outputs[n]});
1578 operation::Unpack::Param param;
1579 const auto num_index = OperandIndex{init_param.inputs[1]};
1580 const auto axis_index = OperandIndex{init_param.inputs[2]};
1581 param.num = operands.at(num_index).asScalar<int32_t>();
1582 param.axis = operands.at(axis_index).asScalar<int32_t>();
1584 return new operation::Unpack{inputs, outputs, param};
1587 _map[ANEURALNETWORKS_PAD] = [](const OperationFactory::Param &init_param, Operands &) {
// Handles both 2-input PAD and 3-input form (third operand is the pad
// value); this same generator is aliased for ANEURALNETWORKS_PAD_V2 below.
// NOTE(review): output_count is asserted `>= 1` while only outputs[0] is
// used — likely intended to be `== 1`; confirm before tightening.
1588 assert(init_param.input_count >= 2 && init_param.input_count <= 3 &&
1589 init_param.output_count >= 1);
1591 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1592 if (init_param.input_count == 3)
1594 inputs.append(OperandIndex{init_param.inputs[2]});
1596 OperandIndexSequence outputs{init_param.outputs[0]};
1598 return new operation::Pad{inputs, outputs};
1601 _map[ANEURALNETWORKS_PAD_V2] = _map[ANEURALNETWORKS_PAD];
1603 _map[ANEURALNETWORKS_MINIMUM] = createSimpleBinaryOp<operation::Min>;
1605 _map[ANEURALNETWORKS_MAXIMUM] = createSimpleBinaryOp<operation::Max>;
1607 _map[ANEURALNETWORKS_ONE_HOT_EX] = [](const OperationFactory::Param &init_param,
1608 Operands &operands) {
1609 assert(init_param.input_count == 5);
1610 assert(init_param.output_count == 1);
1611 // Each input should be interpreted as follows:
1613 // 0 -> indices tensor
1614 // 1 -> depth tensor
1615 // 2 -> on_value tensor
1616 // 3 -> off_value tensor
1618 OperandIndexSequence inputs;
1619 for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
1621 inputs.append(OperandIndex{init_param.inputs[n]});
1623 OperandIndexSequence outputs{init_param.outputs[0]};
1625 operation::OneHot::Param param;
1626 param.axis = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>();
1628 return new operation::OneHot{inputs, outputs, param};
1631 _map[ANEURALNETWORKS_COS_EX] = [](const OperationFactory::Param &init_param, Operands &) {
1632 assert(init_param.input_count == 1 && init_param.output_count == 1);
1634 OperandIndexSequence inputs{init_param.inputs[0]};
1635 OperandIndexSequence outputs{init_param.outputs[0]};
1637 return new operation::Cos{inputs, outputs};
1640 _map[ANEURALNETWORKS_SIN] = [](const OperationFactory::Param &init_param, Operands &) {
1641 assert(init_param.input_count == 1 && init_param.output_count == 1);
1643 OperandIndexSequence inputs{init_param.inputs[0]};
1644 OperandIndexSequence outputs{init_param.outputs[0]};
1646 return new operation::Sin{inputs, outputs};
1649 _map[ANEURALNETWORKS_SHAPE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
1650 assert(init_param.input_count == 1 && init_param.output_count == 1);
1652 OperandIndexSequence inputs{init_param.inputs[0]};
1653 OperandIndexSequence outputs{init_param.outputs[0]};
1655 return new operation::Shape{inputs, outputs};
1658 _map[ANEURALNETWORKS_REDUCE_PROD] =
1659 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::PROD);
1661 _map[ANEURALNETWORKS_ROUND_EX] = [](const OperationFactory::Param &init_param, Operands &) {
1662 assert(init_param.input_count == 1 && init_param.output_count == 1);
1664 OperandIndexSequence outputs{init_param.outputs[0]};
1666 // Each input should be interpreted as follows:
1667 // 0 -> input Tensor Index
1668 OperandIndexSequence inputs{init_param.inputs[0]};
1670 return new operation::Round{inputs, outputs};
1673 _map[ANEURALNETWORKS_RANGE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
1674 assert(init_param.input_count == 3 && init_param.output_count == 1);
1676 OperandIndexSequence outputs{init_param.outputs[0]};
1678 // Each input should be interpreted as follows:
1679 // 0 -> start Tensor Index
1680 // 1 -> limit Tensor Index
1681 // 2 -> delta Tensor Index
1683 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
1685 return new operation::Range{inputs, outputs};
1688 // Each input should be interpreted as follows:
1689 // 0 -> LHS Tensor Index
1690 // 1 -> RHS Tensor Index
1691 _map[ANEURALNETWORKS_POW] = createSimpleBinaryOp<operation::Pow>;
1693 // Each input should be interpreted as follows:
1694 // 0 -> A tensor, specifying the input.
1695 // 1 -> A 1-D tensor, specifying the value
1696 _map[ANEURALNETWORKS_FILL_EX] = createSimpleBinaryOp<operation::Fill>;
1698 _map[ANEURALNETWORKS_ZEROS_LIKE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
1699 assert(init_param.input_count == 1 && init_param.output_count == 1);
1701 OperandIndexSequence outputs{init_param.outputs[0]};
1703 // Each input should be interpreted as follows:
1704 // 0 -> input Tensor Index
1705 OperandIndexSequence inputs{init_param.inputs[0]};
1707 return new operation::ZerosLike{inputs, outputs};
1710 // Each input should be interpreted as follows:
1711 // 0 -> Input Tensor Index
1712 // 1 -> Multiple Tensor Index
1713 _map[ANEURALNETWORKS_TILE] = createSimpleBinaryOp<operation::Tile>;
1715 _map[ANEURALNETWORKS_MATRIX_BAND_PART_EX] = [](const OperationFactory::Param &init_param,
// Copies a tensor keeping only a central band of each innermost matrix.
1717 assert(init_param.input_count == 3);
1718 assert(init_param.output_count == 1);
1719 // Each input should be interpreted as follows:
1721 // 0 -> A tensor, input
1722 // 1 -> A 0-D tensor, number of lower diagonals to keep
1723 // 2 -> A 0-D tensor, number of upper diagonals to keep
1724 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
1725 OperandIndexSequence outputs{init_param.outputs[0]};
1727 return new operation::MatrixBandPart{inputs, outputs};
1730 _map[ANEURALNETWORKS_BATCH_MATMUL_EX] = [](const OperationFactory::Param &init_param,
1731 Operands &operands) {
// BatchMatMul: the adjoint flags (inputs[2], inputs[3]) are compile-time
// scalars consumed into the Param, so only lhs/rhs become IR inputs.
1732 assert(init_param.input_count == 4 && init_param.output_count == 1);
1734 OperandIndexSequence outputs{init_param.outputs[0]};
1736 // Each input should be interpreted as follows:
1738 // 0 -> Lhs Tensor Index
1739 // 1 -> Rhs Tensor Index
1740 // 2 -> adj_x boolean scalar Index
1741 // 3 -> adj_y boolean scalar Index
1743 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1745 operation::BatchMatMul::Param param;
1746 param.adj_x = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<bool>();
1747 param.adj_y = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<bool>();
1749 return new operation::BatchMatMul{inputs, outputs, param};
1752 _map[ANEURALNETWORKS_EINSUM_EX] = [](const OperationFactory::Param &init_param,
1753 Operands &operands) {
1754 // Each input should be interpreted as follows:
1756 // 0....n - 1 -> n Input Tensors Index
1758 assert(init_param.input_count >= 1 && init_param.output_count == 1);
1760 OperandIndexSequence inputs;
1761 for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
1763 inputs.append(OperandIndex{init_param.inputs[n]});
1765 OperandIndexSequence outputs{init_param.outputs[0]};
1767 operation::Einsum::Param param;
1768 const OperandIndex equation_index{init_param.inputs[init_param.input_count - 1]};
1769 std::vector<char> equation_vector = operands.at(equation_index).asVector<char>();
1770 param.equation = std::string(equation_vector.begin(), equation_vector.end());
1772 return new operation::Einsum{inputs, outputs, param};
1775 // 0 -> Input Tensor Index
1776 // 1 -> int32, int64, An 1-D int tensor Index
1777 _map[ANEURALNETWORKS_BROADCAST_TO_EX] = createSimpleBinaryOp<operation::BroadcastTo>;
// StatelessRandomUniform (extension): both operands are forwarded as graph
// inputs unchanged; no extra Param is needed.
_map[ANEURALNETWORKS_STATELESS_RANDOM_UNIFORM_EX] = [](const OperationFactory::Param &init_param,
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
// Each input should be interpreted as follows:
// 0 -> Shape Tensor Index
// 1 -> int32, int64, An 1-D int tensor Index
OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
return new operation::StatelessRandomUniform{inputs, outputs};
// FusedBatchNormV3 (extension): five tensor operands followed by three
// scalar attributes that are folded into the operation's Param.
_map[ANEURALNETWORKS_FUSED_BATCH_NORM_V3_EX] = [](const OperationFactory::Param &init_param,
Operands &operands) {
// Each input should be interpreted as follows (the code below reads the
// trailing attributes at n-3 / n-2 / n-1 — the older "n-2 is_training,
// n-1 data_format" comment did not match the code):
// 0....4 -> 5 Input Tensors Index
// n-3 -> is_training boolean scalar
// n-2 -> data_format (char-vector operand, converted to std::string)
// n-1 -> epsilon float scalar
assert(init_param.input_count == 8 && init_param.output_count == 1);
// The first input_count - 3 operands (here: 5) are tensor inputs.
OperandIndexSequence inputs;
for (uint32_t n = 0; n < init_param.input_count - 3; ++n)
inputs.append(OperandIndex{init_param.inputs[n]});
OperandIndexSequence outputs{init_param.outputs[0]};
operation::FusedBatchNorm::Param param;
const OperandIndex is_training_index{init_param.inputs[init_param.input_count - 3]};
param.is_training = operands.at(is_training_index).asScalar<bool>();
// data_format is stored as a vector of chars; rebuild it as a string.
const OperandIndex data_format_index{init_param.inputs[init_param.input_count - 2]};
std::vector<char> data_format_vector = operands.at(data_format_index).asVector<char>();
param.data_format = std::string(data_format_vector.begin(), data_format_vector.end());
const OperandIndex epsilon_index{init_param.inputs[init_param.input_count - 1]};
param.epsilon = operands.at(epsilon_index).asScalar<float>();
return new operation::FusedBatchNorm{inputs, outputs, param};
// LogSoftmax: only the logits tensor becomes a graph input; beta and axis
// are scalar operands resolved now and folded into the operation's Param.
_map[ANEURALNETWORKS_LOG_SOFTMAX] = [](const OperationFactory::Param &init_param,
Operands &operands) {
assert(init_param.input_count == 3 && init_param.output_count == 1);
// Each input should be interpreted as follows:
// 0 -> A tensor specifying the input logits.
// 1 -> A scalar, specifying the positive scaling factor for the exponent, beta.
// 2 -> An scalar specifying the axis to reduce across.
OperandIndexSequence inputs{init_param.inputs[0]};
OperandIndexSequence outputs{init_param.outputs[0]};
const auto beta_index = OperandIndex{init_param.inputs[1]};
const auto axis_index = OperandIndex{init_param.inputs[2]};
operation::LogSoftmax::Param param;
param.beta = operands.at(beta_index).asScalar<float>();
param.axis = operands.at(axis_index).asScalar<int>();
return new operation::LogSoftmax{inputs, outputs, param};
// Quantize: single input tensor, single output tensor, no attributes; the
// Operands argument is unused, hence the unnamed parameter.
_map[ANEURALNETWORKS_QUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
// 0 -> Input Tensor Index
OperandIndexSequence inputs{init_param.inputs[0]};
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::Quantize{inputs, outputs};
// Looks up the generator registered for `type` and invokes it to build the
// corresponding IR operation.
// @param type NNAPI operation type used as the lookup key.
// @param param Raw NNAPI operand-index lists for the operation's inputs/outputs.
// @param operands Model operands, used by generators to resolve scalar attributes.
// @return Newly allocated operation; the caller takes ownership of the pointer.
// @throws std::runtime_error if no generator is registered for `type`.
Operation *OperationFactory::create(ANeuralNetworksOperationType type,
const OperationFactory::Param &param, Operands &operands)
auto it = _map.find(type);
if (it == _map.end())
throw std::runtime_error("Unsupported operation type: " + std::to_string(type));
return it->second(param, operands);