/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "OperationFactory.h"
18 #include "NNAPIConvert.h"
20 #include <ir/Operations.Include.h>
25 using namespace onert::ir;
27 void replaceDataType(Operands &operands, const OperandIndex &index, const DataType type)
29 assert(operands.exist(index));
30 operands.at(index).type(type);
33 ExplicitPadding makeExplicitPadding(Operands &operands, const OperandIndex &left_index,
34 const OperandIndex &right_index, const OperandIndex &top_index,
35 const OperandIndex &bottom_index)
37 auto left = operands.at(left_index).asScalar<int32_t>();
38 auto right = operands.at(right_index).asScalar<int32_t>();
39 auto top = operands.at(top_index).asScalar<int32_t>();
40 auto bottom = operands.at(bottom_index).asScalar<int32_t>();
42 if (left < 0 || right < 0 || top < 0 || bottom < 0)
44 throw std::runtime_error{"Cannot handle negative explicit padding value"};
47 ExplicitPadding param;
48 param.left = static_cast<uint32_t>(left);
49 param.right = static_cast<uint32_t>(right);
50 param.top = static_cast<uint32_t>(top);
51 param.bottom = static_cast<uint32_t>(bottom);
56 Stride makeStride(Operands &operands, const OperandIndex &horizontal_index,
57 const OperandIndex &vertical_index)
59 auto horizontal = operands.at(horizontal_index).asScalar<int32_t>();
60 auto vertical = operands.at(vertical_index).asScalar<int32_t>();
62 if (vertical < 0 || horizontal < 0)
64 throw std::runtime_error{"Cannot handle negative stride value"};
68 stride.horizontal = static_cast<uint32_t>(horizontal);
69 stride.vertical = static_cast<uint32_t>(vertical);
74 uint32_t getUint32Scalar(Operands &operands, const OperandIndex index)
76 auto int32_value = operands.at(index).asScalar<int32_t>();
79 throw std::runtime_error{"Cannot handle negative value"};
82 return static_cast<uint32_t>(int32_value);
85 OperationFactory::Generator
86 getElementwiseActivationGenerator(const onert::ir::operation::ElementwiseActivation::Type op_type,
87 float alpha = 0.f, float beta = 0.f)
89 return [op_type, alpha, beta](const OperationFactory::Param &init_param, Operands &) {
90 assert(init_param.input_count == 1);
91 assert(init_param.output_count == 1);
93 // Each input should be interpreted as follows:
95 // 0 -> Input Tensor Index
97 OperandIndexSequence inputs{init_param.inputs[0]};
98 OperandIndexSequence outputs{init_param.outputs[0]};
100 operation::ElementwiseActivation::Param param;
101 param.op_type = op_type;
105 return new operation::ElementwiseActivation{inputs, outputs, param};
109 OperationFactory::Generator getElementwiseBinaryGenerator(
110 const onert::ir::operation::ElementwiseBinary::ElementwiseBinaryType op_type)
112 return [op_type](const OperationFactory::Param &init_param, Operands &) {
113 assert(init_param.input_count == 2);
114 assert(init_param.output_count == 1);
116 // Each input should be interpreted as follows:
118 // 0 -> Lefthand side operand
119 // 1 -> Righthand side operand
121 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
122 OperandIndexSequence outputs{init_param.outputs[0]};
124 operation::ElementwiseBinary::Param param;
125 param.op_type = op_type;
127 return new operation::ElementwiseBinary{inputs, outputs, param};
131 OperationFactory::Generator
132 getElementwiseUnaryGenerator(const onert::ir::operation::ElementwiseUnary::Type op_type)
134 return [op_type](const OperationFactory::Param &init_param, Operands &operands) {
135 assert(init_param.input_count == 1);
136 assert(init_param.output_count == 1);
138 // Each input should be interpreted as follows:
140 // 0 -> Input Tensor Index
142 OperandIndexSequence inputs{init_param.inputs[0]};
143 OperandIndexSequence outputs{init_param.outputs[0]};
145 operation::ElementwiseUnary::Param param;
146 param.op_type = op_type;
148 if (op_type == operation::ElementwiseUnary::Type::CAST)
150 // NNAPI uses QUANT_UINT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's
152 if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
154 replaceDataType(operands, inputs.at(0), DataType::UINT8);
156 if (operands.at(outputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
158 replaceDataType(operands, outputs.at(0), DataType::UINT8);
162 return new operation::ElementwiseUnary{inputs, outputs, param};
166 OperationFactory::Generator
167 getBinaryArithmeticGenerator(const onert::ir::operation::BinaryArithmetic::ArithmeticType op_type)
169 return [op_type](const OperationFactory::Param &init_param, Operands &operands) {
170 assert(init_param.input_count == 3);
171 assert(init_param.output_count == 1);
173 // Each input should be interpreted as follows:
175 // 0 -> Lefthand side operand
176 // 1 -> Righthand side operand
178 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
179 OperandIndexSequence outputs{init_param.outputs[0]};
181 operation::BinaryArithmetic::Param param;
182 param.arithmetic_type = op_type;
183 const auto activation_index = OperandIndex{init_param.inputs[2]};
185 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
187 return new operation::BinaryArithmetic{inputs, outputs, param};
191 OperationFactory::Generator
192 getPool2DGenerator(const onert::ir::operation::Pool2D::PoolType pool_type)
194 return [pool_type](const OperationFactory::Param &init_param, Operands &operands) {
195 assert(init_param.input_count == 7 || init_param.input_count == 10);
196 assert(init_param.output_count == 1);
199 // 0 -> IFM Tensor Index
200 OperandIndexSequence inputs{init_param.inputs[0]};
201 OperandIndexSequence outputs{init_param.outputs[0]};
203 operation::Pool2D::Param param;
204 param.op_type = pool_type;
205 if (init_param.input_count == 7) // support implicit padding
207 // Each input should be interpreted as follows:
209 // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
210 // 2 -> Horizontal (over width) Stride Index
211 // 3 -> Vertial (over height) Stride Index
212 // 4 -> Filter Width Index
213 // 5 -> Filter Height Index
214 // 6 -> FuseCode (activation) Index
216 const auto padding_index = OperandIndex{init_param.inputs[1]};
217 const auto hstride_index = OperandIndex{init_param.inputs[2]};
218 const auto vstride_index = OperandIndex{init_param.inputs[3]};
219 const auto kw_index = OperandIndex{init_param.inputs[4]};
220 const auto kh_index = OperandIndex{init_param.inputs[5]};
221 const auto activation_index = OperandIndex{init_param.inputs[6]};
224 NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
225 param.stride = makeStride(operands, hstride_index, vstride_index);
226 param.kw = getUint32Scalar(operands, kw_index);
227 param.kh = operands.at(kh_index).asScalar<uint32_t>();
229 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
231 else // support explicit padding
233 // Each input should be interpreted as follows:
235 // 1 -> Padding_left index
236 // 2 -> Padding_right index
237 // 3 -> Padding_top index
238 // 4 -> Padding_bottom index
239 // 5 -> Horizontal (over width) Stride Index
240 // 6 -> Vertial (over height) Stride Index
241 // 7 -> Filter Width Index
242 // 8 -> Filter Height Index
243 // 9 -> FuseCode (activation) Index
245 const auto padding_left_index = OperandIndex{init_param.inputs[1]};
246 const auto padding_right_index = OperandIndex{init_param.inputs[2]};
247 const auto padding_top_index = OperandIndex{init_param.inputs[3]};
248 const auto padding_bottom_index = OperandIndex{init_param.inputs[4]};
249 const auto hstride_index = OperandIndex{init_param.inputs[5]};
250 const auto vstride_index = OperandIndex{init_param.inputs[6]};
251 const auto kw_index = OperandIndex{init_param.inputs[7]};
252 const auto kh_index = OperandIndex{init_param.inputs[8]};
253 const auto activation_index = OperandIndex{init_param.inputs[9]};
255 param.padding.type = PaddingType::EXPLICIT;
256 param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
257 padding_top_index, padding_bottom_index);
258 param.stride = makeStride(operands, hstride_index, vstride_index);
259 param.kw = getUint32Scalar(operands, kw_index);
260 param.kh = getUint32Scalar(operands, kh_index);
262 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
264 return new operation::Pool2D{inputs, outputs, param};
268 OperationFactory::Generator
269 getReduceGenerator(const onert::ir::operation::Reduce::ReduceType reduce_type)
271 return [reduce_type](const OperationFactory::Param &init_param, Operands &operands) {
272 assert(init_param.input_count == 3);
273 assert(init_param.output_count == 1);
275 // Each input should be interpreted as follows:
277 // 0 -> Input Tensor Index
278 // 1 -> Reduced Axes Tensor Index
279 // 2 -> keep_dims Index
281 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
282 OperandIndexSequence outputs{init_param.outputs[0]};
284 operation::Reduce::Param param;
285 param.reduce_type = reduce_type;
286 param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0;
288 return new operation::Reduce{inputs, outputs, param};
292 template <typename T>
293 Operation *CreateSimpleUnaryOp(const OperationFactory::Param &init_param, Operands &)
295 assert(init_param.input_count == 1 && init_param.output_count == 1);
297 OperandIndexSequence outputs{init_param.outputs[0]};
299 // Each input should be interpreted as follows:
301 // 0 -> Input Tensor Index
302 OperandIndexSequence inputs{init_param.inputs[0]};
304 return new T{inputs, outputs};
307 // A generator function for binary ops with no params
308 template <typename T>
309 Operation *createSimpleBinaryOp(const OperationFactory::Param &init_param, Operands &)
311 assert(init_param.input_count == 2 && init_param.output_count == 1);
313 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
314 OperandIndexSequence outputs{init_param.outputs[0]};
316 return new T{inputs, outputs};
319 OperationFactory::Generator getComparisonGenerator(operation::Comparison::ComparisonType type)
321 return [type](const OperationFactory::Param &init_param, Operands &) -> Operation * {
322 assert(init_param.input_count == 2 && init_param.output_count == 1);
324 OperandIndexSequence outputs{init_param.outputs[0]};
326 // Each input should be interpreted as follows:
328 // 0 -> input0 Tensor Index
329 // 1 -> input1 Tensor Index
330 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
332 operation::Comparison::Param param;
333 param.comparison_type = type;
335 return new operation::Comparison{inputs, outputs, param};
341 OperationFactory &OperationFactory::get()
343 static OperationFactory factory;
347 OperationFactory::OperationFactory()
349 // Each input should be interpreted as follows:
350 // 0 -> Input Tensor Index
351 // 1 -> Block size Index
352 _map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = createSimpleBinaryOp<operation::BatchToSpaceND>;
354 _map[ANEURALNETWORKS_DEPTHWISE_CONV_2D] = [](const OperationFactory::Param &init_param,
355 Operands &operands) {
356 assert((init_param.input_count == 8 || init_param.input_count == 11) &&
357 init_param.output_count == 1);
360 // 0 -> IFM Tensor Index
361 // 1 -> Kernel Tensor Index
362 // 2 -> Bias Tensor Index
363 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
364 OperandIndexSequence outputs{init_param.outputs[0]};
366 operation::DepthwiseConv2D::Param param;
367 if (init_param.input_count == 8)
369 // Imlicit Padding case
370 // Each input should be interpreted as follows:
372 // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
373 // 4 -> Stride (width) Index
374 // 5 -> Stride (height) INdex
375 // 6 -> Depthwise multiplier
376 // 7 -> Activation Index
378 const auto padding_index = OperandIndex{init_param.inputs[3]};
379 const auto hstride_index = OperandIndex{init_param.inputs[4]};
380 const auto vstride_index = OperandIndex{init_param.inputs[5]};
381 const auto multiplier_index = OperandIndex{init_param.inputs[6]};
382 const auto activation_index = OperandIndex{init_param.inputs[7]};
385 NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
386 param.stride = makeStride(operands, hstride_index, vstride_index);
387 param.multiplier = getUint32Scalar(operands, multiplier_index);
389 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
393 // Explicit Padding case
394 // Each input should be interpreted as follows:
396 // 3 -> Padding On the Left
397 // 4 -> Padding On the Right
398 // 5 -> Padding On the Top
399 // 6 -> Padding On the Bottom
400 // 7 -> Stride (width) Index
401 // 8 -> Stride (height) Index
402 // 9 -> Depthwise multiplier
403 // 10-> Activation Index
405 const auto padding_left_index = OperandIndex{init_param.inputs[3]};
406 const auto padding_right_index = OperandIndex{init_param.inputs[4]};
407 const auto padding_top_index = OperandIndex{init_param.inputs[5]};
408 const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
409 const auto hstride_index = OperandIndex{init_param.inputs[7]};
410 const auto vstride_index = OperandIndex{init_param.inputs[8]};
411 const auto multiplier_index = OperandIndex{init_param.inputs[9]};
412 const auto activation_index = OperandIndex{init_param.inputs[10]};
414 param.padding.type = PaddingType::EXPLICIT;
415 param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
416 padding_top_index, padding_bottom_index);
417 param.stride = makeStride(operands, hstride_index, vstride_index);
418 param.multiplier = getUint32Scalar(operands, multiplier_index);
420 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
423 return new operation::DepthwiseConv2D{inputs, outputs, param};
426 _map[ANEURALNETWORKS_MAX_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::MAX);
428 _map[ANEURALNETWORKS_AVERAGE_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::AVG);
430 _map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param,
431 Operands &operands) {
432 assert(init_param.input_count >= 2); // At least one one input tensor and axis
433 assert(init_param.output_count == 1);
435 // When there are N + 1 inputs, each input should be interpreted as follows:
437 // [0, N) -> Input tensors
441 OperandIndexSequence inputs;
442 for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
444 inputs.append(OperandIndex{init_param.inputs[n]});
446 OperandIndexSequence outputs{init_param.outputs[0]};
448 operation::Concat::Param param;
449 const OperandIndex axis_index{init_param.inputs[init_param.input_count - 1]};
450 param.axis = operands.at(axis_index).asScalar<int32_t>();
452 return new operation::Concat{inputs, outputs, param};
455 _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param, Operands &) {
456 assert(init_param.input_count == 2 && init_param.output_count == 1);
458 // Each input should be interpreted as follows:
460 // 0 -> A tensor, specifying the tensor to be reshaped.
461 // 1 -> A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32, defining the shape of the output
464 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
465 OperandIndexSequence outputs{init_param.outputs[0]};
467 operation::Reshape::Param param{};
469 return new operation::Reshape{inputs, outputs, param};
472 _map[ANEURALNETWORKS_FULLY_CONNECTED] = [](const OperationFactory::Param &init_param,
473 Operands &operands) {
474 assert(init_param.input_count == 4 && init_param.output_count == 1);
476 // Each input should be interpreted as follows:
478 // 0 -> A tensor, specifying the input.
479 // 1 -> A 2-D tensor, specifying the weights
480 // 2 -> A 1-D tensor, specifying the bias
481 // 3 -> An INT32 value, and has to be one of the FuseCode values
483 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
484 OperandIndexSequence outputs{init_param.outputs[0]};
486 operation::FullyConnected::Param param;
487 const auto activation_index = OperandIndex{init_param.inputs[3]};
489 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
491 return new operation::FullyConnected{inputs, outputs, param};
494 _map[ANEURALNETWORKS_SOFTMAX] = [](const OperationFactory::Param &init_param,
495 Operands &operands) {
496 assert(init_param.input_count == 2 && init_param.output_count == 1);
498 // Each input should be interpreted as follows:
500 // 0 -> A 2-D or 4-D tensor, specifying the tensor to be reshaped.
501 // 1 -> FLOAT32 value, specifying the positive scaling factor for the exponent, beta.
503 OperandIndexSequence inputs{init_param.inputs[0]};
504 OperandIndexSequence outputs{init_param.outputs[0]};
506 const auto beta_index = OperandIndex{init_param.inputs[1]};
508 operation::Softmax::Param param;
509 param.beta = operands.at(beta_index).asScalar<float>();
511 return new operation::Softmax{inputs, outputs, param};
514 _map[ANEURALNETWORKS_CAST] =
515 getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::CAST);
517 // ANEURALNETWORKS_CAST_EX is deprecated
518 // TODO Remove ANEURALNETWORKS_CAST_EX
519 _map[ANEURALNETWORKS_CAST_EX] = _map[ANEURALNETWORKS_CAST];
521 _map[ANEURALNETWORKS_CONV_2D] = [](const OperationFactory::Param &init_param,
522 Operands &operands) {
523 using operation::Conv2D;
525 // inputCount is either 7 or 10 acccording to NN API specification.
526 // - Padding is implicit when inputCount is 7
527 // - Padding is explicit when inputCount is 10
528 assert(init_param.input_count == 7 || init_param.input_count == 10 ||
529 init_param.input_count == 13);
530 assert(init_param.output_count == 1);
532 // 0 -> IFM Tensor Index
533 // 1 -> Kernel Tensor Index
534 // 2 -> Bias Tensor Index
536 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
537 OperandIndexSequence outputs{init_param.outputs[0]};
540 if (init_param.input_count == 7) // support implicit padding
542 // Each input should be interpreted as follows:
544 // 3 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
545 // 4 -> Stride (width) Index
546 // 5 -> Stride (height) INdex
547 // 6 -> Activation Index
549 const auto padding_index = OperandIndex{init_param.inputs[3]};
550 const auto hstride_index = OperandIndex{init_param.inputs[4]};
551 const auto vstride_index = OperandIndex{init_param.inputs[5]};
552 const auto activation_index = OperandIndex{init_param.inputs[6]};
555 NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
556 param.stride = makeStride(operands, hstride_index, vstride_index);
558 param.dilation.width_factor = 1;
559 param.dilation.height_factor = 1;
562 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
564 else if (init_param.input_count == 10) // support explicit padding
566 // Each input should be interpreted as follows:
568 // 3 -> Padding_left index
569 // 4 -> Padding_right index
570 // 5 -> Padding_top index
571 // 6 -> Padding_bottom index
572 // 7 -> Stride (width) Index
573 // 8 -> Stride (height) INdex
574 // 9 -> Activation Index
576 const auto padding_left_index = OperandIndex{init_param.inputs[3]};
577 const auto padding_right_index = OperandIndex{init_param.inputs[4]};
578 const auto padding_top_index = OperandIndex{init_param.inputs[5]};
579 const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
580 const auto hstride_index = OperandIndex{init_param.inputs[7]};
581 const auto vstride_index = OperandIndex{init_param.inputs[8]};
582 const auto activation_index = OperandIndex{init_param.inputs[9]};
584 param.padding.type = PaddingType::EXPLICIT;
585 param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
586 padding_top_index, padding_bottom_index);
587 param.stride = makeStride(operands, hstride_index, vstride_index);
589 param.dilation.width_factor = 1;
590 param.dilation.height_factor = 1;
593 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
595 else if (init_param.input_count == 13) // support dilation
597 // Each input should be interpreted as follows:
599 // 3 -> Padding_left Index
600 // 4 -> Padding_right Index
601 // 5 -> Padding_top Index
602 // 6 -> Padding_bottom Index
603 // 7 -> Stride (width) Index
604 // 8 -> Stride (height) Index
605 // 9 -> Activation Index
606 // 11 -> Dilation (width_factor) Index
607 // 12 -> Dilation (height_factor) INdex
609 const auto padding_left_index = OperandIndex{init_param.inputs[3]};
610 const auto padding_right_index = OperandIndex{init_param.inputs[4]};
611 const auto padding_top_index = OperandIndex{init_param.inputs[5]};
612 const auto padding_bottom_index = OperandIndex{init_param.inputs[6]};
613 const auto hstride_index = OperandIndex{init_param.inputs[7]};
614 const auto vstride_index = OperandIndex{init_param.inputs[8]};
615 const auto activation_index = OperandIndex{init_param.inputs[9]};
616 const auto width_factor_index = OperandIndex{init_param.inputs[11]};
617 const auto height_factor_index = OperandIndex{init_param.inputs[12]};
619 param.padding.type = PaddingType::EXPLICIT;
620 param.padding.param = makeExplicitPadding(operands, padding_left_index, padding_right_index,
621 padding_top_index, padding_bottom_index);
622 param.stride = makeStride(operands, hstride_index, vstride_index);
624 auto width_factor = operands.at(width_factor_index).asScalar<int32_t>();
625 auto height_factor = operands.at(height_factor_index).asScalar<int32_t>();
627 param.dilation.width_factor = width_factor;
628 param.dilation.height_factor = height_factor;
631 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
635 throw std::runtime_error{"Conv2D: unsupported input operand count"};
638 return new Conv2D{inputs, outputs, param};
641 _map[ANEURALNETWORKS_ADD] =
642 getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::ADD);
644 _map[ANEURALNETWORKS_ADDV2_EX] = _map[ANEURALNETWORKS_ADD];
646 _map[ANEURALNETWORKS_REDUCE_SUM] =
647 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::SUM);
649 // ANEURALNETWORKS_REDUCE_SUM_EX is deprecated
650 // TODO Remove ANEURALNETWORKS_REDUCE_SUM_EX
651 _map[ANEURALNETWORKS_REDUCE_SUM_EX] = _map[ANEURALNETWORKS_REDUCE_SUM];
653 _map[ANEURALNETWORKS_SUB] =
654 getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::SUB);
656 _map[ANEURALNETWORKS_SLICE] = [](const OperationFactory::Param &init_param, Operands &) {
657 assert(init_param.input_count == 3 && init_param.output_count == 1);
659 OperandIndexSequence outputs{init_param.outputs[0]};
661 // Each input should be interpreted as follows:
663 // 0 -> Input Tensor Index
664 // 1 -> Begins Tensor Index
665 // 2 -> Sizes Tensor Index
666 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
668 return new operation::Slice{inputs, outputs};
671 _map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param,
672 Operands &operands) {
673 assert(init_param.input_count == 7 && init_param.output_count == 1);
675 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2],
676 init_param.inputs[3]};
677 OperandIndexSequence outputs{init_param.outputs[0]};
679 // Each input should be interpreted as follows:
681 // 1 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the starts of
682 // the dimensions of the input tensor to be sliced. The length must be
684 // 2 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the ends of
685 // the dimensions of the input tensor to be sliced. The length must be
687 // 3 -> A 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32}, the strides of
688 // the dimensions of the input tensor to be sliced. The length must be
690 // 4 -> An {@link ANEURALNETWORKS_INT32} scalar, begin_mask. If the ith bit
691 // of begin_mask is set, begin[i] is ignored and the fullest possible
692 // range in that dimension is used instead.
693 // 5 -> An {@link ANEURALNETWORKS_INT32} scalar, end_mask. If the ith bit of
694 // end_mask is set, end[i] is ignored and the fullest possible range in
695 // that dimension is used instead.
696 // 6 -> An {@link ANEURALNETWORKS_INT32} scalar, shrink_axis_mask. An int32
697 // mask. If the ith bit of shrink_axis_mask is set, it implies that the
698 // ith specification shrinks the dimensionality by 1. A slice of size 1
699 // starting from begin[i] in the dimension must be preserved.
701 operation::StridedSlice::Param param;
703 param.begin_mask = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>();
704 param.end_mask = operands.at(OperandIndex{init_param.inputs[5]}).asScalar<std::int32_t>();
705 param.shrink_axis_mask =
706 operands.at(OperandIndex{init_param.inputs[6]}).asScalar<std::int32_t>();
708 return new operation::StridedSlice{inputs, outputs, param};
711 _map[ANEURALNETWORKS_TRANSPOSE] = [](const OperationFactory::Param &init_param,
712 Operands &operands) {
713 // TODO make this work with init_param.input_count == 1 (when permutation vector is optional)
716 // 0: An n-D tensor, specifying the tensor to be transposed.
717 // 1: An optional 1-D Tensor of {@link ANEURALNETWORKS_TENSOR_INT32},
718 // the permutation of the dimensions of the input tensor.
719 // The returned tensor's dimension i corresponds to the input dimension
720 // perm[i]. If perm is not given, it is set to (n-1...0), where n is the
721 // rank of the input tensor. Hence by default, this operation performs a
722 // regular matrix transpose on 2-D input Tensors.
723 assert(init_param.input_count == 2);
724 assert(init_param.output_count == 1);
726 OperandIndexSequence inputs{init_param.inputs[0]};
727 OperandIndexSequence outputs{init_param.outputs[0]};
728 std::vector<std::int32_t> perm =
729 operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
731 operation::Transpose::Param param;
732 param.perm.assign(perm.cbegin(), perm.cend());
734 return new operation::Transpose{inputs, outputs, param};
737 _map[ANEURALNETWORKS_MUL] =
738 getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::MUL);
740 _map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param,
741 Operands &operands) {
742 assert(init_param.input_count == 1 || init_param.input_count == 2);
743 assert(init_param.output_count == 1);
745 OperandIndexSequence outputs{init_param.outputs[0]};
747 // Each input should be interpreted as follows:
749 // 0 -> An n-D tensor, the tensor to be squeezed.
750 // 1 -> An optional 1-D tensor of ANEURALNETWORKS_TENSOR_INT32. The dimensions to squeeze.
751 // If specified only squeezes the dimensions listed. Otherwise, squeezes all dimensions.
752 // The dimension index starts at 0. An error must be reported if squeezing a dimension that
755 // Add mandatory input index
756 OperandIndexSequence inputs{init_param.inputs[0]};
758 // Add dims index if specified
759 operation::Squeeze::Param param{};
760 if (init_param.input_count == 2)
762 auto squeeze_dims_idx = OperandIndex{init_param.inputs[1]};
763 assert(operands.at(squeeze_dims_idx).shape().rank() == 1);
764 assert(operands.at(squeeze_dims_idx).shape().dim(0) >= 0);
765 assert(static_cast<uint32_t>(operands.at(squeeze_dims_idx).shape().dim(0)) <=
767 param.ndim = operands.at(squeeze_dims_idx).shape().dim(0);
770 assert(operands.at(squeeze_dims_idx).data());
771 memcpy(param.dims, operands.at(squeeze_dims_idx).data()->base(),
772 param.ndim * sizeof(param.dims[0]));
776 return new operation::Squeeze{inputs, outputs, param};
779 _map[ANEURALNETWORKS_TANH] = getElementwiseActivationGenerator(
780 onert::ir::operation::ElementwiseActivation::Type::TANH, 1.f, 1.f);
782 _map[ANEURALNETWORKS_LOG] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::LOG);
784 _map[ANEURALNETWORKS_LOGISTIC] = getElementwiseActivationGenerator(
785 onert::ir::operation::ElementwiseActivation::Type::LOGISTIC);
787 _map[ANEURALNETWORKS_DIV] =
788 getBinaryArithmeticGenerator(onert::ir::operation::BinaryArithmetic::ArithmeticType::DIV);
790 _map[ANEURALNETWORKS_EXP] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::EXP);
792 // ANEURALNETWORKS_EXP_EX is deprecated
793 // TODO Remove ANEURALNETWORKS_EXP_EX
794 _map[ANEURALNETWORKS_EXP_EX] = _map[ANEURALNETWORKS_EXP];
796 // Each input should be interpreted as follows:
797 // 0 -> Input Tensor Index
798 // 1 -> Axis Tensor Index
799 _map[ANEURALNETWORKS_EXPAND_DIMS] = createSimpleBinaryOp<operation::ExpandDims>;
801 _map[ANEURALNETWORKS_GREATER] =
802 getComparisonGenerator(operation::Comparison::ComparisonType::Greater);
803 _map[ANEURALNETWORKS_GREATER_EQUAL] =
804 getComparisonGenerator(operation::Comparison::ComparisonType::GreaterEqual);
805 _map[ANEURALNETWORKS_LESS] = getComparisonGenerator(operation::Comparison::ComparisonType::Less);
806 _map[ANEURALNETWORKS_LESS_EQUAL] =
807 getComparisonGenerator(operation::Comparison::ComparisonType::LessEqual);
808 _map[ANEURALNETWORKS_NOT_EQUAL] =
809 getComparisonGenerator(operation::Comparison::ComparisonType::NotEqual);
810 _map[ANEURALNETWORKS_EQUAL] =
811 getComparisonGenerator(operation::Comparison::ComparisonType::Equal);
813 // ANEURALNETWORKS_GREATER_EQUAL_EX is deprecated
814 // TODO Remove ANEURALNETWORKS_GREATER_EQUAL_EX
815 _map[ANEURALNETWORKS_GREATER_EQUAL_EX] = [](const OperationFactory::Param &init_param,
816 Operands &operands) {
817 assert(init_param.input_count == 2 && init_param.output_count == 1);
819 OperandIndexSequence outputs{init_param.outputs[0]};
821 // Each input should be interpreted as follows:
823 // 0 -> input0 Tensor Index
824 // 1 -> input1 Tensor Index
825 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
827 operation::Comparison::Param param;
828 param.comparison_type = operation::Comparison::ComparisonType::GreaterEqual;
830 // Output operand type must be boolean
831 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
833 return new operation::Comparison{inputs, outputs, param};
836 // ANEURALNETWORKS_LESS_EX is deprecated
837 // TODO Remove ANEURALNETWORKS_LESS_EX
838 _map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param,
839 Operands &operands) {
840 assert(init_param.input_count == 2 && init_param.output_count == 1);
842 OperandIndexSequence outputs{init_param.outputs[0]};
844 // Each input should be interpreted as follows:
846 // 0 -> input0 Tensor Index
847 // 1 -> input1 Tensor Index
848 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
850 operation::Comparison::Param param;
851 param.comparison_type = operation::Comparison::ComparisonType::Less;
853 // Output operand type must be boolean
854 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
856 return new operation::Comparison{inputs, outputs, param};
859 _map[ANEURALNETWORKS_REDUCE_ALL] =
860 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::ALL);
862 _map[ANEURALNETWORKS_REDUCE_ANY] =
863 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::ANY);
865 _map[ANEURALNETWORKS_REDUCE_MAX] =
866 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::MAX);
868 // ANEURALNETWORKS_REDUCE_MAX_EX is deprecated
869 // TODO Remove ANEURALNETWORKS_REDUCE_MAX_EX
870 _map[ANEURALNETWORKS_REDUCE_MAX_EX] = _map[ANEURALNETWORKS_REDUCE_MAX];
872 // ANEURALNETWORKS_NOT_EQUAL_EX is deprecated
873 // TODO Remove ANEURALNETWORKS_NOT_EQUAL_EX
874 _map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param,
875 Operands &operands) {
876 assert(init_param.input_count == 2 && init_param.output_count == 1);
878 OperandIndexSequence outputs{init_param.outputs[0]};
880 // Each input should be interpreted as follows:
882 // 0 -> input1 Tensor Index
883 // 1 -> input2 Tensor Index
884 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
886 operation::Comparison::Param param;
887 param.comparison_type = operation::Comparison::ComparisonType::NotEqual;
889 // Output operand type must be boolean
890 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
892 return new operation::Comparison{inputs, outputs, param};
895 _map[ANEURALNETWORKS_LOGICAL_AND] = getElementwiseBinaryGenerator(
896 operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND);
898 // ANEURALNETWORKS_LOGICAL_AND_EX is deprecated
899 // TODO Remove ANEURALNETWORKS_LOGICAL_AND_EX
900 _map[ANEURALNETWORKS_LOGICAL_AND_EX] = [](const OperationFactory::Param &init_param,
901 Operands &operands) {
902 assert(init_param.input_count == 2 && init_param.output_count == 1);
904 OperandIndexSequence outputs{init_param.outputs[0]};
906 // Each input should be interpreted as follows:
908 // 0 -> input0 Tensor Index
909 // 1 -> input1 Tensor Index
910 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
912 // This operation's operands must be boolean type.
913 replaceDataType(operands, inputs.at(0), DataType::BOOL8);
914 replaceDataType(operands, inputs.at(1), DataType::BOOL8);
915 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
917 operation::ElementwiseBinary::Param param;
918 param.op_type = operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND;
920 return new operation::ElementwiseBinary{inputs, outputs, param};
923 _map[ANEURALNETWORKS_RSQRT] =
924 getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::RSQRT);
926 _map[ANEURALNETWORKS_SELECT] = [](const OperationFactory::Param &init_param, Operands &) {
927 assert(init_param.input_count == 3 && init_param.output_count == 1);
929 OperandIndexSequence outputs{init_param.outputs[0]};
931 // Each input should be interpreted as follows:
933 // 0 -> Condition Tensor Index
934 // 1 -> Input X(true) Tensor Index
935 // 2 -> Input Y(false) Tensor Index
936 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
938 return new operation::Select{inputs, outputs};
941 _map[ANEURALNETWORKS_SELECT_V2_EX] = [](const OperationFactory::Param &init_param, Operands &) {
942 assert(init_param.input_count == 3 && init_param.output_count == 1);
944 OperandIndexSequence outputs{init_param.outputs[0]};
946 // Each input should be interpreted as follows:
948 // 0 -> Condition Tensor Index
949 // 1 -> Input X(true) Tensor Index
950 // 2 -> Input Y(false) Tensor Index
951 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
953 return new operation::Select{inputs, outputs};
956 // ANEURALNETWORKS_RSQRT_EX is deprecated
957 // TODO Remove ANEURALNETWORKS_RSQRT_EX
958 _map[ANEURALNETWORKS_RSQRT_EX] = _map[ANEURALNETWORKS_RSQRT];
960 _map[ANEURALNETWORKS_RELU] =
961 getElementwiseActivationGenerator(onert::ir::operation::ElementwiseActivation::Type::RELU,
962 onert::ir::operation::ElementwiseActivation::infinity, 0);
964 _map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param,
965 Operands &operands) {
966 assert(init_param.input_count == 3 && init_param.output_count == 1);
968 OperandIndexSequence outputs{init_param.outputs[0]};
970 // Each input should be interpreted as follows:
975 OperandIndexSequence inputs{init_param.inputs[0]};
977 operation::ResizeBilinear::Param param;
978 param.height_out = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
979 param.width_out = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>();
980 param.align_corners = false;
981 param.half_pixel_centers = false;
982 return new operation::ResizeBilinear{inputs, outputs, param};
985 _map[ANEURALNETWORKS_RELU1] = getElementwiseActivationGenerator(
986 onert::ir::operation::ElementwiseActivation::Type::RELU, 1.f, -1.f);
988 _map[ANEURALNETWORKS_RELU6] = getElementwiseActivationGenerator(
989 onert::ir::operation::ElementwiseActivation::Type::RELU, 6.f, 0.f);
991 _map[ANEURALNETWORKS_REVERSE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
992 assert(init_param.input_count == 2 && init_param.output_count == 1);
994 // Each input should be interpreted as follows:
996 // 0 -> Input Tensor Index
997 // 1 -> Axis Tensor Index
999 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1000 OperandIndexSequence outputs{init_param.outputs[0]};
1002 return new operation::Reverse{inputs, outputs};
1005 _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, Operands &operands) {
1006 assert(init_param.input_count == 6 && init_param.output_count == 2);
1008 // Each input should be interpreted as follows:
1010 // 0 -> Input Tensor Index
1011 // 1 -> Weights Tensor Index
1012 // 2 -> Recurrent Weights Tensor Index
1013 // 3 -> Bias Tensor Index
1014 // 4 -> Hidden state (in) Index
1015 // 5 -> Activation Index
1017 OperandIndexSequence inputs;
1018 for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
1020 inputs.append(OperandIndex{init_param.inputs[n]});
1022 OperandIndexSequence outputs;
1023 for (uint32_t n = 0; n < init_param.output_count; ++n)
1025 outputs.append(OperandIndex{init_param.outputs[n]});
1028 operation::RNN::Param param;
1029 const auto activation_index = OperandIndex{init_param.inputs[5]};
1031 NNAPIConvert::getFusedActivation(operands.at(activation_index).asScalar<FuseCode>());
1033 return new operation::RNN{inputs, outputs, param};
1036 _map[ANEURALNETWORKS_FLOOR] =
1037 getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::FLOOR);
1039 _map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param,
1041 assert(init_param.input_count == 3 && init_param.output_count == 1);
1043 OperandIndexSequence outputs{init_param.outputs[0]};
1045 // Each input should be interpreted as follows:
1047 // 0 -> Input Tensor Index
1048 // 1 -> Block size Index
1049 // 2 -> Paddings Index
1050 OperandIndexSequence inputs;
1051 for (uint32_t n = 0; n < init_param.input_count; ++n)
1053 inputs.append(OperandIndex{init_param.inputs[n]});
1056 return new operation::SpaceToBatchND{inputs, outputs};
1059 _map[ANEURALNETWORKS_SPACE_TO_DEPTH] = [](const OperationFactory::Param &init_param,
1060 Operands &operands) {
1061 assert(init_param.input_count == 2 && init_param.output_count == 1);
1063 OperandIndexSequence outputs{init_param.outputs[0]};
1065 // Each input should be interpreted as follows:
1067 // 0 -> Input Tensor Index
1068 // 1 -> Block size Index
1069 OperandIndexSequence inputs{init_param.inputs[0]};
1071 operation::SpaceToDepth::Param param;
1072 param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1074 return new operation::SpaceToDepth{inputs, outputs, param};
1077 _map[ANEURALNETWORKS_L2_POOL_2D] = getPool2DGenerator(operation::Pool2D::PoolType::L2);
1079 _map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param,
1081 assert(init_param.input_count == 2 && init_param.output_count == 1);
1083 OperandIndexSequence outputs{init_param.outputs[0]};
1085 // Each input should be interpreted as follows:
1087 // 0 -> Lookups Index
1088 // 1 -> Values Index
1089 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1091 return new operation::EmbeddingLookup{inputs, outputs};
1094 _map[ANEURALNETWORKS_L2_NORMALIZATION] = [](const OperationFactory::Param &init_param,
1096 assert(init_param.input_count == 1 && init_param.output_count == 1);
1098 OperandIndexSequence outputs{init_param.outputs[0]};
1100 // Each input should be interpreted as follows:
1101 // 0 -> input Tensor Index
1102 OperandIndexSequence inputs{init_param.inputs[0]};
1104 return new operation::L2Normalization{inputs, outputs};
1107 _map[ANEURALNETWORKS_HASHTABLE_LOOKUP] = [](const OperationFactory::Param &init_param,
1109 assert(init_param.input_count == 3 && init_param.output_count == 2);
1111 // Each output should be interpreted as follows:
1113 // 0 -> Output Index
1115 OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]};
1117 // Each input should be interpreted as follows:
1119 // 0 -> Lookups Index
1121 // 2 -> Values Index
1122 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
1124 return new operation::HashtableLookup{inputs, outputs};
1127 _map[ANEURALNETWORKS_PRELU] = [](const OperationFactory::Param &init_param, Operands &) {
1128 assert(init_param.input_count == 2 && init_param.output_count == 1);
1130 OperandIndexSequence outputs{init_param.outputs[0]};
1132 // Each input should be interpreted as follows:
1134 // 0 -> input Tensor Index
1135 // 1 -> alpha Tensor Index
1136 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1138 return new operation::PReLU{inputs, outputs};
1141 // ANEURALNETWORKS_PRELU_EX is deprecated
1142 // TODO Remove ANEURALNETWORKS_PRELU_EX
1143 _map[ANEURALNETWORKS_PRELU_EX] = _map[ANEURALNETWORKS_PRELU];
1145 _map[ANEURALNETWORKS_TRANSPOSE_CONV_EX] = [](const OperationFactory::Param &init_param,
1146 Operands &operands) {
1147 assert(init_param.input_count == 6 && init_param.output_count == 1);
1149 OperandIndexSequence outputs{init_param.outputs[0]};
1151 // Each input should be interpreted as follows:
1153 // 0 -> Output Shape Index
1154 // 1 -> Weights Index
1155 // 2 -> Input Tensor Index
1156 // 3 -> Padding Type
1157 // 4 -> Stride width
1158 // 5 -> Stride height
1160 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
1162 operation::TransposeConv::Param param;
1164 const auto padding_index = OperandIndex{init_param.inputs[3]};
1165 const auto hstride_index = OperandIndex{init_param.inputs[4]};
1166 const auto vstride_index = OperandIndex{init_param.inputs[5]};
1168 param.padding.type =
1169 NNAPIConvert::getPaddingType(operands.at(padding_index).asScalar<PaddingCode>());
1170 param.stride = makeStride(operands, hstride_index, vstride_index);
1172 return new operation::TransposeConv{inputs, outputs, param};
1175 _map[ANEURALNETWORKS_SQRT] =
1176 getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::SQRT);
1178 // ANEURALNETWORKS_SQRT_EX is deprecated
1179 // TODO Remove ANEURALNETWORKS_SQRT_EX
1180 _map[ANEURALNETWORKS_SQRT_EX] = _map[ANEURALNETWORKS_SQRT];
1182 _map[ANEURALNETWORKS_LOGICAL_OR] = getElementwiseBinaryGenerator(
1183 operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR);
1185 // ANEURALNETWORKS_LOGICAL_OR_EX is deprecated
1186 // TODO Remove ANEURALNETWORKS_LOGICAL_OR_EX
1187 _map[ANEURALNETWORKS_LOGICAL_OR_EX] = [](const OperationFactory::Param &init_param,
1188 Operands &operands) {
1189 assert(init_param.input_count == 2 && init_param.output_count == 1);
1191 OperandIndexSequence outputs{init_param.outputs[0]};
1193 // Each input should be interpreted as follows:
1195 // 0 -> input0 Tensor Index
1196 // 1 -> input1 Tensor Index
1197 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1199 // This operation's operands must be boolean type.
1200 replaceDataType(operands, inputs.at(0), DataType::BOOL8);
1201 replaceDataType(operands, inputs.at(1), DataType::BOOL8);
1202 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
1204 operation::ElementwiseBinary::Param param;
1205 param.op_type = operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR;
1207 return new operation::ElementwiseBinary{inputs, outputs, param};
1210 _map[ANEURALNETWORKS_LOGICAL_NOT] =
1211 getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::LOGICAL_NOT);
1213 // ANEURALNETWORKS_LOGICAL_NOT_EX is deprecated
1214 // TODO Remove ANEURALNETWORKS_LOGICAL_NOT_EX
1215 _map[ANEURALNETWORKS_LOGICAL_NOT_EX] = [](const OperationFactory::Param &init_param,
1216 Operands &operands) {
1217 assert(init_param.input_count == 1 && init_param.output_count == 1);
1219 OperandIndexSequence outputs{init_param.outputs[0]};
1221 // Each input should be interpreted as follows:
1223 // 0 -> input Tensor Index
1224 OperandIndexSequence inputs{init_param.inputs[0]};
1226 // This operation's operands must be boolean type.
1227 replaceDataType(operands, inputs.at(0), DataType::BOOL8);
1228 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
1230 operation::ElementwiseUnary::Param param;
1231 param.op_type = operation::ElementwiseUnary::Type::LOGICAL_NOT;
1233 return new operation::ElementwiseUnary{inputs, outputs, param};
1236 _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) {
1237 assert(init_param.input_count == 23 && init_param.output_count == 4);
1239 // Each input should be interpreted as follows:
1241 // 0 -> Input Tensor Index
1242 // 1 -> Input to Input Tensor Index
1243 // 2 -> Input to Forget Tensor Index
1244 // 3 -> Input to Cell Tensor Index
1245 // 4 -> Input to Output Tensor Index
1246 // 5 -> Recurrent to Input Weights Tensor Index
1247 // 6 -> Recurrent to Forget Weights Tensor Index
1248 // 7 -> Recurrent to Cell Weights Tensor Index
1249 // 8 -> Recurrent to Output Weights Tensor Index
1250 // 9 -> Cell to Input Weights Tensor Index
1251 // 10 -> Cell to Forget Weights Tensor Index
1252 // 11 -> Cell to Output Weights Tensor Index
1253 // 12 -> Input Gate Bias Tensor Index
1254 // 13 -> Forget Gate Bias Tensor Index
1255 // 14 -> Cell Bias Tensor Index
1256 // 15 -> Output Gate Bias Tensor Index
1257 // 16 -> Projection Weights Tensor Index
1258 // 17 -> Projection Bias Tensor Index
1259 // 18 -> Output State In Tensor Index
1260 // 19 -> Cell State In Tensor Index
1261 OperandIndexSequence inputs;
1262 for (uint32_t n = 0; n < init_param.input_count - 3; ++n)
1264 inputs.append(OperandIndex{init_param.inputs[n]});
1267 // Each output should be interpreted as follows:
1269 // 0 -> Scratch Buffer Tensor Index
1270 // 1 -> Output State Out Tensor Index
1271 // 2 -> Cell State Out Tensor Index
1272 // 3 -> Output Tensor Index
1273 OperandIndexSequence outputs;
1274 for (uint32_t n = 0; n < init_param.output_count; ++n)
1276 outputs.append(OperandIndex{init_param.outputs[n]});
1279 operation::LSTM::Param param;
1280 const auto activation_index = OperandIndex{init_param.inputs[20]};
1281 switch (operands.at(activation_index).asScalar<int32_t>())
1284 param.activation = Activation::NONE;
1287 param.activation = Activation::RELU;
1290 param.activation = Activation::RELU1;
1293 param.activation = Activation::RELU6;
1296 param.activation = Activation::TANH;
1299 param.activation = Activation::SIGMOID;
1302 throw std::runtime_error("Unsupported activation type");
1305 param.cell_threshold = operands.at(OperandIndex{init_param.inputs[21]}).asScalar<float>();
1306 param.projection_threshold = operands.at(OperandIndex{init_param.inputs[22]}).asScalar<float>();
1308 return new operation::LSTM{inputs, outputs, param};
1311 // ANEURALNETWORKS_EQUAL_EX is deprecated
1312 // TODO Remove ANEURALNETWORKS_EQUAL_EX
1313 _map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param,
1314 Operands &operands) {
1315 assert(init_param.input_count == 2 && init_param.output_count == 1);
1317 OperandIndexSequence outputs{init_param.outputs[0]};
1319 // Each input should be interpreted as follows:
1321 // 0 -> input0 Tensor Index
1322 // 1 -> input1 Tensor Index
1323 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1325 operation::Comparison::Param param;
1326 param.comparison_type = operation::Comparison::ComparisonType::Equal;
1328 // Output operand type must be boolean
1329 replaceDataType(operands, outputs.at(0), DataType::BOOL8);
1331 return new operation::Comparison{inputs, outputs, param};
1334 _map[ANEURALNETWORKS_SQUARED_DIFFERENCE_EX] = [](const OperationFactory::Param &init_param,
1336 assert(init_param.input_count == 2 && init_param.output_count == 1);
1338 OperandIndexSequence outputs{init_param.outputs[0]};
1340 // Each input should be interpreted as follows:
1342 // 0 -> LHS Tensor Index
1343 // 1 -> RHS Tensor Index
1344 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1346 return new operation::SquaredDifference{inputs, outputs};
1349 _map[ANEURALNETWORKS_TOPK_V2] = [](const OperationFactory::Param &init_param,
1350 Operands &operands) {
1351 assert(init_param.input_count == 2 && init_param.output_count == 2);
1353 // Each output should be interpreted as follows:
1355 // 0 -> Index for Output Values
1356 // 1 -> Index for Output Indices
1357 OperandIndexSequence outputs{init_param.outputs[0], init_param.outputs[1]};
1359 // Each input should be interpreted as follows:
1361 // 0 -> Index for Input Data
1363 OperandIndexSequence inputs{init_param.inputs[0]};
1365 operation::TopKV2::Param param;
1366 param.k = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1368 return new operation::TopKV2{inputs, outputs, param};
1371 // ANEURALNETWORKS_CAST_EX is deprecated
1372 // TODO Remove ANEURALNETWORKS_CAST_EX
1373 _map[ANEURALNETWORKS_TOPK_V2_EX] = _map[ANEURALNETWORKS_TOPK_V2];
1375 _map[ANEURALNETWORKS_GATHER] = [](const OperationFactory::Param &init_param, Operands &operands) {
1376 assert(init_param.input_count == 3 && init_param.output_count == 1);
1378 OperandIndexSequence outputs{init_param.outputs[0]};
1380 // Each input should be interpreted as follows:
1382 // 0 -> input Tensor Index
1384 // 2 -> indices Tensor Index
1385 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[2]};
1387 operation::Gather::Param param;
1388 param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
1390 return new operation::Gather{inputs, outputs, param};
1393 // ANEURALNETWORKS_GATHER_EX is deprecated
1394 // TODO Remove ANEURALNETWORKS_GATHER_EX
1395 _map[ANEURALNETWORKS_GATHER_EX] = _map[ANEURALNETWORKS_GATHER];
1397 _map[ANEURALNETWORKS_NEG] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::NEG);
1399 // ANEURALNETWORKS_NEG_EX is deprecated
1400 // TODO Remove ANEURALNETWORKS_NEG_EX
1401 _map[ANEURALNETWORKS_NEG_EX] = _map[ANEURALNETWORKS_NEG];
1403 _map[ANEURALNETWORKS_ABS] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ABS);
1405 // ANEURALNETWORKS_ABS_EX is deprecated
1406 // TODO Remove ANEURALNETWORKS_ABS_EX
1407 _map[ANEURALNETWORKS_ABS_EX] = _map[ANEURALNETWORKS_ABS];
1409 _map[ANEURALNETWORKS_ARGMAX] = [](const OperationFactory::Param &init_param, Operands &operands) {
1410 assert(init_param.input_count == 2 && init_param.output_count == 1);
1412 OperandIndexSequence outputs{init_param.outputs[0]};
1414 // Each input should be interpreted as follows:
1416 // 0 -> Input Tensor Index
1417 // 1 -> Axis Tensor Index
1418 OperandIndexSequence inputs{init_param.inputs[0]};
1420 operation::ArgMax::Param param;
1421 param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1422 // NNAPI ARGMAX output type is always int32
1423 param.output_type = DataType::INT32;
1425 return new operation::ArgMax{inputs, outputs, param};
1428 // ANEURALNETWORKS_ARGMAX_EX is deprecated
1429 // TODO Remove ANEURALNETWORKS_ARGMAX_EX
1430 _map[ANEURALNETWORKS_ARGMAX_EX] = _map[ANEURALNETWORKS_ARGMAX];
1432 _map[ANEURALNETWORKS_DEQUANTIZE] =
1433 getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::DEQUANTIZE);
1435 _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) {
1436 assert(init_param.input_count == 3 && init_param.output_count == 1);
1438 OperandIndexSequence outputs{init_param.outputs[0]};
1440 // Each input should be interpreted as follows:
1442 // 0 -> ifm Tensor Index
1443 // 1 -> axis Tensor Index
1444 // 2 -> keep_dims Index
1445 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1447 operation::Reduce::Param param;
1448 param.reduce_type = operation::Reduce::ReduceType::MEAN;
1449 param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>() != 0;
1451 return new operation::Reduce{inputs, outputs, param};
1454 _map[ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION] = [](const OperationFactory::Param &init_param,
1455 Operands &operands) {
1456 assert(init_param.input_count == 5 && init_param.output_count == 1);
1458 OperandIndexSequence outputs{init_param.outputs[0]};
1460 OperandIndexSequence inputs{init_param.inputs[0]};
1462 operation::LocalResponseNormalization::Param param;
1463 param.radius = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1464 param.bias = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<float>();
1465 param.alpha = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<float>();
1466 param.beta = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<float>();
1468 return new operation::LocalResponseNormalization{inputs, outputs, param};
1471 _map[ANEURALNETWORKS_DEPTH_TO_SPACE] = [](const OperationFactory::Param &init_param,
1472 Operands &operands) {
1473 assert(init_param.input_count == 2 && init_param.output_count == 1);
1475 OperandIndexSequence outputs{init_param.outputs[0]};
1477 // Each input should be interpreted as follows:
1479 // 0 -> Input Tensor Index
1480 // 1 -> Block size Index
1481 OperandIndexSequence inputs{init_param.inputs[0]};
1483 operation::DepthToSpace::Param param;
1484 param.block_size = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1486 return new operation::DepthToSpace{inputs, outputs, param};
1489 _map[ANEURALNETWORKS_PACK_EX] = [](const OperationFactory::Param &init_param,
1490 Operands &operands) {
1491 assert(init_param.input_count >= 3 && init_param.output_count == 1);
1493 OperandIndexSequence outputs{init_param.outputs[0]};
1494 OperandIndexSequence inputs;
1495 for (uint32_t n = 0; n < init_param.input_count - 2; ++n)
1497 inputs.append(OperandIndex{init_param.inputs[n]});
1500 operation::Pack::Param param;
1501 const auto num_index = OperandIndex{init_param.inputs[init_param.input_count - 2]};
1502 const auto axis_index = OperandIndex{init_param.inputs[init_param.input_count - 1]};
1503 param.num = operands.at(num_index).asScalar<int32_t>();
1504 param.axis = operands.at(axis_index).asScalar<int32_t>();
1506 return new operation::Pack{inputs, outputs, param};
1509 _map[ANEURALNETWORKS_REDUCE_MIN] =
1510 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::MIN);
1512 // ANEURALNETWORKS_REDUCE_MIN_EX is deprecated
1513 // TODO Remove ANEURALNETWORKS_REDUCE_MIN_EX
1514 _map[ANEURALNETWORKS_REDUCE_MIN_EX] = _map[ANEURALNETWORKS_REDUCE_MIN];
1516 _map[ANEURALNETWORKS_SPLIT] = [](const OperationFactory::Param &init_param, Operands &operands) {
1517 assert(init_param.input_count == 3);
1518 assert(init_param.output_count >= 1); // At least one output tensor and axis
1520 OperandIndexSequence inputs{init_param.inputs[0]};
1521 OperandIndexSequence outputs;
1522 for (uint32_t n = 0; n < init_param.output_count; ++n)
1524 outputs.append(OperandIndex{init_param.outputs[n]});
1527 operation::Split::Param param;
1528 param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
1529 param.num_splits = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<std::int32_t>();
1531 return new operation::Split{inputs, outputs, param};
1534 _map[ANEURALNETWORKS_SPLIT_V_EX] = [](const OperationFactory::Param &init_param,
1535 Operands &operands) {
1536 assert(init_param.input_count == 4);
1537 assert(init_param.output_count >= 1); // At least one output tensor and axis
1539 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
1540 OperandIndexSequence outputs;
1541 for (uint32_t n = 0; n < init_param.output_count; ++n)
1543 outputs.append(OperandIndex{init_param.outputs[n]});
1546 operation::SplitV::Param param;
1547 param.num_splits = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<std::int32_t>();
1548 return new operation::SplitV{inputs, outputs, param};
1551 // ANEURALNETWORKS_SPLIT_EX is deprecated
1552 // TODO Remove ANEURALNETWORKS_SPLIT_EX
1553 _map[ANEURALNETWORKS_SPLIT_EX] = _map[ANEURALNETWORKS_SPLIT];
1555 _map[ANEURALNETWORKS_UNPACK_EX] = [](const OperationFactory::Param &init_param,
1556 Operands &operands) {
1557 assert(init_param.input_count == 3 && init_param.output_count >= 1);
1559 OperandIndexSequence inputs{init_param.inputs[0]};
1560 OperandIndexSequence outputs;
1561 for (uint32_t n = 0; n < init_param.output_count; ++n)
1563 outputs.append(OperandIndex{init_param.outputs[n]});
1566 operation::Unpack::Param param;
1567 const auto num_index = OperandIndex{init_param.inputs[1]};
1568 const auto axis_index = OperandIndex{init_param.inputs[2]};
1569 param.num = operands.at(num_index).asScalar<int32_t>();
1570 param.axis = operands.at(axis_index).asScalar<int32_t>();
1572 return new operation::Unpack{inputs, outputs, param};
1575 _map[ANEURALNETWORKS_PAD] = [](const OperationFactory::Param &init_param, Operands &) {
1576 assert(init_param.input_count >= 2 && init_param.input_count <= 3 &&
1577 init_param.output_count >= 1);
1579 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1580 if (init_param.input_count == 3)
1582 inputs.append(OperandIndex{init_param.inputs[2]});
1584 OperandIndexSequence outputs{init_param.outputs[0]};
1586 return new operation::Pad{inputs, outputs};
1589 _map[ANEURALNETWORKS_PAD_V2] = _map[ANEURALNETWORKS_PAD];
1591 _map[ANEURALNETWORKS_MINIMUM] =
1592 getElementwiseBinaryGenerator(operation::ElementwiseBinary::ElementwiseBinaryType::MIN);
1594 _map[ANEURALNETWORKS_MAXIMUM] =
1595 getElementwiseBinaryGenerator(operation::ElementwiseBinary::ElementwiseBinaryType::MAX);
1597 _map[ANEURALNETWORKS_ONE_HOT_EX] = [](const OperationFactory::Param &init_param,
1598 Operands &operands) {
1599 assert(init_param.input_count == 5);
1600 assert(init_param.output_count == 1);
1601 // Each input should be interpreted as follows:
1603 // 0 -> indices tensor
1604 // 1 -> depth tensor
1605 // 2 -> on_value tensor
1606 // 3 -> off_value tensor
1608 OperandIndexSequence inputs;
1609 for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
1611 inputs.append(OperandIndex{init_param.inputs[n]});
1613 OperandIndexSequence outputs{init_param.outputs[0]};
1615 operation::OneHot::Param param;
1616 param.axis = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>();
1618 return new operation::OneHot{inputs, outputs, param};
1621 _map[ANEURALNETWORKS_COS_EX] =
1622 getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::COS);
1624 _map[ANEURALNETWORKS_SIN] = getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::SIN);
1626 _map[ANEURALNETWORKS_SHAPE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
1627 assert(init_param.input_count == 1 && init_param.output_count == 1);
1629 OperandIndexSequence inputs{init_param.inputs[0]};
1630 OperandIndexSequence outputs{init_param.outputs[0]};
1632 return new operation::Shape{inputs, outputs};
1635 _map[ANEURALNETWORKS_REDUCE_PROD] =
1636 getReduceGenerator(onert::ir::operation::Reduce::ReduceType::PROD);
1638 _map[ANEURALNETWORKS_ROUND_EX] =
1639 getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ROUND);
1641 _map[ANEURALNETWORKS_RANGE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
1642 assert(init_param.input_count == 3 && init_param.output_count == 1);
1644 OperandIndexSequence outputs{init_param.outputs[0]};
1646 // Each input should be interpreted as follows:
1647 // 0 -> start Tensor Index
1648 // 1 -> limit Tensor Index
1649 // 2 -> delta Tensor Index
1651 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
1653 return new operation::Range{inputs, outputs};
1656 // Each input should be interpreted as follows:
1657 // 0 -> LHS Tensor Index
1658 // 1 -> RHS Tensor Index
1659 _map[ANEURALNETWORKS_POW] = createSimpleBinaryOp<operation::Pow>;
1661 // Each input should be interpreted as follows:
1662 // 0 -> A tensor, specifying the input.
1663 // 1 -> A 1-D tensor, specifying the value
1664 _map[ANEURALNETWORKS_FILL_EX] = createSimpleBinaryOp<operation::Fill>;
1666 _map[ANEURALNETWORKS_ZEROS_LIKE_EX] =
1667 getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::ZEROS_LIKE);
1668 // Each input should be interpreted as follows:
1669 // 0 -> Input Tensor Index
1670 // 1 -> Multiple Tensor Index
1671 _map[ANEURALNETWORKS_TILE] = createSimpleBinaryOp<operation::Tile>;
1673 _map[ANEURALNETWORKS_MATRIX_BAND_PART_EX] = [](const OperationFactory::Param &init_param,
1675 assert(init_param.input_count == 3);
1676 assert(init_param.output_count == 1);
1677 // Each input should be interpreted as follows:
1679 // 0 -> A tensor, input
1680 // 1 -> A 0-D tensor, number of lower diagnonals to keep
1681 // 2 -> A 0-D tensor, number of upper diagnonals to keep
1682 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
1683 OperandIndexSequence outputs{init_param.outputs[0]};
1685 return new operation::MatrixBandPart{inputs, outputs};
1688 _map[ANEURALNETWORKS_BATCH_MATMUL_EX] = [](const OperationFactory::Param &init_param,
1689 Operands &operands) {
1690 assert(init_param.input_count == 4 && init_param.output_count == 1);
1692 OperandIndexSequence outputs{init_param.outputs[0]};
1694 // Each input should be interpreted as follows:
1696 // 0 -> Lhs Tensor Index
1697 // 1 -> Rhs Tensor Index
1698 // 2 -> adj_x boolean scalar Index
1699 // 3 -> adj_y boolean scalar Index
1701 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1703 operation::BatchMatMul::Param param;
1704 param.adj_x = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<bool>();
1705 param.adj_y = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<bool>();
1707 return new operation::BatchMatMul{inputs, outputs, param};
1710 _map[ANEURALNETWORKS_EINSUM_EX] = [](const OperationFactory::Param &init_param,
1711 Operands &operands) {
1712 // Each input should be interpreted as follows:
1714 // 0....n - 1 -> n Input Tensors Index
1716 assert(init_param.input_count >= 1 && init_param.output_count == 1);
1718 OperandIndexSequence inputs;
1719 for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
1721 inputs.append(OperandIndex{init_param.inputs[n]});
1723 OperandIndexSequence outputs{init_param.outputs[0]};
1725 operation::Einsum::Param param;
1726 const OperandIndex equation_index{init_param.inputs[init_param.input_count - 1]};
1727 std::vector<char> equation_vector = operands.at(equation_index).asVector<char>();
1728 param.equation = std::string(equation_vector.begin(), equation_vector.end());
1730 return new operation::Einsum{inputs, outputs, param};
1733 // 0 -> Input Tensor Index
1734 // 1 -> int32, int64, An 1-D int tensor Index
1735 _map[ANEURALNETWORKS_BROADCAST_TO_EX] = createSimpleBinaryOp<operation::BroadcastTo>;
1737 _map[ANEURALNETWORKS_STATELESS_RANDOM_UNIFORM_EX] = [](const OperationFactory::Param &init_param,
1739 assert(init_param.input_count == 2 && init_param.output_count == 1);
1740 OperandIndexSequence outputs{init_param.outputs[0]};
1742 // Each input should be interpreted as follows:
1744 // 0 -> Shape Tensor Index
1745 // 1 -> int32, int64, An 1-D int tensor Index
1747 OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
1749 return new operation::StatelessRandomUniform{inputs, outputs};
1752 _map[ANEURALNETWORKS_FUSED_BATCH_NORM_V3_EX] = [](const OperationFactory::Param &init_param,
1753 Operands &operands) {
1754 // Each input should be interpreted as follows:
1756 // 0....4 -> 5 Input Tensors Index
1757 // n-2 -> is_training
1758 // n-1 -> data_format
1761 assert(init_param.input_count == 8 && init_param.output_count == 1);
1763 OperandIndexSequence inputs;
1764 for (uint32_t n = 0; n < init_param.input_count - 3; ++n)
1766 inputs.append(OperandIndex{init_param.inputs[n]});
1768 OperandIndexSequence outputs{init_param.outputs[0]};
1770 operation::FusedBatchNorm::Param param;
1771 const OperandIndex is_training_index{init_param.inputs[init_param.input_count - 3]};
1772 param.is_training = operands.at(is_training_index).asScalar<bool>();
1774 const OperandIndex data_format_index{init_param.inputs[init_param.input_count - 2]};
1775 std::vector<char> data_format_vector = operands.at(data_format_index).asVector<char>();
1776 param.data_format = std::string(data_format_vector.begin(), data_format_vector.end());
1778 const OperandIndex epsilon_index{init_param.inputs[init_param.input_count - 1]};
1779 param.epsilon = operands.at(epsilon_index).asScalar<float>();
1780 return new operation::FusedBatchNorm{inputs, outputs, param};
1783 _map[ANEURALNETWORKS_LOG_SOFTMAX] = [](const OperationFactory::Param &init_param,
1784 Operands &operands) {
1785 assert(init_param.input_count == 3 && init_param.output_count == 1);
1787 // Each input should be interpreted as follows:
1789 // 0 -> A tensor specifying the input logits.
1790 // 1 -> A scalar, specifying the positive scaling factor for the exponent, beta.
1791 // 2 -> An scalar specifying the axis to reduce across.
1793 OperandIndexSequence inputs{init_param.inputs[0]};
1794 OperandIndexSequence outputs{init_param.outputs[0]};
1796 const auto beta_index = OperandIndex{init_param.inputs[1]};
1797 const auto axis_index = OperandIndex{init_param.inputs[2]};
1799 operation::LogSoftmax::Param param;
1800 param.beta = operands.at(beta_index).asScalar<float>();
1801 param.axis = operands.at(axis_index).asScalar<int>();
1803 return new operation::LogSoftmax{inputs, outputs, param};
1806 _map[ANEURALNETWORKS_QUANTIZE] =
1807 getElementwiseUnaryGenerator(operation::ElementwiseUnary::Type::QUANTIZE);
1810 Operation *OperationFactory::create(ANeuralNetworksOperationType type,
1811 const OperationFactory::Param ¶m, Operands &operands)
1813 auto it = _map.find(type);
1814 if (it == _map.end())
1816 throw std::runtime_error("Unsupported operation type: " + std::to_string(type));
1818 return it->second(param, operands);