Imported Upstream version 1.7.0
[platform/core/ml/nnfw.git] / runtime / onert / frontend / nnapi / wrapper / OperationFactory.cc
index 10e7c03..94791f8 100644 (file)
@@ -82,6 +82,30 @@ uint32_t getUint32Scalar(Operands &operands, const OperandIndex index)
   return static_cast<uint32_t>(int32_value);
 }
 
+OperationFactory::Generator
+getReduceGenerator(const onert::ir::operation::Reduce::ReduceType reduce_type)
+{
+  return [reduce_type](const OperationFactory::Param &init_param, Operands &operands) {
+    assert(init_param.input_count == 3);
+    assert(init_param.output_count == 1);
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> Input Tensor Index
+    //  1 -> Reduced Axes Tensor Index
+    //  2 -> keep_dims Index
+
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    operation::Reduce::Param param;
+    param.reduce_type = reduce_type;
+    param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0;
+
+    return new operation::Reduce{inputs, outputs, param};
+  };
+}
+
 } // namespace
 
 OperationFactory &OperationFactory::get()
@@ -348,7 +372,6 @@ OperationFactory::OperationFactory()
     operation::Concat::Param param;
     const OperandIndex axis_index{init_param.inputs[init_param.input_count - 1]};
     param.axis = operands.at(axis_index).asScalar<int32_t>();
-    param.rank = operands.at(outputs.at(0)).shape().rank();
 
     return new operation::Concat{inputs, outputs, param};
   };
@@ -362,12 +385,12 @@ OperationFactory::OperationFactory()
     //  1 -> A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32, defining the shape of the output
     //  tensor
 
-    // TODO Second input should be shape tensor (init_param.inputs[1])
-    //      Currently unused since assume that it is same with output tensor size
-    OperandIndexSequence inputs{init_param.inputs[0] /* , init_param.inputs[1] */};
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
     OperandIndexSequence outputs{init_param.outputs[0]};
 
-    return new operation::Reshape{inputs, outputs};
+    operation::Reshape::Param param{};
+
+    return new operation::Reshape{inputs, outputs, param};
   };
 
   _map[ANEURALNETWORKS_FULLY_CONNECTED] = [](const OperationFactory::Param &init_param,
@@ -421,12 +444,12 @@ OperationFactory::OperationFactory()
     //  0 -> input Tensor Index
     OperandIndexSequence inputs{init_param.inputs[0]};
 
-    // NNAPI uses QUANT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's input/output
-    if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT8_ASYMM)
+    // NNAPI uses QUANT_UINT8_ASYMM to represent UINT8 type for ANEURALNETWORKS_CAST's input/output
+    if (operands.at(inputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
     {
       replaceDataType(operands, inputs.at(0), DataType::UINT8);
     }
-    if (operands.at(outputs.at(0)).typeInfo().type() == DataType::QUANT8_ASYMM)
+    if (operands.at(outputs.at(0)).typeInfo().type() == DataType::QUANT_UINT8_ASYMM)
     {
       replaceDataType(operands, outputs.at(0), DataType::UINT8);
     }
@@ -529,29 +552,10 @@ OperationFactory::OperationFactory()
     return new operation::Add{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_REDUCE_SUM] = [](const OperationFactory::Param &init_param,
-                                        Operands &operands) {
-    assert(init_param.input_count == 3);
-    assert(init_param.output_count == 1);
-
-    // Each input should be interpreted as follows:
-    //
-    //  0 -> Input Tensor Index
-    //  1 -> Reduced Axes Tensor Index
-    //  2 -> keep_dims Index
-
-    OperandIndexSequence inputs{init_param.inputs[0]};
-    OperandIndexSequence outputs{init_param.outputs[0]};
-    std::vector<std::int32_t> axes =
-        operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
-
-    operation::ReduceSum::Param param;
-    param.axes.assign(axes.cbegin(), axes.cend());
-    param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0;
-    param.rank = operands.at(inputs.at(0)).shape().rank();
+  _map[ANEURALNETWORKS_ADDV2_EX] = _map[ANEURALNETWORKS_ADD];
 
-    return new operation::ReduceSum{inputs, outputs, param};
-  };
+  _map[ANEURALNETWORKS_REDUCE_SUM] =
+      getReduceGenerator(onert::ir::operation::Reduce::ReduceType::SUM);
 
   // ANEURALNETWORKS_REDUCE_SUM_EX is deprecated
   // TODO Remove ANEURALNETWORKS_REDUCE_SUM_EX
@@ -578,7 +582,7 @@ OperationFactory::OperationFactory()
     return new operation::Sub{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_SLICE] = [](const OperationFactory::Param &init_param, Operands &operands) {
+  _map[ANEURALNETWORKS_SLICE] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 3 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -590,10 +594,7 @@ OperationFactory::OperationFactory()
     //  2 -> Sizes Tensor Index
     OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
 
-    operation::Slice::Param param;
-    param.rank = operands.at(inputs.at(0)).shape().rank();
-
-    return new operation::Slice{inputs, outputs, param};
+    return new operation::Slice{inputs, outputs};
   };
 
   _map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param,
@@ -632,7 +633,6 @@ OperationFactory::OperationFactory()
     param.end_mask = operands.at(OperandIndex{init_param.inputs[5]}).asScalar<std::int32_t>();
     param.shrink_axis_mask =
         operands.at(OperandIndex{init_param.inputs[6]}).asScalar<std::int32_t>();
-    param.rank = operands.at(inputs.at(0)).shape().rank();
 
     return new operation::StridedSlice{inputs, outputs, param};
   };
@@ -659,7 +659,6 @@ OperationFactory::OperationFactory()
 
     operation::Transpose::Param param;
     param.perm.assign(perm.cbegin(), perm.cend());
-    param.rank = operands.at(inputs.at(0)).shape().rank();
 
     return new operation::Transpose{inputs, outputs, param};
   };
@@ -738,6 +737,19 @@ OperationFactory::OperationFactory()
     return new operation::Tanh{inputs, outputs};
   };
 
+  _map[ANEURALNETWORKS_LOG] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 1 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> Input Tensor Index
+    OperandIndexSequence inputs{init_param.inputs[0]};
+
+    return new operation::Log{inputs, outputs};
+  };
+
   _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
@@ -789,6 +801,20 @@ OperationFactory::OperationFactory()
   // TODO Remove ANEURALNETWORKS_EXP_EX
   _map[ANEURALNETWORKS_EXP_EX] = _map[ANEURALNETWORKS_EXP];
 
+  _map[ANEURALNETWORKS_EXPAND_DIMS] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> Input Tensor Index
+    //  1 -> Axis Tensor Index
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
+
+    return new operation::ExpandDims{inputs, outputs};
+  };
+
   _map[ANEURALNETWORKS_GREATER] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 2 && init_param.output_count == 1);
 
@@ -903,28 +929,14 @@ OperationFactory::OperationFactory()
     return new operation::Comparison{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_REDUCE_MAX] = [](const OperationFactory::Param &init_param,
-                                        Operands &operands) {
-    assert(init_param.input_count == 3 && init_param.output_count == 1);
-
-    OperandIndexSequence outputs{init_param.outputs[0]};
-
-    // Each input should be interpreted as follows:
-    //
-    //  0 -> Input Tensor Index
-    //  1 -> Axis Tensor Index
-    //  2 -> keep_dims Index
-    OperandIndexSequence inputs{init_param.inputs[0]};
-    std::vector<std::int32_t> axes =
-        operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
+  _map[ANEURALNETWORKS_REDUCE_ALL] =
+      getReduceGenerator(onert::ir::operation::Reduce::ReduceType::ALL);
 
-    operation::ReduceMax::Param param;
-    param.axes.assign(axes.cbegin(), axes.cend());
-    param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0;
-    param.rank = operands.at(inputs.at(0)).shape().rank();
+  _map[ANEURALNETWORKS_REDUCE_ANY] =
+      getReduceGenerator(onert::ir::operation::Reduce::ReduceType::ANY);
 
-    return new operation::ReduceMax{inputs, outputs, param};
-  };
+  _map[ANEURALNETWORKS_REDUCE_MAX] =
+      getReduceGenerator(onert::ir::operation::Reduce::ReduceType::MAX);
 
   // ANEURALNETWORKS_REDUCE_MAX_EX is deprecated
   // TODO Remove ANEURALNETWORKS_REDUCE_MAX_EX
@@ -1019,6 +1031,36 @@ OperationFactory::OperationFactory()
     return new operation::RSQRT{inputs, outputs};
   };
 
+  _map[ANEURALNETWORKS_SELECT] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 3 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> Condition Tensor Index
+    //  1 -> Input X(true) Tensor Index
+    //  2 -> Input Y(false) Tensor Index
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
+
+    return new operation::Select{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_SELECT_V2_EX] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 3 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> Condition Tensor Index
+    //  1 -> Input X(true) Tensor Index
+    //  2 -> Input Y(false) Tensor Index
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
+
+    return new operation::Select{inputs, outputs};
+  };
+
   // ANEURALNETWORKS_RSQRT_EX is deprecated
   // TODO Remove ANEURALNETWORKS_RSQRT_EX
   _map[ANEURALNETWORKS_RSQRT_EX] = _map[ANEURALNETWORKS_RSQRT];
@@ -1082,6 +1124,20 @@ OperationFactory::OperationFactory()
     return new operation::ReLU6{inputs, outputs};
   };
 
+  _map[ANEURALNETWORKS_REVERSE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+    // Each input should be interpreted as follows:
+    //
+    // 0 -> Input Tensor Index
+    // 1 -> Axis Tensor Index
+
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    return new operation::Reverse{inputs, outputs};
+  };
+
   _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, Operands &operands) {
     assert(init_param.input_count == 6 && init_param.output_count == 2);
 
@@ -1250,7 +1306,7 @@ OperationFactory::OperationFactory()
   };
 
   _map[ANEURALNETWORKS_L2_NORMALIZATION] = [](const OperationFactory::Param &init_param,
-                                              Operands &operands) {
+                                              Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
     OperandIndexSequence outputs{init_param.outputs[0]};
@@ -1259,10 +1315,7 @@ OperationFactory::OperationFactory()
     //  0 -> input Tensor Index
     OperandIndexSequence inputs{init_param.inputs[0]};
 
-    operation::L2Normalization::Param param;
-    param.rank = operands.at(inputs.at(0)).shape().rank();
-
-    return new operation::L2Normalization{inputs, outputs, param};
+    return new operation::L2Normalization{inputs, outputs};
   };
 
   _map[ANEURALNETWORKS_HASHTABLE_LOOKUP] = [](const OperationFactory::Param &init_param,
@@ -1588,7 +1641,6 @@ OperationFactory::OperationFactory()
 
     operation::Gather::Param param;
     param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<int32_t>();
-    param.rank = operands.at(inputs.at(0)).shape().rank();
 
     return new operation::Gather{inputs, outputs, param};
   };
@@ -1644,7 +1696,6 @@ OperationFactory::OperationFactory()
 
     operation::ArgMax::Param param;
     param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
-    param.rank = operands.at(inputs.at(0)).shape().rank();
 
     return new operation::ArgMax{inputs, outputs, param};
   };
@@ -1676,16 +1727,13 @@ OperationFactory::OperationFactory()
     //  0 -> ifm Tensor Index
     //  1 -> axis Tensor Index
     //  2 -> keep_dims Index
-    OperandIndexSequence inputs{init_param.inputs[0]};
-    std::vector<std::int32_t> axes =
-        operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
 
-    operation::Mean::Param param;
-    param.axes.assign(axes.cbegin(), axes.cend());
+    operation::Reduce::Param param;
+    param.reduce_type = operation::Reduce::ReduceType::MEAN;
     param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int32_t>() != 0;
-    param.rank = operands.at(inputs.at(0)).shape().rank();
 
-    return new operation::Mean{inputs, outputs, param};
+    return new operation::Reduce{inputs, outputs, param};
   };
 
   _map[ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION] = [](const OperationFactory::Param &init_param,
@@ -1739,33 +1787,12 @@ OperationFactory::OperationFactory()
     const auto axis_index = OperandIndex{init_param.inputs[init_param.input_count - 1]};
     param.num = operands.at(num_index).asScalar<int32_t>();
     param.axis = operands.at(axis_index).asScalar<int32_t>();
-    param.rank = operands.at(outputs.at(0)).shape().rank();
 
     return new operation::Pack{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_REDUCE_MIN] = [](const OperationFactory::Param &init_param,
-                                        Operands &operands) {
-    assert(init_param.input_count == 3 && init_param.output_count == 1);
-
-    OperandIndexSequence outputs{init_param.outputs[0]};
-
-    // Each input should be interpreted as follows:
-    //
-    //  0 -> Input Tensor Index
-    //  1 -> Axis Tensor Index
-    //  2 -> keep_dims Index
-    OperandIndexSequence inputs{init_param.inputs[0]};
-    std::vector<std::int32_t> axes =
-        operands.at(OperandIndex{init_param.inputs[1]}).asVector<std::int32_t>();
-
-    operation::ReduceMin::Param param;
-    param.axes.assign(axes.cbegin(), axes.cend());
-    param.keep_dims = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<int8_t>() != 0;
-    param.rank = operands.at(inputs.at(0)).shape().rank();
-
-    return new operation::ReduceMin{inputs, outputs, param};
-  };
+  _map[ANEURALNETWORKS_REDUCE_MIN] =
+      getReduceGenerator(onert::ir::operation::Reduce::ReduceType::MIN);
 
   // ANEURALNETWORKS_REDUCE_MIN_EX is deprecated
   // TODO Remove ANEURALNETWORKS_REDUCE_MIN_EX
@@ -1785,7 +1812,6 @@ OperationFactory::OperationFactory()
     operation::Split::Param param;
     param.axis = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
     param.num_splits = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<std::int32_t>();
-    param.rank = operands.at(inputs.at(0)).shape().rank();
 
     return new operation::Split{inputs, outputs, param};
   };
@@ -1810,21 +1836,17 @@ OperationFactory::OperationFactory()
     const auto axis_index = OperandIndex{init_param.inputs[2]};
     param.num = operands.at(num_index).asScalar<int32_t>();
     param.axis = operands.at(axis_index).asScalar<int32_t>();
-    param.rank = operands.at(inputs.at(0)).shape().rank();
 
     return new operation::Unpack{inputs, outputs, param};
   };
 
-  _map[ANEURALNETWORKS_PAD] = [](const OperationFactory::Param &init_param, Operands &operands) {
+  _map[ANEURALNETWORKS_PAD] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 2 && init_param.output_count >= 1);
 
     OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
     OperandIndexSequence outputs{init_param.outputs[0]};
 
-    operation::Pad::Param param;
-    param.rank = operands.at(inputs.at(0)).shape().rank();
-
-    return new operation::Pad{inputs, outputs, param};
+    return new operation::Pad{inputs, outputs};
   };
 
   _map[ANEURALNETWORKS_MINIMUM] = [](const OperationFactory::Param &init_param, Operands &) {
@@ -1852,22 +1874,32 @@ OperationFactory::OperationFactory()
     // Each input should be interpreted as follows:
     //
     // 0 -> indices tensor
-    // 1 -> depth scalar
-    // 2 -> on_value scalar
-    // 3 -> off_value scalar
+    // 1 -> depth tensor
+    // 2 -> on_value tensor
+    // 3 -> off_value tensor
     // 4 -> axis scalar
-    OperandIndexSequence inputs{init_param.inputs[0]};
+    OperandIndexSequence inputs;
+    for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
+    {
+      inputs.append(OperandIndex{init_param.inputs[n]});
+    }
     OperandIndexSequence outputs{init_param.outputs[0]};
 
     operation::OneHot::Param param;
-    param.depth = operands.at(OperandIndex{init_param.inputs[1]}).asScalar<std::int32_t>();
-    param.on_value = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<float>();
-    param.off_value = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<float>();
     param.axis = operands.at(OperandIndex{init_param.inputs[4]}).asScalar<std::int32_t>();
 
     return new operation::OneHot{inputs, outputs, param};
   };
 
+  _map[ANEURALNETWORKS_COS_EX] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 1 && init_param.output_count == 1);
+
+    OperandIndexSequence inputs{init_param.inputs[0]};
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    return new operation::Cos{inputs, outputs};
+  };
+
   _map[ANEURALNETWORKS_SIN] = [](const OperationFactory::Param &init_param, Operands &) {
     assert(init_param.input_count == 1 && init_param.output_count == 1);
 
@@ -1885,6 +1917,222 @@ OperationFactory::OperationFactory()
 
     return new operation::Shape{inputs, outputs};
   };
+
+  _map[ANEURALNETWORKS_REDUCE_PROD] =
+      getReduceGenerator(onert::ir::operation::Reduce::ReduceType::PROD);
+
+  _map[ANEURALNETWORKS_ROUND_EX] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 1 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //  0 -> input Tensor Index
+    OperandIndexSequence inputs{init_param.inputs[0]};
+
+    return new operation::Round{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_RANGE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 3 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //  0 -> start Tensor Index
+    //  1 -> limit Tensor Index
+    //  2 -> delta Tensor Index
+
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
+
+    return new operation::Range{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_POW] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> LHS Tensor Index
+    //  1 -> RHS Tensor Index
+
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
+
+    return new operation::Pow{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_FILL_EX] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> A tensor, specifying the input.
+    //  1 -> A 1-D tensor, specifying the value
+
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    return new operation::Fill{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_ZEROS_LIKE_EX] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 1 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //  0 -> input Tensor Index
+    OperandIndexSequence inputs{init_param.inputs[0]};
+
+    return new operation::ZerosLike{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_TILE] = [](const OperationFactory::Param &init_param, Operands &) {
+    assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> Input Tensor Index
+    //  1 -> Multiples Tensor Index
+
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
+
+    return new operation::Tile{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_MATRIX_BAND_PART_EX] = [](const OperationFactory::Param &init_param,
+                                                 Operands &) {
+    assert(init_param.input_count == 3);
+    assert(init_param.output_count == 1);
+    // Each input should be interpreted as follows:
+    //
+    // 0 -> A tensor, input
+    // 1 -> A 0-D tensor, number of lower diagonals to keep
+    // 2 -> A 0-D tensor, number of upper diagonals to keep
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2]};
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    return new operation::MatrixBandPart{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_BATCH_MATMUL_EX] = [](const OperationFactory::Param &init_param,
+                                             Operands &operands) {
+    assert(init_param.input_count == 4 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> Lhs Tensor Index
+    //  1 -> Rhs Tensor Index
+    //  2 -> adj_x boolean scalar Index
+    //  3 -> adj_y boolean scalar Index
+
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
+
+    operation::BatchMatMul::Param param;
+    param.adj_x = operands.at(OperandIndex{init_param.inputs[2]}).asScalar<bool>();
+    param.adj_y = operands.at(OperandIndex{init_param.inputs[3]}).asScalar<bool>();
+
+    return new operation::BatchMatMul{inputs, outputs, param};
+  };
+
+  _map[ANEURALNETWORKS_EINSUM_EX] = [](const OperationFactory::Param &init_param,
+                                       Operands &operands) {
+    // Each input should be interpreted as follows:
+    //
+    //  0....n - 2 -> (n - 1) Input Tensors Index
+    //  n - 1 -> equation
+    assert(init_param.input_count >= 1 && init_param.output_count == 1);
+
+    OperandIndexSequence inputs;
+    for (uint32_t n = 0; n < init_param.input_count - 1; ++n)
+    {
+      inputs.append(OperandIndex{init_param.inputs[n]});
+    }
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    operation::Einsum::Param param;
+    const OperandIndex equation_index{init_param.inputs[init_param.input_count - 1]};
+    std::vector<char> equation_vector = operands.at(equation_index).asVector<char>();
+    param.equation = std::string(equation_vector.begin(), equation_vector.end());
+
+    return new operation::Einsum{inputs, outputs, param};
+  };
+
+  _map[ANEURALNETWORKS_BROADCAST_TO_EX] = [](const OperationFactory::Param &init_param,
+                                             Operands &) {
+    assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> Input Tensor Index
+    //  1 -> int32, int64, An 1-D int tensor Index
+
+    OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1]};
+
+    return new operation::BroadcastTo{inputs, outputs};
+  };
+
+  _map[ANEURALNETWORKS_FUSED_BATCH_NORM_V3_EX] = [](const OperationFactory::Param &init_param,
+                                                    Operands &operands) {
+    // Each input should be interpreted as follows:
+    //
+    //  0....4  -> 5 Input Tensors Index
+    //  n-3     -> is_training
+    //  n-2     -> data_format
+    //  n-1     -> epsilon
+
+    assert(init_param.input_count == 8 && init_param.output_count == 1);
+
+    OperandIndexSequence inputs;
+    for (uint32_t n = 0; n < init_param.input_count - 3; ++n)
+    {
+      inputs.append(OperandIndex{init_param.inputs[n]});
+    }
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    operation::FusedBatchNorm::Param param;
+    const OperandIndex is_training_index{init_param.inputs[init_param.input_count - 3]};
+    param.is_training = operands.at(is_training_index).asScalar<bool>();
+
+    const OperandIndex data_format_index{init_param.inputs[init_param.input_count - 2]};
+    std::vector<char> data_format_vector = operands.at(data_format_index).asVector<char>();
+    param.data_format = std::string(data_format_vector.begin(), data_format_vector.end());
+
+    const OperandIndex epsilon_index{init_param.inputs[init_param.input_count - 1]};
+    param.epsilon = operands.at(epsilon_index).asScalar<float>();
+    return new operation::FusedBatchNorm{inputs, outputs, param};
+  };
+
+  _map[ANEURALNETWORKS_LOG_SOFTMAX] = [](const OperationFactory::Param &init_param,
+                                         Operands &operands) {
+    assert(init_param.input_count == 3 && init_param.output_count == 1);
+
+    // Each input should be interpreted as follows:
+    //
+    //  0 -> A tensor specifying the input logits.
+    //  1 -> A scalar, specifying the positive scaling factor for the exponent, beta.
+    //  2 -> An scalar specifying the axis to reduce across.
+
+    OperandIndexSequence inputs{init_param.inputs[0]};
+    OperandIndexSequence outputs{init_param.outputs[0]};
+
+    const auto beta_index = OperandIndex{init_param.inputs[1]};
+    const auto axis_index = OperandIndex{init_param.inputs[2]};
+
+    operation::LogSoftmax::Param param;
+    param.beta = operands.at(beta_index).asScalar<float>();
+    param.axis = operands.at(axis_index).asScalar<int>();
+
+    return new operation::LogSoftmax{inputs, outputs, param};
+  };
 }
 
 Operation *OperationFactory::create(ANeuralNetworksOperationType type,