Add NNAPI_Delegation for SQUEEZE (#3065)
authorPrasanna R/System SW /SRI-Bangalore/Engineer/삼성전자 <prasanna.r@samsung.com>
Wed, 17 Oct 2018 05:48:33 +0000 (11:18 +0530)
committer이춘석/동작제어Lab(SR)/Staff Engineer/삼성전자 <chunseok.lee@samsung.com>
Wed, 17 Oct 2018 05:48:33 +0000 (14:48 +0900)
This patch adds NNAPI delegation for the SQUEEZE op.
Related issues: #2891, #2884

Signed-off-by: prasannar <prasanna.r@samsung.com>
libs/support/tflite/src/nnapi_delegate.cpp

index e7eff3b..59f76bf 100644 (file)
@@ -231,6 +231,17 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
       augmented_inputs.push_back(next_id++);
     };
 
+    auto add_vector_int32 = [&](const int* values, uint32_t num_values) {
+      ANeuralNetworksOperandType operand_type{
+          .type = ANEURALNETWORKS_TENSOR_INT32,
+          .dimensionCount = 1,
+          .dimensions = &num_values};
+      CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
+      CHECK_NN(ANeuralNetworksModel_setOperandValue(
+          nn_model, next_id, values, sizeof(int32_t) * num_values));
+      augmented_inputs.push_back(next_id++);
+    };
+
     // Handle state tensors of RNN, LSTM, SVDF.
     // For each state_out tensor, a corresponding state_in operand needs to be
     // created for NNAPI.
@@ -340,6 +351,14 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
       add_scalar_int32(builtin->activation);
     };
 
+    auto add_squeeze_params = [&add_vector_int32](void* data) {
+      const auto* builtin = reinterpret_cast<TfLiteSqueezeParams*>(data);
+      // Note that we add the squeeze dimensions even if the dimensions were
+      // unspecified (empty), as NNAPI requires the operand.
+      add_vector_int32(builtin->squeeze_dims,
+                       static_cast<uint32_t>(builtin->num_squeeze_dims));
+    };
+
     // Handle optional input tensors.
     auto add_optional_tensors = [&nn_model, &augmented_inputs,
                                  &next_id](int nn_type) {
@@ -487,6 +506,11 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
         nn_op_type = ANEURALNETWORKS_SUB;
         add_add_params();
         break;
+      case tflite::BuiltinOperator_SQUEEZE:
+        nnapi_version = 11;  // require NNAPI 1.1
+        nn_op_type = ANEURALNETWORKS_SQUEEZE;
+        add_squeeze_params(node.builtin_data);
+        break;
       case tflite::BuiltinOperator_STRIDED_SLICE:
         add_strided_slice_params(node.builtin_data);
         nn_op_type = ANEURALNETWORKS_STRIDED_SLICE;
@@ -558,7 +582,6 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
       case tflite::BuiltinOperator_PADV2:
       case tflite::BuiltinOperator_CALL:
       case tflite::BuiltinOperator_SKIP_GRAM:
-      case tflite::BuiltinOperator_SQUEEZE:
       case tflite::BuiltinOperator_LOG_SOFTMAX:
       case tflite::BuiltinOperator_DELEGATE:
       case tflite::BuiltinOperator_PRELU: