augmented_inputs.push_back(next_id++);
};
+ // Adds a 1-D INT32 constant tensor operand (length num_values) to the NNAPI
+ // model, sets its value from `values`, and appends its operand id to the
+ // op's augmented inputs. NOTE(review): assumes `int` and `int32_t` have the
+ // same size here (sizeof(int32_t) * num_values is used for a `const int*`
+ // buffer) — true on supported Android ABIs, but worth confirming.
+ auto add_vector_int32 = [&](const int* values, uint32_t num_values) {
+ ANeuralNetworksOperandType operand_type{
+ .type = ANEURALNETWORKS_TENSOR_INT32,
+ .dimensionCount = 1,
+ .dimensions = &num_values};
+ CHECK_NN(ANeuralNetworksModel_addOperand(nn_model, &operand_type))
+ CHECK_NN(ANeuralNetworksModel_setOperandValue(
+ nn_model, next_id, values, sizeof(int32_t) * num_values));
+ augmented_inputs.push_back(next_id++);
+ };
+
// Handle state tensors of RNN, LSTM, SVDF.
// For each state_out tensor, a corresponding state_in operand needs to be
// created for NNAPI.
add_scalar_int32(builtin->activation);
};
+ // Translates TfLiteSqueezeParams into the extra NNAPI input operand that
+ // ANEURALNETWORKS_SQUEEZE expects: a 1-D INT32 vector of axes to squeeze.
+ auto add_squeeze_params = [&add_vector_int32](void* data) {
+ const auto* builtin = reinterpret_cast<TfLiteSqueezeParams*>(data);
+ // Note that we add the squeeze dimensions even if the dimensions were
+ // unspecified (empty, num_squeeze_dims == 0), as NNAPI requires the
+ // operand to be present.
+ add_vector_int32(builtin->squeeze_dims,
+ static_cast<uint32_t>(builtin->num_squeeze_dims));
+ };
+
// Handle optional input tensors.
auto add_optional_tensors = [&nn_model, &augmented_inputs,
&next_id](int nn_type) {
nn_op_type = ANEURALNETWORKS_SUB;
add_add_params();
break;
+ case tflite::BuiltinOperator_SQUEEZE:
+ nnapi_version = 11; // require NNAPI 1.1
+ nn_op_type = ANEURALNETWORKS_SQUEEZE;
+ add_squeeze_params(node.builtin_data);
+ break;
case tflite::BuiltinOperator_STRIDED_SLICE:
add_strided_slice_params(node.builtin_data);
nn_op_type = ANEURALNETWORKS_STRIDED_SLICE;
case tflite::BuiltinOperator_PADV2:
case tflite::BuiltinOperator_CALL:
case tflite::BuiltinOperator_SKIP_GRAM:
- case tflite::BuiltinOperator_SQUEEZE:
case tflite::BuiltinOperator_LOG_SOFTMAX:
case tflite::BuiltinOperator_DELEGATE:
case tflite::BuiltinOperator_PRELU: