This commit adds an NNAPI delegate for the Gather operation.
- The axis parameter is added and passed through to the runtime (only axis == 0 is currently supported).
Signed-off-by: SungJin Choi <lotieye.choi@samsung.com>
augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
reinterpret_cast<uint32_t *>(node.outputs->data)));
continue;
+ case tflite::BuiltinOperator_GATHER:
+ add_gather_ex_params(node.builtin_data);
+ CHECK_NN(ANeuralNetworksModel_addOperationEx(
+ nn_model, ANEURALNETWORKS_GATHER_EX, static_cast<uint32_t>(augmented_inputs.size()),
+ augmented_inputs.data(), static_cast<uint32_t>(node.outputs->size),
+ reinterpret_cast<uint32_t *>(node.outputs->data)));
+ continue;
case tflite::BuiltinOperator_CONCAT_EMBEDDINGS:
case tflite::BuiltinOperator_LSH_PROJECTION:
case tflite::BuiltinOperator_SVDF:
case tflite::BuiltinOperator_CALL:
case tflite::BuiltinOperator_SKIP_GRAM:
case tflite::BuiltinOperator_RELU_N1_TO_1:
- case tflite::BuiltinOperator_GATHER:
case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
case tflite::BuiltinOperator_TRANSPOSE:
add_scalar_int32(builtin->in_data_type);
add_scalar_int32(builtin->out_data_type);
};
+
+ // Appends GATHER's axis parameter (from TfLiteGatherParams) as a scalar
+ // int32 input of the NNAPI GATHER_EX operation. Only axis == 0 is
+ // supported, so reject any other value up front — before the scalar is
+ // appended to the model's augmented inputs.
+ auto add_gather_ex_params = [&add_scalar_int32](void* data) {
+   auto builtin = reinterpret_cast<TfLiteGatherParams*>(data);
+   if (builtin->axis != 0) {
+     FATAL("GATHER does not support axis>0 in NNAPI");
+   }
+   add_scalar_int32(builtin->axis);
+ };
break;
case OperationType::GATHER:
{
- if (!allParametersPresent(2, 1))
+ if (!allParametersPresent(3, 1))
{
return ANEURALNETWORKS_BAD_DATA;
}
const RunTimeOperandInfo &input = mOperands[ins[0]];
const RunTimeOperandInfo &coords = mOperands[ins[1]];
+ int32_t axis = getScalarData<int32_t>(mOperands[ins[2]]);
RunTimeOperandInfo &output = mOperands[outs[0]];
Shape outShape = output.shape();
- success = gatherPrepare(input.shape(), coords.shape(), &outShape) &&
+ success = gatherPrepare(input.shape(), coords.shape(), &outShape, axis) &&
setInfoAndAllocateIfNeeded(&output, outShape) &&
gatherGeneric(reinterpret_cast<const uint8_t *>(input.buffer), input.shape(),
reinterpret_cast<const int32_t *>(coords.buffer), coords.shape(),
return true;
}
-bool gatherPrepare(const Shape &inputShape, const Shape &coordsShape, Shape *outputShape)
+bool gatherPrepare(const Shape &inputShape, const Shape &coordsShape, Shape *outputShape,
+ int32_t axis)
{
// Only INT32 positions are supported.
NN_OPS_CHECK(coordsShape.type == OperandType::TENSOR_INT32);
NN_OPS_CHECK(inputShape.type == outputShape->type);
// TODO: Only 0D or 1D coordsShape is currently supported; other dimensions need to be added
NN_OPS_CHECK(getNumberOfDimensions(coordsShape) <= 1);
+ // Only axis == 0 is currently supported.
+ NN_OPS_CHECK(axis == 0);
// TODO: other dimension
switch (inputShape.type)
// calculate dimension (axis is supposed to be 0)
const int num_dimensions =
(getNumberOfDimensions(inputShape) - 1) + getNumberOfDimensions(coordsShape);
- NN_OPS_CHECK(num_dimensions >= 0);
+ NN_OPS_CHECK(num_dimensions >= axis);
int output_index = 0;
for (uint32_t i = 0; i < getNumberOfDimensions(coordsShape); i++, output_index++)
: PositiveRemainder(std::min(std::max(index, -dim), dim - 1), dim));
}
-bool gatherPrepare(const Shape &input, const Shape &coords, Shape *output);
+bool gatherPrepare(const Shape &input, const Shape &coords, Shape *output, int32_t axis);
bool topk_v2Prepare(const Shape &inputShape, const int32_t k, Shape *outputIndexesShape,
Shape *outputValuesShape);
case ANEURALNETWORKS_SUB_EX:
type = OperationType::SUB;
break;
+ case ANEURALNETWORKS_GATHER_EX:
+ type = OperationType::GATHER;
+ break;
case ANEURALNETWORKS_TOPK_V2_EX:
type = OperationType::TOPK_V2;
break;