return kTfLiteOk;
} break;
default:
- context->ReportError(context, "Only float32 supported currently.");
+ context->ReportError(context, "Only float32 supported currently, got %d.",
+ input->type);
return kTfLiteError;
}
}
return kTfLiteOk;
} break;
default:
- context->ReportError(context, "Only float32 supported currently.");
+ context->ReportError(context, "Only float32 supported currently, got %d.",
+ input->type);
return kTfLiteError;
}
}
return kTfLiteOk;
} break;
default:
- context->ReportError(context, "Only float32 supported currently.");
+ context->ReportError(context, "Only float32 supported currently, got %d.",
+ input->type);
return kTfLiteError;
}
}
return kTfLiteOk;
} break;
default:
- context->ReportError(context, "Only float32 supported currently.");
+ context->ReportError(context, "Only float32 supported currently, got %d.",
+ input->type);
return kTfLiteError;
}
}
break;
}
default:
- context->ReportError(context, "Only float32 supported currently.");
+ context->ReportError(context, "Only float32 supported currently, got %d.",
+ input->type);
return kTfLiteError;
}
return kTfLiteOk;
Softmax4DFloat(input, output, params);
return kTfLiteOk;
}
- context->ReportError(context,
- "Only 2D and 4D tensors supported currently.");
+ context->ReportError(
+ context, "Only 2D and 4D tensors supported currently, got %dD.",
+ NumDimensions(input));
return kTfLiteError;
}
case kTfLiteUInt8: {
Softmax4DQuantized(input, output, params, data);
return kTfLiteOk;
}
- context->ReportError(context,
- "Only 2D and 4D tensors supported currently.");
+ context->ReportError(
+ context, "Only 2D and 4D tensors supported currently, got %dD.",
+ NumDimensions(input));
return kTfLiteError;
}
default:
- context->ReportError(context,
- "Only float32 and uint8_t supported currently.");
+ context->ReportError(
+ context, "Only float32 and uint8_t supported currently, got %d.",
+ input->type);
return kTfLiteError;
}
}
GetTensorData<float>(output), GetTensorDims(output));
return kTfLiteOk;
default:
- context->ReportError(context, "Only float32 supported currently.");
+ context->ReportError(context, "Only float32 supported currently., got %d",
+ input->type);
return kTfLiteError;
}
}
const TfLiteTensor* output = GetOutput(context, node, 0);
if (input->type != kTfLiteFloat32) {
- context->ReportError(context, "Only float32 supported currently.");
+ context->ReportError(context, "Only float32 supported currently, got %d.",
+ input->type);
return kTfLiteError;
}
TF_LITE_ENSURE_EQ(context, input->dims->size, 4);
output->type = kTfLiteInt64;
break;
default:
- context->ReportError(context, "Unknown index output data type");
+ context->ReportError(context, "Unknown index output data type: %d",
+ params->output_type);
return kTfLiteError;
}
break;
default:
- context->ReportError(context, "Only float32 and int types are supported");
+ context->ReportError(
+ context,
+ "Unkonwn input type: %d, only float32 and int types are supported",
+ input->type);
return kTfLiteError;
}
hidden_state, output);
}
default:
- context->ReportError(context, "Type not currently supported.");
+ context->ReportError(context, "Type %d not currently supported.",
+ input_weights->type);
return kTfLiteError;
}
return kTfLiteOk;
}
break;
default:
- context->ReportError(context,
- "Type is currently not supported by BatchToSpace.");
+ context->ReportError(
+ context, "Type %d is currently not supported by BatchToSpace.",
+ op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_BATCH_TO_SPACE_ND
break;
default:
context->ReportError(context,
- "Does not support type other than float|int");
+ "Does not support type %d, requires float|int",
+ input1->type);
return kTfLiteError;
}
return kTfLiteOk;
break;
default:
context->ReportError(context,
- "Does not support type other than float|int");
+ "Does not support type %d, requires float|int",
+ input1->type);
return kTfLiteError;
}
return kTfLiteOk;
break;
default:
context->ReportError(context,
- "Does not support type other than float|int");
+ "Does not support type %d, requires float|int",
+ input1->type);
return kTfLiteError;
}
return kTfLiteOk;
break;
default:
context->ReportError(context,
- "Does not support type other than float|int");
+ "Does not support type %d, requires float|int",
+ input1->type);
return kTfLiteError;
}
return kTfLiteOk;
bias, im2col, hwcn_weights, output);
break;
default:
- context->ReportError(context, "Type not currently supported.");
+ context->ReportError(context, "Type %d not currently supported.",
+ input->type);
return kTfLiteError;
}
return kTfLiteOk;
bias, output);
break;
default:
- context->ReportError(context, "Type not currently supported.");
+ context->ReportError(context, "Type %d not currently supported.",
+ input->type);
return kTfLiteError;
}
return kTfLiteOk;
if (output->type == kTfLiteFloat32) {
EvalFloat<kernel_type>(context, node, params, data, input1, input2, output);
} else {
- context->ReportError(context,
- "Div only supports FLOAT32 and quantized UINT8 now.");
+ context->ReportError(
+ context, "Div only supports FLOAT32 and quantized UINT8 now, got %d.",
+ output->type);
return kTfLiteError;
}
return kTfLiteOk;
}
default: {
- context->ReportError(context, "Only float32 is supported currently");
+ context->ReportError(context, "Input type is %d, requires float32",
+ input->type);
return kTfLiteError;
}
}
return EvalQuantized<kernel_type>(context, node, params, data, input,
filter, bias, output);
default:
- context->ReportError(context, "Type not currently supported.");
+ context->ReportError(context, "Type %d not currently supported.",
+ filter->type);
return kTfLiteError;
}
return kTfLiteOk;
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
} break;
default:
- context->ReportError(context,
- "Only float32 and string types are supported");
+ context->ReportError(
+ context, "Only float32 and string types are supported, got %d",
+ input->type);
return kTfLiteError;
}
const int num_dimensions =
}
#undef TF_LITE_L2NORM
} else {
- context->ReportError(context, "Inputs and outputs not all float types.");
+ context->ReportError(context, "Output type is %d, requires float.",
+ output->type);
return kTfLiteError;
}
}
#undef TF_LITE_LOCAL_RESPONSE_NORM
} else {
- context->ReportError(context, "Inputs and outputs not all float types.");
+ context->ReportError(context, "Output type is %d, requires float.",
+ output->type);
return kTfLiteError;
}
EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
output);
} else {
- context->ReportError(context,
- "Mul only supports FLOAT32 and quantized UINT8 now.");
+ context->ReportError(
+ context, "Mul only supports FLOAT32 and quantized UINT8 now, got %d.",
+ output->type);
return kTfLiteError;
}
break;
default:
context->ReportError(
- context, "Neg only currently supports int64, int32, and float32.",
+ context,
+ "Neg only currently supports int64, int32, and float32, got %d.",
input->type);
return kTfLiteError;
}
}
} break;
default:
- context->ReportError(context, "Type is currently not supported by Pad.");
+ context->ReportError(context,
+ "Type %d is currently not supported by Pad.",
+ op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_PAD
output);
break;
default:
- context->ReportError(context, "Type not currently supported.");
+ context->ReportError(context, "Type %d not currently supported.",
+ input->type);
return kTfLiteError;
}
return kTfLiteOk;
MaxEvalQuantized<kernel_type>(context, node, params, data, input, output);
break;
default:
- context->ReportError(context, "Type not currently supported.");
+ context->ReportError(context, "Type %d not currently supported.",
+ input->type);
return kTfLiteError;
}
return kTfLiteOk;
// We don't have a quantized implementation, so just fall through to the
// 'default' case.
default:
- context->ReportError(context, "Type not currently supported.");
+ context->ReportError(context, "Type %d not currently supported.",
+ input->type);
return kTfLiteError;
}
return kTfLiteOk;
}
#undef TF_LITE_RESIZE_BILINEAR
} else {
- context->ReportError(context, "Inputs and outputs not all float types.");
+ context->ReportError(context, "Output type is %d, requires float.",
+ output->type);
return kTfLiteError;
}
break; \
default: \
context->ReportError(context, \
- "Does not support type other than bool|float|int"); \
+ "Does not support type other than bool|float|int, " \
+ "got %d", \
+ type); \
return kTfLiteError; \
}
TF_LITE_ENSURE_STATUS(CalculateOutputShapeVector<int64_t>(
context, input, begin, size, &output_shape_vector));
} else {
- context->ReportError(context, "Type is currently not supported by Slice.");
+ context->ReportError(
+ context, "Type %d is currently not supported by Slice.", begin->type);
return kTfLiteError;
}
GetBeginAndSizeVectors<int64_t>(NumDimensions(input), begin, size, &begins,
&sizes);
} else {
- context->ReportError(context, "Type is currently not supported by Slice.");
+ context->ReportError(
+ context, "Type %d is currently not supported by Slice.", begin->type);
return kTfLiteError;
}
TF_LITE_SLICE(bool);
break;
default:
- context->ReportError(context,
- "Type is currently not supported by Slice.");
+ context->ReportError(
+ context, "Type %d is currently not supported by Slice.", input->type);
return kTfLiteError;
}
#undef TF_LITE_SLICE
}
break;
default:
- context->ReportError(context,
- "Type is currently not supported by SpaceToBatch.");
+ context->ReportError(
+ context, "Type %d is currently not supported by SpaceToBatch.",
+ op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_SPACE_TO_BATCH_ND
}
break;
default:
- context->ReportError(context, "Type not currently supported.");
+ context->ReportError(context, "Type %d not currently supported.",
+ input->type);
return kTfLiteError;
}
#undef TF_LITE_SPACE_TO_DEPTH
break;
}
default:
- context->ReportError(context,
- "Only float32 and uint8 are currently supported.");
+ context->ReportError(
+ context, "Only float32 and uint8 are currently supported, got %d.",
+ op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_SPLIT
break;
default:
context->ReportError(context,
- "Type is currently not supported "
- "by StridedSlice.");
+ "Type %d is currently not supported "
+ "by StridedSlice.",
+ op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_STRIDED_SLICE
EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
output);
} else {
- context->ReportError(context,
- "Inputs and outputs not all float|uint8 types.");
+ context->ReportError(
+ context, "output type %d is not support, requires float|uint8 types.",
+ output->type);
return kTfLiteError;
}
output_values->data.i64);
break;
default:
- context->ReportError(context, "Type is currently not supported by TopK.");
+ context->ReportError(context,
+ "Type %d is currently not supported by TopK.",
+ output_values->type);
return kTfLiteError;
}
break;
default:
context->ReportError(context,
- "Type is currently not supported by Transpose.");
+ "Type %d is currently not supported by Transpose.",
+ op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_TRANSPOSE
hidden_state, output);
}
default:
- context->ReportError(context, "Type not currently supported.");
+ context->ReportError(context, "Type %d not currently supported.",
+ input_weights->type);
return kTfLiteError;
}
return kTfLiteOk;
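
For reference, the pattern every hunk above applies: each default/else branch now forwards the offending TfLiteType enum value (printed as an int via %d) to ReportError instead of a bare string. A minimal sketch of the resulting shape, assuming a `context` and `input` in scope; the switch body is illustrative, not lifted from any one kernel:

    switch (input->type) {
      case kTfLiteFloat32:
        // ... dispatch to the float32 kernel ...
        break;
      default:
        // Include the unsupported enum value so failures are debuggable.
        context->ReportError(context, "Type %d not currently supported.",
                             input->type);
        return kTfLiteError;
    }

If the TFLite build in use provides TfLiteTypeGetName (assumed available; check the lite C headers), printing %s with TfLiteTypeGetName(input->type) would give a readable type name instead of the raw enum value.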