From 158528e2bf1131abc4571e5e31b38e4dfb9199d4 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Mon, 21 May 2018 07:00:53 -0700
Subject: [PATCH] Enhance error reporting.

PiperOrigin-RevId: 197390052
---
 tensorflow/contrib/lite/kernels/activations.cc      | 36 ++++++++++++++--------
 tensorflow/contrib/lite/kernels/arg_max.cc          |  8 +++--
 tensorflow/contrib/lite/kernels/basic_rnn.cc        |  3 +-
 .../contrib/lite/kernels/batch_to_space_nd.cc       |  5 +--
 tensorflow/contrib/lite/kernels/comparisons.cc      | 12 +++++---
 tensorflow/contrib/lite/kernels/conv.cc             |  3 +-
 tensorflow/contrib/lite/kernels/depthwise_conv.cc   |  3 +-
 tensorflow/contrib/lite/kernels/div.cc              |  5 +--
 tensorflow/contrib/lite/kernels/elementwise.cc      |  3 +-
 tensorflow/contrib/lite/kernels/fully_connected.cc  |  3 +-
 tensorflow/contrib/lite/kernels/gather.cc           |  5 +--
 tensorflow/contrib/lite/kernels/l2norm.cc           |  3 +-
 .../contrib/lite/kernels/local_response_norm.cc     |  3 +-
 tensorflow/contrib/lite/kernels/mul.cc              |  5 +--
 tensorflow/contrib/lite/kernels/neg.cc              |  3 +-
 tensorflow/contrib/lite/kernels/pad.cc              |  4 ++-
 tensorflow/contrib/lite/kernels/pooling.cc          |  9 ++++--
 tensorflow/contrib/lite/kernels/resize_bilinear.cc  |  3 +-
 tensorflow/contrib/lite/kernels/select.cc           |  4 ++-
 tensorflow/contrib/lite/kernels/slice.cc            | 10 +++---
 .../contrib/lite/kernels/space_to_batch_nd.cc       |  5 +--
 tensorflow/contrib/lite/kernels/space_to_depth.cc   |  3 +-
 tensorflow/contrib/lite/kernels/split.cc            |  5 +--
 tensorflow/contrib/lite/kernels/strided_slice.cc    |  5 +--
 tensorflow/contrib/lite/kernels/sub.cc              |  5 +--
 tensorflow/contrib/lite/kernels/topk_v2.cc          |  4 ++-
 tensorflow/contrib/lite/kernels/transpose.cc        |  3 +-
 .../lite/kernels/unidirectional_sequence_rnn.cc     |  3 +-
 28 files changed, 106 insertions(+), 57 deletions(-)

diff --git a/tensorflow/contrib/lite/kernels/activations.cc b/tensorflow/contrib/lite/kernels/activations.cc
index 4972159..add36b4 100644
--- a/tensorflow/contrib/lite/kernels/activations.cc
+++ b/tensorflow/contrib/lite/kernels/activations.cc
@@ -191,7 +191,8 @@ TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteOk;
     } break;
     default:
-      context->ReportError(context, "Only float32 supported currently.");
+      context->ReportError(context, "Only float32 supported currently, got %d.",
+                           input->type);
       return kTfLiteError;
   }
 }
@@ -211,7 +212,8 @@ TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteOk;
     } break;
     default:
-      context->ReportError(context, "Only float32 supported currently.");
+      context->ReportError(context, "Only float32 supported currently, got %d.",
+                           input->type);
       return kTfLiteError;
   }
 }
@@ -229,7 +231,8 @@ TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteOk;
     } break;
     default:
-      context->ReportError(context, "Only float32 supported currently.");
+      context->ReportError(context, "Only float32 supported currently, got %d.",
+                           input->type);
       return kTfLiteError;
   }
 }
@@ -256,7 +259,8 @@ TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteOk;
     } break;
     default:
-      context->ReportError(context, "Only float32 supported currently.");
+      context->ReportError(context, "Only float32 supported currently, got %d.",
+                           input->type);
       return kTfLiteError;
   }
 }
@@ -285,7 +289,8 @@ TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) {
       break;
     }
     default:
-      context->ReportError(context, "Only float32 supported currently.");
+      context->ReportError(context, "Only float32 supported currently, got %d.",
+                           input->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -377,8 +382,9 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
         Softmax4DFloat(input, output, params);
         return kTfLiteOk;
       }
-      context->ReportError(context,
-                           "Only 2D and 4D tensors supported currently.");
+      context->ReportError(
+          context, "Only 2D and 4D tensors supported currently, got %dD.",
+          NumDimensions(input));
       return kTfLiteError;
     }
     case kTfLiteUInt8: {
@@ -390,13 +396,15 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
         Softmax4DQuantized(input, output, params, data);
         return kTfLiteOk;
       }
-      context->ReportError(context,
-                           "Only 2D and 4D tensors supported currently.");
+      context->ReportError(
+          context, "Only 2D and 4D tensors supported currently, got %dD.",
+          NumDimensions(input));
       return kTfLiteError;
     }
     default:
-      context->ReportError(context,
-                           "Only float32 and uint8_t supported currently.");
+      context->ReportError(
+          context, "Only float32 and uint8_t supported currently, got %d.",
+          input->type);
       return kTfLiteError;
   }
 }
@@ -411,7 +419,8 @@ TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
                          GetTensorData<float>(output), GetTensorDims(output));
       return kTfLiteOk;
     default:
-      context->ReportError(context, "Only float32 supported currently.");
+      context->ReportError(context, "Only float32 supported currently, got %d.",
+                           input->type);
       return kTfLiteError;
   }
 }
@@ -422,7 +431,8 @@ TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteTensor* output = GetOutput(context, node, 0);
 
   if (input->type != kTfLiteFloat32) {
-    context->ReportError(context, "Only float32 supported currently.");
+    context->ReportError(context, "Only float32 supported currently, got %d.",
+                         input->type);
     return kTfLiteError;
   }
   TF_LITE_ENSURE_EQ(context, input->dims->size, 4);
diff --git a/tensorflow/contrib/lite/kernels/arg_max.cc b/tensorflow/contrib/lite/kernels/arg_max.cc
index 738d475..26f57e8 100644
--- a/tensorflow/contrib/lite/kernels/arg_max.cc
+++ b/tensorflow/contrib/lite/kernels/arg_max.cc
@@ -52,7 +52,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
       output->type = kTfLiteInt64;
       break;
     default:
-      context->ReportError(context, "Unknown index output data type");
+      context->ReportError(context, "Unknown index output data type: %d",
+                           params->output_type);
       return kTfLiteError;
   }
 
@@ -64,7 +65,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
       break;
     default:
-      context->ReportError(context, "Only float32 and int types are supported");
+      context->ReportError(
+          context,
+          "Unknown input type: %d, only float32 and int types are supported",
+          input->type);
       return kTfLiteError;
   }
 
diff --git a/tensorflow/contrib/lite/kernels/basic_rnn.cc b/tensorflow/contrib/lite/kernels/basic_rnn.cc
index 0907547..7dc0c56 100644
--- a/tensorflow/contrib/lite/kernels/basic_rnn.cc
+++ b/tensorflow/contrib/lite/kernels/basic_rnn.cc
@@ -210,7 +210,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                              hidden_state, output);
     }
     default:
-      context->ReportError(context, "Type not currently supported.");
+      context->ReportError(context, "Type %d not currently supported.",
+                           input_weights->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
diff --git a/tensorflow/contrib/lite/kernels/batch_to_space_nd.cc b/tensorflow/contrib/lite/kernels/batch_to_space_nd.cc
index 262e1ae..c8cee88 100644
--- a/tensorflow/contrib/lite/kernels/batch_to_space_nd.cc
+++ b/tensorflow/contrib/lite/kernels/batch_to_space_nd.cc
@@ -163,8 +163,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       }
       break;
     default:
-      context->ReportError(context,
-                           "Type is currently not supported by BatchToSpace.");
+      context->ReportError(
+          context, "Type %d is currently not supported by BatchToSpace.",
+          op_context.input->type);
       return kTfLiteError;
   }
 #undef TF_LITE_BATCH_TO_SPACE_ND
diff --git a/tensorflow/contrib/lite/kernels/comparisons.cc b/tensorflow/contrib/lite/kernels/comparisons.cc
index b948334..3b81062 100644
--- a/tensorflow/contrib/lite/kernels/comparisons.cc
+++ b/tensorflow/contrib/lite/kernels/comparisons.cc
@@ -85,7 +85,8 @@ TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) {
       break;
     default:
       context->ReportError(context,
-                           "Does not support type other than float|int");
+                           "Does not support type %d, requires float|int",
+                           input1->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -109,7 +110,8 @@ TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) {
       break;
     default:
       context->ReportError(context,
-                           "Does not support type other than float|int");
+                           "Does not support type %d, requires float|int",
+                           input1->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -133,7 +135,8 @@ TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) {
       break;
     default:
       context->ReportError(context,
-                           "Does not support type other than float|int");
+                           "Does not support type %d, requires float|int",
+                           input1->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -157,7 +160,8 @@ TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) {
       break;
     default:
       context->ReportError(context,
-                           "Does not support type other than float|int");
+                           "Does not support type %d, requires float|int",
+                           input1->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
diff --git a/tensorflow/contrib/lite/kernels/conv.cc b/tensorflow/contrib/lite/kernels/conv.cc
index 2b7e455..0b35a22 100644
--- a/tensorflow/contrib/lite/kernels/conv.cc
+++ b/tensorflow/contrib/lite/kernels/conv.cc
@@ -488,7 +488,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                          bias, im2col, hwcn_weights, output);
       break;
     default:
-      context->ReportError(context, "Type not currently supported.");
+      context->ReportError(context, "Type %d not currently supported.",
+                           input->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
diff --git a/tensorflow/contrib/lite/kernels/depthwise_conv.cc b/tensorflow/contrib/lite/kernels/depthwise_conv.cc
index 3ad8d7d..abb2549 100644
--- a/tensorflow/contrib/lite/kernels/depthwise_conv.cc
+++ b/tensorflow/contrib/lite/kernels/depthwise_conv.cc
@@ -247,7 +247,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                      bias, output);
       break;
     default:
-      context->ReportError(context, "Type not currently supported.");
+      context->ReportError(context, "Type %d not currently supported.",
+                           input->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
diff --git a/tensorflow/contrib/lite/kernels/div.cc b/tensorflow/contrib/lite/kernels/div.cc
index e52e4fe..d264821 100644
--- a/tensorflow/contrib/lite/kernels/div.cc
+++ b/tensorflow/contrib/lite/kernels/div.cc
@@ -118,8 +118,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   if (output->type == kTfLiteFloat32) {
     EvalFloat<kernel_type>(context, node, params, data, input1, input2, output);
   } else {
-    context->ReportError(context,
-                         "Div only supports FLOAT32 and quantized UINT8 now.");
+    context->ReportError(
+        context, "Div only supports FLOAT32 and quantized UINT8 now, got %d.",
+        output->type);
     return kTfLiteError;
   }
 
diff --git a/tensorflow/contrib/lite/kernels/elementwise.cc b/tensorflow/contrib/lite/kernels/elementwise.cc
index b719a08..0bd5046 100644
--- a/tensorflow/contrib/lite/kernels/elementwise.cc
+++ b/tensorflow/contrib/lite/kernels/elementwise.cc
@@ -48,7 +48,8 @@ TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteOk;
     }
     default: {
-      context->ReportError(context, "Only float32 is supported currently");
+      context->ReportError(context, "Input type is %d, requires float32",
+                           input->type);
       return kTfLiteError;
     }
   }
diff --git a/tensorflow/contrib/lite/kernels/fully_connected.cc b/tensorflow/contrib/lite/kernels/fully_connected.cc
index a486b81..3374923 100644
--- a/tensorflow/contrib/lite/kernels/fully_connected.cc
+++ b/tensorflow/contrib/lite/kernels/fully_connected.cc
@@ -357,7 +357,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       return EvalQuantized<kernel_type>(context, node, params, data, input,
                                         filter, bias, output);
     default:
-      context->ReportError(context, "Type not currently supported.");
+      context->ReportError(context, "Type %d not currently supported.",
+                           filter->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
diff --git a/tensorflow/contrib/lite/kernels/gather.cc b/tensorflow/contrib/lite/kernels/gather.cc
index c452d3e..6a23414 100644
--- a/tensorflow/contrib/lite/kernels/gather.cc
+++ b/tensorflow/contrib/lite/kernels/gather.cc
@@ -59,8 +59,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
       TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
     } break;
     default:
-      context->ReportError(context,
-                           "Only float32 and string types are supported");
+      context->ReportError(
+          context, "Only float32 and string types are supported, got %d",
+          input->type);
       return kTfLiteError;
   }
   const int num_dimensions =
diff --git a/tensorflow/contrib/lite/kernels/l2norm.cc b/tensorflow/contrib/lite/kernels/l2norm.cc
index 7cea63d..3205c1c 100644
--- a/tensorflow/contrib/lite/kernels/l2norm.cc
+++ b/tensorflow/contrib/lite/kernels/l2norm.cc
@@ -94,7 +94,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     }
 #undef TF_LITE_L2NORM
   } else {
-    context->ReportError(context, "Inputs and outputs not all float types.");
+    context->ReportError(context, "Output type is %d, requires float.",
+                         output->type);
     return kTfLiteError;
   }
 
diff --git a/tensorflow/contrib/lite/kernels/local_response_norm.cc b/tensorflow/contrib/lite/kernels/local_response_norm.cc
index c15a517..36dca29 100644
--- a/tensorflow/contrib/lite/kernels/local_response_norm.cc
+++ b/tensorflow/contrib/lite/kernels/local_response_norm.cc
@@ -77,7 +77,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     }
 #undef TF_LITE_LOCAL_RESPONSE_NORM
   } else {
-    context->ReportError(context, "Inputs and outputs not all float types.");
+    context->ReportError(context, "Output type is %d, requires float.",
+                         output->type);
     return kTfLiteError;
   }
 
diff --git a/tensorflow/contrib/lite/kernels/mul.cc b/tensorflow/contrib/lite/kernels/mul.cc
index 6c4c3a1..62f4e94 100644
--- a/tensorflow/contrib/lite/kernels/mul.cc
+++ b/tensorflow/contrib/lite/kernels/mul.cc
@@ -159,8 +159,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
                                output);
   } else {
-    context->ReportError(context,
-                         "Mul only supports FLOAT32 and quantized UINT8 now.");
+    context->ReportError(
+        context, "Mul only supports FLOAT32 and quantized UINT8 now, got %d.",
+        output->type);
     return kTfLiteError;
   }
 
diff --git a/tensorflow/contrib/lite/kernels/neg.cc b/tensorflow/contrib/lite/kernels/neg.cc
index b8b53f3..4124c05 100644
--- a/tensorflow/contrib/lite/kernels/neg.cc
+++ b/tensorflow/contrib/lite/kernels/neg.cc
@@ -59,7 +59,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       break;
     default:
       context->ReportError(
-          context, "Neg only currently supports int64, int32, and float32.",
+          context,
+          "Neg only currently supports int64, int32, and float32, got %d.",
           input->type);
       return kTfLiteError;
   }
diff --git a/tensorflow/contrib/lite/kernels/pad.cc b/tensorflow/contrib/lite/kernels/pad.cc
index ecac2dd..83668cb 100644
--- a/tensorflow/contrib/lite/kernels/pad.cc
+++ b/tensorflow/contrib/lite/kernels/pad.cc
@@ -199,7 +199,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       }
     } break;
     default:
-      context->ReportError(context, "Type is currently not supported by Pad.");
+      context->ReportError(context,
+                           "Type %d is currently not supported by Pad.",
+                           op_context.input->type);
       return kTfLiteError;
   }
 #undef TF_LITE_PAD
diff --git a/tensorflow/contrib/lite/kernels/pooling.cc b/tensorflow/contrib/lite/kernels/pooling.cc
index 645d9f4..311e9b8 100644
--- a/tensorflow/contrib/lite/kernels/pooling.cc
+++ b/tensorflow/contrib/lite/kernels/pooling.cc
@@ -246,7 +246,8 @@ TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
                                           output);
       break;
     default:
-      context->ReportError(context, "Type not currently supported.");
+      context->ReportError(context, "Type %d not currently supported.",
+                           input->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -267,7 +268,8 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
       MaxEvalQuantized<kernel_type>(context, node, params, data, input, output);
       break;
     default:
-      context->ReportError(context, "Type not currently supported.");
+      context->ReportError(context, "Type %d not currently supported.",
+                           input->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
@@ -288,7 +290,8 @@ TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) {
     // We don't have a quantized implementation, so just fall through to the
     // 'default' case.
     default:
-      context->ReportError(context, "Type not currently supported.");
+      context->ReportError(context, "Type %d not currently supported.",
+                           input->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
diff --git a/tensorflow/contrib/lite/kernels/resize_bilinear.cc b/tensorflow/contrib/lite/kernels/resize_bilinear.cc
index e4bd0f5..f2092ea 100644
--- a/tensorflow/contrib/lite/kernels/resize_bilinear.cc
+++ b/tensorflow/contrib/lite/kernels/resize_bilinear.cc
@@ -104,7 +104,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     }
 #undef TF_LITE_RESIZE_BILINEAR
   } else {
-    context->ReportError(context, "Inputs and outputs not all float types.");
+    context->ReportError(context, "Output type is %d, requires float.",
+                         output->type);
     return kTfLiteError;
   }
 
diff --git a/tensorflow/contrib/lite/kernels/select.cc b/tensorflow/contrib/lite/kernels/select.cc
index 9bc8a1a..9b6cee3 100644
--- a/tensorflow/contrib/lite/kernels/select.cc
+++ b/tensorflow/contrib/lite/kernels/select.cc
@@ -97,7 +97,9 @@ TfLiteStatus SelectEval(TfLiteContext* context, TfLiteNode* node) {
       break;                                                          \
     default:                                                          \
       context->ReportError(context,                                   \
-                           "Does not support type other than bool|float|int"); \
+                           "Does not support type other than bool|float|int, " \
+                           "got %d",                                  \
+                           type);                                     \
       return kTfLiteError;                                            \
   }
 
diff --git a/tensorflow/contrib/lite/kernels/slice.cc b/tensorflow/contrib/lite/kernels/slice.cc
index b28934e..6a20e80 100644
--- a/tensorflow/contrib/lite/kernels/slice.cc
+++ b/tensorflow/contrib/lite/kernels/slice.cc
@@ -85,7 +85,8 @@ TfLiteStatus ResizeOutputShape(TfLiteContext* context,
     TF_LITE_ENSURE_STATUS(CalculateOutputShapeVector<int64_t>(
         context, input, begin, size, &output_shape_vector));
   } else {
-    context->ReportError(context, "Type is currently not supported by Slice.");
+    context->ReportError(
+        context, "Type %d is currently not supported by Slice.", begin->type);
     return kTfLiteError;
   }
 
@@ -148,7 +149,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     GetBeginAndSizeVectors<int64_t>(NumDimensions(input), begin, size, &begins,
                                     &sizes);
   } else {
-    context->ReportError(context, "Type is currently not supported by Slice.");
+    context->ReportError(
+        context, "Type %d is currently not supported by Slice.", begin->type);
     return kTfLiteError;
   }
 
@@ -179,8 +181,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       TF_LITE_SLICE(bool);
       break;
     default:
-      context->ReportError(context,
-                           "Type is currently not supported by Slice.");
+      context->ReportError(
+          context, "Type %d is currently not supported by Slice.", input->type);
       return kTfLiteError;
   }
 #undef TF_LITE_SLICE
diff --git a/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc b/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc
index 1e35869..c926959 100644
--- a/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc
+++ b/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc
@@ -152,8 +152,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       }
       break;
     default:
-      context->ReportError(context,
-                           "Type is currently not supported by SpaceToBatch.");
+      context->ReportError(
+          context, "Type %d is currently not supported by SpaceToBatch.",
+          op_context.input->type);
       return kTfLiteError;
   }
 #undef TF_LITE_SPACE_TO_BATCH_ND
diff --git a/tensorflow/contrib/lite/kernels/space_to_depth.cc b/tensorflow/contrib/lite/kernels/space_to_depth.cc
index aafce89..9dbe9b9 100644
--- a/tensorflow/contrib/lite/kernels/space_to_depth.cc
+++ b/tensorflow/contrib/lite/kernels/space_to_depth.cc
@@ -113,7 +113,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       }
       break;
     default:
-      context->ReportError(context, "Type not currently supported.");
+      context->ReportError(context, "Type %d not currently supported.",
+                           input->type);
       return kTfLiteError;
   }
 #undef TF_LITE_SPACE_TO_DEPTH
diff --git a/tensorflow/contrib/lite/kernels/split.cc b/tensorflow/contrib/lite/kernels/split.cc
index c6b94c2..43387df 100644
--- a/tensorflow/contrib/lite/kernels/split.cc
+++ b/tensorflow/contrib/lite/kernels/split.cc
@@ -138,8 +138,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       break;
     }
     default:
-      context->ReportError(context,
-                           "Only float32 and uint8 are currently supported.");
+      context->ReportError(
+          context, "Only float32 and uint8 are currently supported, got %d.",
+          op_context.input->type);
       return kTfLiteError;
   }
 #undef TF_LITE_SPLIT
diff --git a/tensorflow/contrib/lite/kernels/strided_slice.cc b/tensorflow/contrib/lite/kernels/strided_slice.cc
index 9417be3..725dd81 100644
--- a/tensorflow/contrib/lite/kernels/strided_slice.cc
+++ b/tensorflow/contrib/lite/kernels/strided_slice.cc
@@ -235,8 +235,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       break;
     default:
       context->ReportError(context,
-                           "Type is currently not supported "
-                           "by StridedSlice.");
+                           "Type %d is currently not supported "
+                           "by StridedSlice.",
+                           op_context.input->type);
       return kTfLiteError;
   }
 #undef TF_LITE_STRIDED_SLICE
diff --git a/tensorflow/contrib/lite/kernels/sub.cc b/tensorflow/contrib/lite/kernels/sub.cc
index 9531ecb..d788159 100644
--- a/tensorflow/contrib/lite/kernels/sub.cc
+++ b/tensorflow/contrib/lite/kernels/sub.cc
@@ -174,8 +174,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     EvalQuantized<kernel_type>(context, node, params, data, input1, input2,
                                output);
   } else {
-    context->ReportError(context,
-                         "Inputs and outputs not all float|uint8 types.");
+    context->ReportError(
+        context, "Output type %d is not supported, requires float|uint8 types.",
+        output->type);
     return kTfLiteError;
   }
 
diff --git a/tensorflow/contrib/lite/kernels/topk_v2.cc b/tensorflow/contrib/lite/kernels/topk_v2.cc
index 0feb42b..fb0e49c 100644
--- a/tensorflow/contrib/lite/kernels/topk_v2.cc
+++ b/tensorflow/contrib/lite/kernels/topk_v2.cc
@@ -214,7 +214,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
           output_values->data.i64);
       break;
     default:
-      context->ReportError(context, "Type is currently not supported by TopK.");
+      context->ReportError(context,
+                           "Type %d is currently not supported by TopK.",
+                           output_values->type);
       return kTfLiteError;
   }
 
diff --git a/tensorflow/contrib/lite/kernels/transpose.cc b/tensorflow/contrib/lite/kernels/transpose.cc
index 8316a23..800b056 100644
--- a/tensorflow/contrib/lite/kernels/transpose.cc
+++ b/tensorflow/contrib/lite/kernels/transpose.cc
@@ -136,7 +136,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
       break;
     default:
       context->ReportError(context,
-                           "Type is currently not supported by Transpose.");
+                           "Type %d is currently not supported by Transpose.",
+                           op_context.input->type);
       return kTfLiteError;
   }
 #undef TF_LITE_TRANSPOSE
diff --git a/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc b/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc
index 22c80df..8429dba 100644
--- a/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc
+++ b/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc
@@ -283,7 +283,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
                              hidden_state, output);
     }
     default:
-      context->ReportError(context, "Type not currently supported.");
+      context->ReportError(context, "Type %d not currently supported.",
+                           input_weights->type);
       return kTfLiteError;
   }
   return kTfLiteOk;
-- 
2.7.4
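
For context, the pattern this patch applies across kernels is sketched below: a kernel's type-dispatch switch whose default branch reports the offending TfLiteType enum value via "%d" instead of a generic "not supported" message. The kernel name, namespace, and the direct lookup through context->tensors are illustrative assumptions and are not taken from this patch; only the contrib-era "tensorflow/contrib/lite/context.h" API (TfLiteContext, TfLiteNode, ReportError) is assumed.

// Illustrative sketch only -- not part of the patch above.
#include "tensorflow/contrib/lite/context.h"

namespace tflite {
namespace ops {
namespace custom {
namespace example_kernel {  // Hypothetical kernel, for illustration only.

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  // Look up the first input tensor directly through the context.
  const TfLiteTensor* input = &context->tensors[node->inputs->data[0]];
  switch (input->type) {
    case kTfLiteFloat32:
      // ... float32 path would go here ...
      return kTfLiteOk;
    default:
      // Report the actual type value so unsupported-type failures are
      // easier to diagnose, as done throughout the patch.
      context->ReportError(context, "Type %d not currently supported.",
                           input->type);
      return kTfLiteError;
  }
}

}  // namespace example_kernel
}  // namespace custom
}  // namespace ops
}  // namespace tflite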