From 52e2698ac969a0f82c6ce901f80f04818ca8ac4e Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower" <gardener@tensorflow.org>
Date: Fri, 11 May 2018 19:38:48 -0700
Subject: [PATCH] Making GetInput from kernel_util.h return a pointer to const
 data.

PiperOrigin-RevId: 196340200
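For kernel authors, the visible effect is in Prepare and Eval: inputs now
come back as const TfLiteTensor*, while GetOutput still returns a mutable
tensor. A minimal sketch of the updated usage, modeled on the Sin example
from the custom-operator doc touched below (GetInput, GetOutput and
NumElements are the kernel_util.h helpers; the function itself is
illustrative only, not part of this patch):

    #include <cmath>

    #include "tensorflow/contrib/lite/context.h"
    #include "tensorflow/contrib/lite/kernels/kernel_util.h"

    namespace tflite {

    TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) {
      // GetInput now returns const TfLiteTensor*: the input is read-only.
      const TfLiteTensor* input = GetInput(context, node, 0);
      // GetOutput is unchanged and still yields a writable tensor.
      TfLiteTensor* output = GetOutput(context, node, 0);
      const float* in = input->data.f;  // reading through const is fine
      float* out = output->data.f;      // writes go to the output only
      for (int i = 0; i < NumElements(input); ++i) out[i] = std::sin(in[i]);
      return kTfLiteOk;
    }

    }  // namespace tflite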
---
 tensorflow/contrib/lite/g3doc/custom_operators.md  |  4 +-
 tensorflow/contrib/lite/kernels/activations.cc     | 40 ++++++------
 tensorflow/contrib/lite/kernels/add.cc             | 12 ++--
 tensorflow/contrib/lite/kernels/arg_max.cc         |  8 +--
 .../contrib/lite/kernels/audio_spectrogram.cc      |  4 +-
 tensorflow/contrib/lite/kernels/basic_rnn.cc       | 16 +++---
 .../contrib/lite/kernels/batch_to_space_nd.cc      |  6 +-
 .../lite/kernels/bidirectional_sequence_lstm.cc    | 65 +++++++++++-----------
 tensorflow/contrib/lite/kernels/cast.cc            |  4 +-
 tensorflow/contrib/lite/kernels/comparisons.cc     | 20 +++----
 tensorflow/contrib/lite/kernels/depthwise_conv.cc  | 20 +++----
 tensorflow/contrib/lite/kernels/dequantize.cc      |  2 +-
 tensorflow/contrib/lite/kernels/div.cc             | 12 ++--
 tensorflow/contrib/lite/kernels/elementwise.cc     |  8 +--
 .../contrib/lite/kernels/embedding_lookup.cc       |  8 +--
 .../lite/kernels/embedding_lookup_sparse.cc        | 20 +++----
 tensorflow/contrib/lite/kernels/exp.cc             |  2 +-
 tensorflow/contrib/lite/kernels/floor.cc           |  4 +-
 tensorflow/contrib/lite/kernels/fully_connected.cc | 27 +++++----
 tensorflow/contrib/lite/kernels/gather.cc          |  8 +--
 .../contrib/lite/kernels/hashtable_lookup.cc       | 12 ++--
 .../kernels/internal/reference/reference_ops.h     | 10 ++--
 tensorflow/contrib/lite/kernels/internal/tensor.h  | 28 ++++++++++
 tensorflow/contrib/lite/kernels/kernel_util.cc     | 15 +++--
 tensorflow/contrib/lite/kernels/kernel_util.h      | 19 ++++---
 tensorflow/contrib/lite/kernels/l2norm.cc          |  4 +-
 .../contrib/lite/kernels/local_response_norm.cc    |  4 +-
 tensorflow/contrib/lite/kernels/lsh_projection.cc  | 12 ++--
 tensorflow/contrib/lite/kernels/lstm.cc            | 40 ++++++-------
 tensorflow/contrib/lite/kernels/maximum_minimum.cc |  4 +-
 tensorflow/contrib/lite/kernels/mean.cc            |  4 +-
 tensorflow/contrib/lite/kernels/mfcc.cc            |  8 +--
 tensorflow/contrib/lite/kernels/mul.cc             | 12 ++--
 tensorflow/contrib/lite/kernels/neg.cc             |  4 +-
 tensorflow/contrib/lite/kernels/pad.cc             |  4 +-
 tensorflow/contrib/lite/kernels/pooling.cc         | 22 ++++----
 tensorflow/contrib/lite/kernels/reshape.cc         |  4 +-
 tensorflow/contrib/lite/kernels/resize_bilinear.cc | 14 +++--
 tensorflow/contrib/lite/kernels/select.cc          | 12 ++--
 tensorflow/contrib/lite/kernels/slice.cc           | 28 +++++-----
 .../contrib/lite/kernels/space_to_batch_nd.cc      |  6 +-
 tensorflow/contrib/lite/kernels/space_to_depth.cc  |  4 +-
 tensorflow/contrib/lite/kernels/split.cc           |  8 +--
 tensorflow/contrib/lite/kernels/squeeze.cc         | 11 ++--
 tensorflow/contrib/lite/kernels/strided_slice.cc   |  8 +--
 tensorflow/contrib/lite/kernels/sub.cc             | 12 ++--
 tensorflow/contrib/lite/kernels/svdf.cc            | 12 ++--
 tensorflow/contrib/lite/kernels/topk_v2.cc         | 12 ++--
 tensorflow/contrib/lite/kernels/transpose.cc       |  4 +-
 .../lite/kernels/unidirectional_sequence_lstm.cc   | 40 ++++++-------
 .../lite/kernels/unidirectional_sequence_rnn.cc    | 16 +++---
 .../lite/models/smartreply/ops/extract_feature.cc  |  4 +-
 52 files changed, 365 insertions(+), 322 deletions(-)

diff --git a/tensorflow/contrib/lite/g3doc/custom_operators.md b/tensorflow/contrib/lite/g3doc/custom_operators.md
index d7cc854..972e57f 100644
--- a/tensorflow/contrib/lite/g3doc/custom_operators.md
+++ b/tensorflow/contrib/lite/g3doc/custom_operators.md
@@ -39,7 +39,7 @@ TfLiteStatus SinPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
 
   int num_dims = NumDimensions(input);
@@ -54,7 +54,7 @@ TfLiteStatus SinPrepare(TfLiteContext* context, TfLiteNode* node) {
 TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) {
   using namespace tflite;
-  TfLiteTensor* input = GetInput(context, node,0);
+  const TfLiteTensor* input = GetInput(context, node,0);
   TfLiteTensor* output = GetOutput(context, node,0);
 
   float* input_data = input->data.f;
diff --git a/tensorflow/contrib/lite/kernels/activations.cc b/tensorflow/contrib/lite/kernels/activations.cc
index 39a54c9..4972159 100644
--- a/tensorflow/contrib/lite/kernels/activations.cc
+++ b/tensorflow/contrib/lite/kernels/activations.cc
@@ -55,7 +55,7 @@ void Free(TfLiteContext* context, void* buffer) {
 TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   TF_LITE_ENSURE_EQ(context, input->type, output->type);
 
@@ -68,7 +68,7 @@ TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   TF_LITE_ENSURE_EQ(context, input->type, output->type);
 
@@ -95,7 +95,7 @@ TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   TF_LITE_ENSURE_EQ(context, input->type, output->type);
 
@@ -126,7 +126,7 @@ TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   TF_LITE_ENSURE_EQ(context, input->type, output->type);
 
@@ -153,9 +153,9 @@ TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
-  TfLiteTensor* alpha = GetInput(context, node, 1);
+  const TfLiteTensor* alpha = GetInput(context, node, 1);
 
   output->type = input->type;
 
@@ -179,7 +179,7 @@ TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   switch (input->type) {
     case kTfLiteFloat32: {
@@ -197,7 +197,7 @@ TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   switch (input->type) {
     case kTfLiteFloat32: {
@@ -217,7 +217,7 @@ TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   switch (input->type) {
     case kTfLiteFloat32: {
@@ -236,7 +236,7 @@ TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) {
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   switch (input->type) {
     case kTfLiteFloat32: {
@@ -265,7 +265,7 @@ TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) {
 
 TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) {
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   switch (input->type) {
     case kTfLiteFloat32: {
@@ -292,7 +292,7 @@ TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) {
 }
 
 // Takes a 2D tensor and perform softmax along the second dimension.
-void Softmax2DFloat(TfLiteTensor* input, TfLiteTensor* output,
+void Softmax2DFloat(const TfLiteTensor* input, TfLiteTensor* output,
                     TfLiteSoftmaxParams* params) {
   const int batch_size = input->dims->data[0];
   const int input_size = input->dims->data[1];
@@ -327,7 +327,7 @@ void Softmax2DFloat(TfLiteTensor* input, TfLiteTensor* output,
   }
 }
 
-void Softmax2DQuantized(TfLiteTensor* input, TfLiteTensor* output,
+void Softmax2DQuantized(const TfLiteTensor* input, TfLiteTensor* output,
                         TfLiteSoftmaxParams* params, OpData* data) {
   // TODO(ahentz): this is arguably a dirty trick. Since the implementation
   // always traverses the last dimension of a 4D tensor, we will pretend our 2D
@@ -343,14 +343,14 @@ void Softmax2DQuantized(TfLiteTensor* input, TfLiteTensor* output,
 }
 
 // Takes a 4D tensor and perform softmax along the forth dimension.
-void Softmax4DFloat(TfLiteTensor* input, TfLiteTensor* output,
+void Softmax4DFloat(const TfLiteTensor* input, TfLiteTensor* output,
                     TfLiteSoftmaxParams* params) {
   optimized_ops::Softmax(GetTensorData<float>(input), GetTensorDims(input),
                          params->beta, GetTensorData<float>(output),
                          GetTensorDims(output));
 }
 
-void Softmax4DQuantized(TfLiteTensor* input, TfLiteTensor* output,
+void Softmax4DQuantized(const TfLiteTensor* input, TfLiteTensor* output,
                         TfLiteSoftmaxParams* params, OpData* data) {
   optimized_ops::Softmax(GetTensorData<uint8_t>(input), GetTensorDims(input),
                          data->input_multiplier, data->input_left_shift,
@@ -362,7 +362,7 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data);
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
 
   // TODO(ahentz): consider an implementation that works for many (all?)
@@ -402,7 +402,7 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   switch (input->type) {
     case kTfLiteFloat32:
@@ -417,9 +417,9 @@ TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, 0);
-  TfLiteTensor* alpha = GetInput(context, node, 1);
-  TfLiteTensor* output = GetOutput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* alpha = GetInput(context, node, 1);
+  const TfLiteTensor* output = GetOutput(context, node, 0);
   if (input->type != kTfLiteFloat32) {
     context->ReportError(context, "Only float32 supported currently.");
diff --git a/tensorflow/contrib/lite/kernels/add.cc b/tensorflow/contrib/lite/kernels/add.cc
index e0aa070..7ca1e35 100644
--- a/tensorflow/contrib/lite/kernels/add.cc
+++ b/tensorflow/contrib/lite/kernels/add.cc
@@ -57,8 +57,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);
@@ -80,7 +80,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 template <KernelType kernel_type>
 void EvalAddFloat(TfLiteContext* context, TfLiteNode* node,
                   TfLiteAddParams* params, const OpData* data,
-                  TfLiteTensor* input1, TfLiteTensor* input2,
+                  const TfLiteTensor* input1, const TfLiteTensor* input2,
                   TfLiteTensor* output) {
   float output_activation_min, output_activation_max;
   CalculateActivationRangeFloat(params->activation, &output_activation_min,
@@ -109,7 +109,7 @@ void EvalAddFloat(TfLiteContext* context, TfLiteNode* node,
 template <KernelType kernel_type>
 void EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
                       TfLiteAddParams* params, const OpData* data,
-                      TfLiteTensor* input1, TfLiteTensor* input2,
+                      const TfLiteTensor* input1, const TfLiteTensor* input2,
                       TfLiteTensor* output) {
   auto input1_offset = -input1->params.zero_point;
   auto input2_offset = -input2->params.zero_point;
@@ -164,8 +164,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data);
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   if (output->type == kTfLiteFloat32) {
diff --git a/tensorflow/contrib/lite/kernels/arg_max.cc b/tensorflow/contrib/lite/kernels/arg_max.cc
index a2c5e4c..566d370 100644
--- a/tensorflow/contrib/lite/kernels/arg_max.cc
+++ b/tensorflow/contrib/lite/kernels/arg_max.cc
@@ -33,8 +33,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* axis = GetInput(context, node, kAxis);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* axis = GetInput(context, node, kAxis);
   // Make sure the axis is only 1 dimension.
   TF_LITE_ENSURE_EQ(context, NumElements(axis), 1);
@@ -79,8 +79,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 // The current impl actually ignores the axis argument.
 // Only determine the index of the maximum value in the last dimension.
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* axis = GetInput(context, node, kAxis);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* axis = GetInput(context, node, kAxis);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
 #define TF_LITE_ARG_MAX(data_type, axis_type, output_type) \
diff --git a/tensorflow/contrib/lite/kernels/audio_spectrogram.cc b/tensorflow/contrib/lite/kernels/audio_spectrogram.cc
index 602f388..91d8dd3 100644
--- a/tensorflow/contrib/lite/kernels/audio_spectrogram.cc
+++ b/tensorflow/contrib/lite/kernels/audio_spectrogram.cc
@@ -72,7 +72,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
@@ -102,7 +102,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
      reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   TF_LITE_ENSURE(context,
                  params->spectrogram->Initialize(params->window_size,
diff --git a/tensorflow/contrib/lite/kernels/basic_rnn.cc b/tensorflow/contrib/lite/kernels/basic_rnn.cc
index a54ab8d..d812cd7 100644
--- a/tensorflow/contrib/lite/kernels/basic_rnn.cc
+++ b/tensorflow/contrib/lite/kernels/basic_rnn.cc
@@ -49,11 +49,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, node->inputs->size, 4);
   TF_LITE_ENSURE_EQ(context, node->outputs->size, 2);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* input_weights = GetInput(context, node, kWeightsTensor);
-  TfLiteTensor* recurrent_weights =
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input_weights = GetInput(context, node, kWeightsTensor);
+  const TfLiteTensor* recurrent_weights =
       GetInput(context, node, kRecurrentWeightsTensor);
-  TfLiteTensor* bias = GetInput(context, node, kBiasTensor);
+  const TfLiteTensor* bias = GetInput(context, node, kBiasTensor);
 
   // Check all the parameters of tensor match within themselves and match the
   // input configuration.
@@ -186,11 +186,11 @@ TfLiteStatus EvalQuantized(const TfLiteTensor* input,
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteRNNParams*>(node->builtin_data);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* input_weights = GetInput(context, node, kWeightsTensor);
-  TfLiteTensor* recurrent_weights =
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input_weights = GetInput(context, node, kWeightsTensor);
+  const TfLiteTensor* recurrent_weights =
       GetInput(context, node, kRecurrentWeightsTensor);
-  TfLiteTensor* bias = GetInput(context, node, kBiasTensor);
+  const TfLiteTensor* bias = GetInput(context, node, kBiasTensor);
   TfLiteTensor* hidden_state = GetOutput(context, node, kHiddenStateTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
diff --git a/tensorflow/contrib/lite/kernels/batch_to_space_nd.cc b/tensorflow/contrib/lite/kernels/batch_to_space_nd.cc
index bd40575..262e1ae 100644
--- a/tensorflow/contrib/lite/kernels/batch_to_space_nd.cc
+++ b/tensorflow/contrib/lite/kernels/batch_to_space_nd.cc
@@ -40,9 +40,9 @@ struct BatchToSpaceNDContext {
     crops = GetInput(context, node, 2);
     output = GetOutput(context, node, 0);
   }
-  TfLiteTensor* input;
-  TfLiteTensor* block_shape;
-  TfLiteTensor* crops;
+  const TfLiteTensor* input;
+  const TfLiteTensor* block_shape;
+  const TfLiteTensor* crops;
   TfLiteTensor* output;
 };
 
diff --git a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
index a35ba23..1cd4884 100644
--- a/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/bidirectional_sequence_lstm.cc
@@ -143,13 +143,13 @@ TfLiteStatus CheckLstmTensorDimensions(
     TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[1], n_input);
   }
 
-  TfLiteTensor* input_to_forget_weights =
+  const TfLiteTensor* input_to_forget_weights =
       GetInput(context, node, input_to_forget_weights_tensor);
   TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[0], n_cell);
   TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[1], n_input);
 
-  TfLiteTensor* input_to_cell_weights =
+  const TfLiteTensor* input_to_cell_weights =
       GetInput(context, node, input_to_cell_weights_tensor);
   TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[0], n_cell);
@@ -165,7 +165,7 @@ TfLiteStatus CheckLstmTensorDimensions(
                       n_output);
   }
 
-  TfLiteTensor* recurrent_to_forget_weights =
+  const TfLiteTensor* recurrent_to_forget_weights =
       GetInput(context, node, recurrent_to_forget_weights_tensor);
   TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[0],
@@ -173,7 +173,7 @@ TfLiteStatus CheckLstmTensorDimensions(
   TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[1],
                     n_output);
 
-  TfLiteTensor* recurrent_to_cell_weights =
+  const TfLiteTensor* recurrent_to_cell_weights =
       GetInput(context, node, recurrent_to_cell_weights_tensor);
   TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[0], n_cell);
@@ -231,16 +231,17 @@ TfLiteStatus CheckLstmTensorDimensions(
     TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->data[0], n_cell);
   }
 
-  TfLiteTensor* forget_gate_bias =
+  const TfLiteTensor* forget_gate_bias =
       GetInput(context, node, forget_gate_bias_tensor);
   TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->size, 1);
   TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->data[0], n_cell);
 
-  TfLiteTensor* cell_bias = GetInput(context, node, cell_gate_bias_tensor);
+  const TfLiteTensor* cell_bias =
+      GetInput(context, node, cell_gate_bias_tensor);
   TF_LITE_ENSURE_EQ(context, cell_bias->dims->size, 1);
   TF_LITE_ENSURE_EQ(context, cell_bias->dims->data[0], n_cell);
 
-  TfLiteTensor* output_gate_bias =
+  const TfLiteTensor* output_gate_bias =
       GetInput(context, node, output_gate_bias_tensor);
   TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->size, 1);
   TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->data[0], n_cell);
@@ -312,20 +313,20 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 
   // Inferring batch size, number of outputs and sequence length and
   // number of cells from the input tensors.
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TF_LITE_ENSURE(context, input->dims->size > 1);
   const int max_time = input->dims->data[0];
   const int n_batch = input->dims->data[1];
   const int n_input = input->dims->data[2];
 
-  TfLiteTensor* fw_input_to_output_weights =
+  const TfLiteTensor* fw_input_to_output_weights =
       GetInput(context, node, kFwInputToOutputWeightsTensor);
   const int n_fw_cell = fw_input_to_output_weights->dims->data[0];
   TF_LITE_ENSURE_EQ(context, fw_input_to_output_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, fw_input_to_output_weights->dims->data[1],
                     n_input);
 
-  TfLiteTensor* fw_recurrent_to_output_weights =
+  const TfLiteTensor* fw_recurrent_to_output_weights =
       GetInput(context, node, kFwRecurrentToOutputWeightsTensor);
   TF_LITE_ENSURE_EQ(context, fw_recurrent_to_output_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, fw_recurrent_to_output_weights->dims->data[0],
@@ -388,14 +389,14 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, fw_scratch_buffer,
                                                    fw_scratch_buffer_size));
   // Same for the backward cell.
-  TfLiteTensor* bw_input_to_output_weights =
+  const TfLiteTensor* bw_input_to_output_weights =
      GetInput(context, node, kBwInputToOutputWeightsTensor);
   const int n_bw_cell = bw_input_to_output_weights->dims->data[0];
   TF_LITE_ENSURE_EQ(context, bw_input_to_output_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, bw_input_to_output_weights->dims->data[1],
                     n_input);
 
-  TfLiteTensor* bw_recurrent_to_output_weights =
+  const TfLiteTensor* bw_recurrent_to_output_weights =
      GetInput(context, node, kBwRecurrentToOutputWeightsTensor);
   TF_LITE_ENSURE_EQ(context, bw_recurrent_to_output_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, bw_recurrent_to_output_weights->dims->data[0],
@@ -463,7 +464,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
 
   // Input tensor.
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   const int max_time = input->dims->data[0];
   const int n_batch = input->dims->data[1];
   const int n_input = input->dims->data[2];
@@ -471,20 +472,20 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
   // Tensors for the forward cell.
   TfLiteTensor* fw_input_to_input_weights =
       GetOptionalInputTensor(context, node, kFwInputToInputWeightsTensor);
-  TfLiteTensor* fw_input_to_forget_weights =
+  const TfLiteTensor* fw_input_to_forget_weights =
       GetInput(context, node, kFwInputToForgetWeightsTensor);
-  TfLiteTensor* fw_input_to_cell_weights =
+  const TfLiteTensor* fw_input_to_cell_weights =
      GetInput(context, node, kFwInputToCellWeightsTensor);
-  TfLiteTensor* fw_input_to_output_weights =
+  const TfLiteTensor* fw_input_to_output_weights =
      GetInput(context, node, kFwInputToOutputWeightsTensor);
 
   TfLiteTensor* fw_recurrent_to_input_weights =
       GetOptionalInputTensor(context, node, kFwRecurrentToInputWeightsTensor);
-  TfLiteTensor* fw_recurrent_to_forget_weights =
+  const TfLiteTensor* fw_recurrent_to_forget_weights =
      GetInput(context, node, kFwRecurrentToForgetWeightsTensor);
-  TfLiteTensor* fw_recurrent_to_cell_weights =
+  const TfLiteTensor* fw_recurrent_to_cell_weights =
      GetInput(context, node, kFwRecurrentToCellWeightsTensor);
-  TfLiteTensor* fw_recurrent_to_output_weights =
+  const TfLiteTensor* fw_recurrent_to_output_weights =
      GetInput(context, node, kFwRecurrentToOutputWeightsTensor);
 
   TfLiteTensor* fw_cell_to_input_weights =
@@ -496,10 +497,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
   TfLiteTensor* fw_input_gate_bias =
       GetOptionalInputTensor(context, node, kFwInputGateBiasTensor);
-  TfLiteTensor* fw_forget_gate_bias =
+  const TfLiteTensor* fw_forget_gate_bias =
      GetInput(context, node, kFwForgetGateBiasTensor);
-  TfLiteTensor* fw_cell_bias = GetInput(context, node, kFwCellGateBiasTensor);
-  TfLiteTensor* fw_output_gate_bias =
+  const TfLiteTensor* fw_cell_bias =
+      GetInput(context, node, kFwCellGateBiasTensor);
+  const TfLiteTensor* fw_output_gate_bias =
      GetInput(context, node, kFwOutputGateBiasTensor);
 
   TfLiteTensor* fw_projection_weights =
@@ -515,20 +517,20 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
   // Tensors for the backward cell.
   TfLiteTensor* bw_input_to_input_weights =
       GetOptionalInputTensor(context, node, kBwInputToInputWeightsTensor);
-  TfLiteTensor* bw_input_to_forget_weights =
+  const TfLiteTensor* bw_input_to_forget_weights =
      GetInput(context, node, kBwInputToForgetWeightsTensor);
-  TfLiteTensor* bw_input_to_cell_weights =
+  const TfLiteTensor* bw_input_to_cell_weights =
      GetInput(context, node, kBwInputToCellWeightsTensor);
-  TfLiteTensor* bw_input_to_output_weights =
+  const TfLiteTensor* bw_input_to_output_weights =
      GetInput(context, node, kBwInputToOutputWeightsTensor);
 
   TfLiteTensor* bw_recurrent_to_input_weights =
       GetOptionalInputTensor(context, node, kBwRecurrentToInputWeightsTensor);
-  TfLiteTensor* bw_recurrent_to_forget_weights =
+  const TfLiteTensor* bw_recurrent_to_forget_weights =
      GetInput(context, node, kBwRecurrentToForgetWeightsTensor);
-  TfLiteTensor* bw_recurrent_to_cell_weights =
+  const TfLiteTensor* bw_recurrent_to_cell_weights =
      GetInput(context, node, kBwRecurrentToCellWeightsTensor);
-  TfLiteTensor* bw_recurrent_to_output_weights =
+  const TfLiteTensor* bw_recurrent_to_output_weights =
      GetInput(context, node, kBwRecurrentToOutputWeightsTensor);
 
   TfLiteTensor* bw_cell_to_input_weights =
@@ -540,10 +542,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
   TfLiteTensor* bw_input_gate_bias =
       GetOptionalInputTensor(context, node, kBwInputGateBiasTensor);
-  TfLiteTensor* bw_forget_gate_bias =
+  const TfLiteTensor* bw_forget_gate_bias =
      GetInput(context, node, kBwForgetGateBiasTensor);
-  TfLiteTensor* bw_cell_bias = GetInput(context, node, kBwCellGateBiasTensor);
-  TfLiteTensor* bw_output_gate_bias =
+  const TfLiteTensor* bw_cell_bias =
+      GetInput(context, node, kBwCellGateBiasTensor);
+  const TfLiteTensor* bw_output_gate_bias =
      GetInput(context, node, kBwOutputGateBiasTensor);
 
   TfLiteTensor* bw_projection_weights =
diff --git a/tensorflow/contrib/lite/kernels/cast.cc b/tensorflow/contrib/lite/kernels/cast.cc
index 17ef2c5..673eedc 100644
--- a/tensorflow/contrib/lite/kernels/cast.cc
+++ b/tensorflow/contrib/lite/kernels/cast.cc
@@ -32,7 +32,7 @@ constexpr int kOutputTensor = 0;
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   // TODO(ahentz): these two checks would make the new implementation
@@ -77,7 +77,7 @@ TfLiteStatus copyToTensor(const FromT* in, TfLiteTensor* out,
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   const int num_elements = NumElements(input);
   TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output));
diff --git a/tensorflow/contrib/lite/kernels/comparisons.cc b/tensorflow/contrib/lite/kernels/comparisons.cc
index 2885ce0..b948334 100644
--- a/tensorflow/contrib/lite/kernels/comparisons.cc
+++ b/tensorflow/contrib/lite/kernels/comparisons.cc
@@ -32,8 +32,8 @@ TfLiteStatus ComparisonPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   // Don't support string and bool.
@@ -68,8 +68,8 @@ TfLiteStatus ComparisonPrepare(TfLiteContext* context, TfLiteNode* node) {
       GetTensorData<bool>(output), GetTensorDims(output));
 
 TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   bool requires_broadcast = !HaveSameShapes(input1, input2);
   // TODO(renjieliu): Support quantized data.
@@ -92,8 +92,8 @@ TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   bool requires_broadcast = !HaveSameShapes(input1, input2);
   // TODO(renjieliu): Support quantized data.
@@ -116,8 +116,8 @@ TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   bool requires_broadcast = !HaveSameShapes(input1, input2);
   // TODO(renjieliu): Support quantized data.
@@ -140,8 +140,8 @@ TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   bool requires_broadcast = !HaveSameShapes(input1, input2);
   // TODO(renjieliu): Support quantized data.
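The comparison kernels above all follow the same shape-check pattern before
choosing between the plain and broadcasting reference implementation. A
minimal sketch of that check under the new const API (the helper name
NeedsBroadcast is hypothetical; GetInput and HaveSameShapes are the
kernel_util.h functions changed by this patch):

    #include "tensorflow/contrib/lite/context.h"
    #include "tensorflow/contrib/lite/kernels/kernel_util.h"

    namespace tflite {
    namespace {

    // Returns whether an element-wise binary op over the two inputs of
    // `node` needs broadcasting. Both tensors are only inspected, never
    // mutated, so const pointers are sufficient.
    bool NeedsBroadcast(TfLiteContext* context, TfLiteNode* node) {
      const TfLiteTensor* input1 = GetInput(context, node, 0);
      const TfLiteTensor* input2 = GetInput(context, node, 1);
      return !HaveSameShapes(input1, input2);
    }

    }  // namespace
    }  // namespace tflite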
diff --git a/tensorflow/contrib/lite/kernels/depthwise_conv.cc b/tensorflow/contrib/lite/kernels/depthwise_conv.cc
index eeda1bc..3ad8d7d 100644
--- a/tensorflow/contrib/lite/kernels/depthwise_conv.cc
+++ b/tensorflow/contrib/lite/kernels/depthwise_conv.cc
@@ -83,9 +83,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   bool hasBias = NumInputs(node) == 3;
 
   TF_LITE_ENSURE(context, hasBias || NumInputs(node) == 2);
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
-  TfLiteTensor* bias = nullptr;
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
+  const TfLiteTensor* bias = nullptr;
 
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
@@ -169,8 +169,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 template <KernelType kernel_type>
 void EvalFloat(TfLiteContext* context, TfLiteNode* node,
                TfLiteDepthwiseConvParams* params, OpData* data,
-               TfLiteTensor* input, TfLiteTensor* filter, TfLiteTensor* bias,
-               TfLiteTensor* output) {
+               const TfLiteTensor* input, const TfLiteTensor* filter,
+               const TfLiteTensor* bias, TfLiteTensor* output) {
   float output_activation_min, output_activation_max;
   CalculateActivationRangeFloat(params->activation, &output_activation_min,
                                 &output_activation_max);
@@ -196,8 +196,8 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
 template <KernelType kernel_type>
 void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                    TfLiteDepthwiseConvParams* params, OpData* data,
-                   TfLiteTensor* input, TfLiteTensor* filter,
-                   TfLiteTensor* bias, TfLiteTensor* output) {
+                   const TfLiteTensor* input, const TfLiteTensor* filter,
+                   const TfLiteTensor* bias, TfLiteTensor* output) {
   auto input_offset = -input->params.zero_point;
   auto filter_offset = -filter->params.zero_point;
   auto output_offset = output->params.zero_point;
@@ -230,9 +230,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
-  TfLiteTensor* bias =
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
+  const TfLiteTensor* bias =
      (NumInputs(node) == 3) ? GetInput(context, node, kBiasTensor) : nullptr;
 
   // TODO(aselle): Consider whether float conv and quantized conv should be
diff --git a/tensorflow/contrib/lite/kernels/dequantize.cc b/tensorflow/contrib/lite/kernels/dequantize.cc
index e685f24..672b217 100644
--- a/tensorflow/contrib/lite/kernels/dequantize.cc
+++ b/tensorflow/contrib/lite/kernels/dequantize.cc
@@ -32,7 +32,7 @@ struct OpContext {
     input = GetInput(context, node, 0);
     output = GetOutput(context, node, 0);
   }
-  TfLiteTensor* input;
+  const TfLiteTensor* input;
   TfLiteTensor* output;
 };
 
diff --git a/tensorflow/contrib/lite/kernels/div.cc b/tensorflow/contrib/lite/kernels/div.cc
index ec380c8..e52e4fe 100644
--- a/tensorflow/contrib/lite/kernels/div.cc
+++ b/tensorflow/contrib/lite/kernels/div.cc
@@ -57,8 +57,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);
@@ -80,7 +80,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 template <KernelType kernel_type>
 void EvalFloat(TfLiteContext* context, TfLiteNode* node,
                TfLiteDivParams* params, const OpData* data,
-               TfLiteTensor* input1, TfLiteTensor* input2,
+               const TfLiteTensor* input1, const TfLiteTensor* input2,
                TfLiteTensor* output) {
   float output_activation_min, output_activation_max;
   CalculateActivationRangeFloat(params->activation, &output_activation_min,
@@ -106,15 +106,13 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
 #undef TF_LITE_DIV
 }
 
-
-
 template <KernelType kernel_type>
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteDivParams*>(node->builtin_data);
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   if (output->type == kTfLiteFloat32) {
diff --git a/tensorflow/contrib/lite/kernels/elementwise.cc b/tensorflow/contrib/lite/kernels/elementwise.cc
index 6588256..b719a08 100644
--- a/tensorflow/contrib/lite/kernels/elementwise.cc
+++ b/tensorflow/contrib/lite/kernels/elementwise.cc
@@ -26,7 +26,7 @@ namespace elementwise {
 TfLiteStatus SinPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   TF_LITE_ENSURE_EQ(context, input->type, output->type);
   // Quantized float is not supported yet.
@@ -36,13 +36,13 @@ TfLiteStatus SinPrepare(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TfLiteTensor* output = GetOutput(context, node, 0);
   switch (input->type) {
     case kTfLiteFloat32: {
       size_t elements = NumElements(input);
-      float* in = GetTensorData<float>(input);
-      float* in_end = in + elements;
+      const float* in = GetTensorData<float>(input);
+      const float* in_end = in + elements;
       float* out = output->data.f;
       for (; in < in_end; in++, out++) *out = std::sin(*in);
       return kTfLiteOk;
diff --git a/tensorflow/contrib/lite/kernels/embedding_lookup.cc b/tensorflow/contrib/lite/kernels/embedding_lookup.cc
index 4e8cb39..7539c0b 100644
--- a/tensorflow/contrib/lite/kernels/embedding_lookup.cc
+++ b/tensorflow/contrib/lite/kernels/embedding_lookup.cc
@@ -51,11 +51,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* lookup = GetInput(context, node, 0);
+  const TfLiteTensor* lookup = GetInput(context, node, 0);
   TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1);
   TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32);
 
-  TfLiteTensor* value = GetInput(context, node, 1);
+  const TfLiteTensor* value = GetInput(context, node, 1);
   TF_LITE_ENSURE(context, NumDimensions(value) >= 2);
 
   TfLiteTensor* output = GetOutput(context, node, 0);
@@ -71,8 +71,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   TfLiteTensor* output = GetOutput(context, node, 0);
-  TfLiteTensor* lookup = GetInput(context, node, 0);
-  TfLiteTensor* value = GetInput(context, node, 1);
+  const TfLiteTensor* lookup = GetInput(context, node, 0);
+  const TfLiteTensor* value = GetInput(context, node, 1);
 
   const int row_size = SizeOfDimension(value, 0);
   const int row_bytes = value->bytes / row_size;
diff --git a/tensorflow/contrib/lite/kernels/embedding_lookup_sparse.cc b/tensorflow/contrib/lite/kernels/embedding_lookup_sparse.cc
index 6c770e7..d3be369 100644
--- a/tensorflow/contrib/lite/kernels/embedding_lookup_sparse.cc
+++ b/tensorflow/contrib/lite/kernels/embedding_lookup_sparse.cc
@@ -81,19 +81,19 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 5);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* ids = GetInput(context, node, 0);
+  const TfLiteTensor* ids = GetInput(context, node, 0);
   TF_LITE_ENSURE_EQ(context, NumDimensions(ids), 1);
   TF_LITE_ENSURE_EQ(context, ids->type, kTfLiteInt32);
 
-  TfLiteTensor* indices = GetInput(context, node, 1);
+  const TfLiteTensor* indices = GetInput(context, node, 1);
   TF_LITE_ENSURE_EQ(context, NumDimensions(indices), 2);
   TF_LITE_ENSURE_EQ(context, indices->type, kTfLiteInt32);
 
-  TfLiteTensor* shape = GetInput(context, node, 2);
+  const TfLiteTensor* shape = GetInput(context, node, 2);
   TF_LITE_ENSURE_EQ(context, NumDimensions(shape), 1);
   TF_LITE_ENSURE_EQ(context, shape->type, kTfLiteInt32);
 
-  TfLiteTensor* weights = GetInput(context, node, 3);
+  const TfLiteTensor* weights = GetInput(context, node, 3);
   TF_LITE_ENSURE_EQ(context, NumDimensions(weights), 1);
   TF_LITE_ENSURE_EQ(context, weights->type, kTfLiteFloat32);
 
@@ -102,7 +102,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, SizeOfDimension(indices, 0),
                     SizeOfDimension(weights, 0));
 
-  TfLiteTensor* value = GetInput(context, node, 4);
+  const TfLiteTensor* value = GetInput(context, node, 4);
   TF_LITE_ENSURE(context, NumDimensions(value) >= 2);
 
   // Mark the output as a dynamic tensor.
@@ -139,11 +139,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteEmbeddingLookupSparseParams*>(
      node->builtin_data);
   TfLiteTensor* output = GetOutput(context, node, 0);
-  TfLiteTensor* ids = GetInput(context, node, 0);
-  TfLiteTensor* indices = GetInput(context, node, 1);
-  TfLiteTensor* dense_shape = GetInput(context, node, 2);
-  TfLiteTensor* weights = GetInput(context, node, 3);
-  TfLiteTensor* value = GetInput(context, node, 4);
+  const TfLiteTensor* ids = GetInput(context, node, 0);
+  const TfLiteTensor* indices = GetInput(context, node, 1);
+  const TfLiteTensor* dense_shape = GetInput(context, node, 2);
+  const TfLiteTensor* weights = GetInput(context, node, 3);
+  const TfLiteTensor* value = GetInput(context, node, 4);
 
   const int lookup_rank = SizeOfDimension(indices, 1);
   const int embedding_rank = NumDimensions(value);
diff --git a/tensorflow/contrib/lite/kernels/exp.cc b/tensorflow/contrib/lite/kernels/exp.cc
index a9e79b7..ce03cdf 100644
--- a/tensorflow/contrib/lite/kernels/exp.cc
+++ b/tensorflow/contrib/lite/kernels/exp.cc
@@ -36,7 +36,7 @@ struct ExpContext {
     input = GetInput(context, node, 0);
     output = GetOutput(context, node, 0);
   }
-  TfLiteTensor* input;
+  const TfLiteTensor* input;
   TfLiteTensor* output;
 };
 
diff --git a/tensorflow/contrib/lite/kernels/floor.cc b/tensorflow/contrib/lite/kernels/floor.cc
index 4b4395f..697b777 100644
--- a/tensorflow/contrib/lite/kernels/floor.cc
+++ b/tensorflow/contrib/lite/kernels/floor.cc
@@ -27,7 +27,7 @@ constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
@@ -38,7 +38,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   optimized_ops::Floor(GetTensorData<float>(input), GetTensorDims(input),
diff --git a/tensorflow/contrib/lite/kernels/fully_connected.cc b/tensorflow/contrib/lite/kernels/fully_connected.cc
index 470b52b..39b1086 100644
--- a/tensorflow/contrib/lite/kernels/fully_connected.cc
+++ b/tensorflow/contrib/lite/kernels/fully_connected.cc
@@ -89,8 +89,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, node->inputs->size, 3);
   TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* filter = GetInput(context, node, kWeightsTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor);
   TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
@@ -158,8 +158,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 TfLiteStatus EvalPie(TfLiteContext* context, TfLiteNode* node,
                      TfLiteFullyConnectedParams* params, OpData* data,
-                     TfLiteTensor* input, TfLiteTensor* filter,
-                     TfLiteTensor* bias, TfLiteTensor* output) {
+                     const TfLiteTensor* input, const TfLiteTensor* filter,
+                     const TfLiteTensor* bias, TfLiteTensor* output) {
   int total_input_size = 1;
   for (int i = 0; i < input->dims->size; i++) {
     total_input_size *= input->dims->data[i];
@@ -191,8 +191,10 @@ TfLiteStatus EvalPie(TfLiteContext* context, TfLiteNode* node,
 
 TfLiteStatus EvalPieQuantized(TfLiteContext* context, TfLiteNode* node,
                               TfLiteFullyConnectedParams* params, OpData* data,
-                              TfLiteTensor* input, TfLiteTensor* filter,
-                              TfLiteTensor* bias, TfLiteTensor* input_quantized,
+                              const TfLiteTensor* input,
+                              const TfLiteTensor* filter,
+                              const TfLiteTensor* bias,
+                              TfLiteTensor* input_quantized,
                               TfLiteTensor* output) {
   // Check the types for this hybrid Op.
   TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32);
@@ -271,8 +273,9 @@ TfLiteStatus EvalPieQuantized(TfLiteContext* context, TfLiteNode* node,
 template <KernelType kernel_type>
 TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                            TfLiteFullyConnectedParams* params, OpData* data,
-                           TfLiteTensor* input, TfLiteTensor* filter,
-                           TfLiteTensor* bias, TfLiteTensor* output) {
+                           const TfLiteTensor* input,
+                           const TfLiteTensor* filter, const TfLiteTensor* bias,
+                           TfLiteTensor* output) {
   gemmlowp::GemmContext* gemm_context = gemm_support::GetFromContext(context);
 
   int32_t input_offset = -input->params.zero_point;
@@ -311,8 +314,8 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
 template <KernelType kernel_type>
 TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node,
                        TfLiteFullyConnectedParams* params, OpData* data,
-                       TfLiteTensor* input, TfLiteTensor* filter,
-                       TfLiteTensor* bias, TfLiteTensor* output) {
+                       const TfLiteTensor* input, const TfLiteTensor* filter,
+                       const TfLiteTensor* bias, TfLiteTensor* output) {
   float output_activation_min, output_activation_max;
   CalculateActivationRangeFloat(params->activation, &output_activation_min,
                                 &output_activation_max);
@@ -342,8 +345,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
      reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* filter = GetInput(context, node, kWeightsTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor);
   TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
diff --git a/tensorflow/contrib/lite/kernels/gather.cc b/tensorflow/contrib/lite/kernels/gather.cc
index 0e4187d..c452d3e 100644
--- a/tensorflow/contrib/lite/kernels/gather.cc
+++ b/tensorflow/contrib/lite/kernels/gather.cc
@@ -35,8 +35,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const auto* params =
      reinterpret_cast<TfLiteGatherParams*>(node->builtin_data);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* positions = GetInput(context, node, kInputPositions);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* positions = GetInput(context, node, kInputPositions);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   // Only INT32 positions are supported.
   TF_LITE_ENSURE_EQ(context, positions->type, kTfLiteInt32);
@@ -81,8 +81,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* positions = GetInput(context, node, kInputPositions);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* positions = GetInput(context, node, kInputPositions);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   const int input_rank = NumDimensions(input);
 #define TF_LITE_GATHER(data_type, index_type) \
diff --git a/tensorflow/contrib/lite/kernels/hashtable_lookup.cc b/tensorflow/contrib/lite/kernels/hashtable_lookup.cc
index 3b82601..41211d4 100644
--- a/tensorflow/contrib/lite/kernels/hashtable_lookup.cc
+++ b/tensorflow/contrib/lite/kernels/hashtable_lookup.cc
@@ -60,15 +60,15 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
 
-  TfLiteTensor* lookup = GetInput(context, node, 0);
+  const TfLiteTensor* lookup = GetInput(context, node, 0);
   TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1);
   TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32);
 
-  TfLiteTensor* key = GetInput(context, node, 1);
+  const TfLiteTensor* key = GetInput(context, node, 1);
   TF_LITE_ENSURE_EQ(context, NumDimensions(key), 1);
   TF_LITE_ENSURE_EQ(context, key->type, kTfLiteInt32);
 
-  TfLiteTensor* value = GetInput(context, node, 2);
+  const TfLiteTensor* value = GetInput(context, node, 2);
   TF_LITE_ENSURE(context, NumDimensions(value) >= 1);
   TF_LITE_ENSURE_EQ(context, SizeOfDimension(key, 0),
                     SizeOfDimension(value, 0));
@@ -102,9 +102,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   TfLiteTensor* output = GetOutput(context, node, 0);
   TfLiteTensor* hits = GetOutput(context, node, 1);
-  TfLiteTensor* lookup = GetInput(context, node, 0);
-  TfLiteTensor* key = GetInput(context, node, 1);
-  TfLiteTensor* value = GetInput(context, node, 2);
+  const TfLiteTensor* lookup = GetInput(context, node, 0);
+  const TfLiteTensor* key = GetInput(context, node, 1);
+  const TfLiteTensor* value = GetInput(context, node, 2);
   const int num_rows = SizeOfDimension(value, 0);
   const int row_bytes = value->bytes / num_rows;
 
diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
index 273b574..26a7c16 100644
--- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
+++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h
@@ -3270,11 +3270,11 @@ inline void Exp(const T* input_data, const size_t num_elements,
 }
 
 template <typename T, typename U>
-inline bool Mean(T* input_data, const int* input_dims, const int input_num_dims,
-                 T* output_data, const int* output_dims,
-                 const int output_num_dims, const int* axis,
-                 const int num_axis_dimensions, bool keep_dims, int* temp_index,
-                 int* resolved_axis, U* temp_sum) {
+inline bool Mean(const T* input_data, const int* input_dims,
+                 const int input_num_dims, T* output_data,
+                 const int* output_dims, const int output_num_dims,
+                 const int* axis, const int num_axis_dimensions, bool keep_dims,
+                 int* temp_index, int* resolved_axis, U* temp_sum) {
   // resets output data.
   size_t num_outputs = 1;
   for (int idx = 0; idx < output_num_dims; ++idx) {
diff --git a/tensorflow/contrib/lite/kernels/internal/tensor.h b/tensorflow/contrib/lite/kernels/internal/tensor.h
index 62cea14..ce887ce 100644
--- a/tensorflow/contrib/lite/kernels/internal/tensor.h
+++ b/tensorflow/contrib/lite/kernels/internal/tensor.h
@@ -49,6 +49,34 @@ inline bool* GetTensorData(TfLiteTensor* tensor) {
   return tensor != nullptr ? tensor->data.b : nullptr;
 }
 
+template <typename T>
+inline const T* GetTensorData(const TfLiteTensor* tensor);
+
+template <>
+inline const float* GetTensorData(const TfLiteTensor* tensor) {
+  return tensor != nullptr ? tensor->data.f : nullptr;
+}
+
+template <>
+inline const uint8_t* GetTensorData(const TfLiteTensor* tensor) {
+  return tensor != nullptr ? tensor->data.uint8 : nullptr;
+}
+
+template <>
+inline const int32_t* GetTensorData(const TfLiteTensor* tensor) {
+  return tensor != nullptr ? tensor->data.i32 : nullptr;
+}
+
+template <>
+inline const int64_t* GetTensorData(const TfLiteTensor* tensor) {
+  return tensor != nullptr ? tensor->data.i64 : nullptr;
+}
+
+template <>
+inline const bool* GetTensorData(const TfLiteTensor* tensor) {
+  return tensor != nullptr ? tensor->data.b : nullptr;
+}
+
 inline int RemapDim(int max_dimensions, int d) { return max_dimensions - d - 1; }
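The const overloads added to tensor.h above make read-only access ergonomic:
overload resolution picks the const variant whenever the tensor pointer is
const. A small illustrative helper (SumElements is hypothetical, not part of
the patch):

    #include "tensorflow/contrib/lite/kernels/internal/tensor.h"

    namespace tflite {

    // Reads a float tensor through the new const GetTensorData overload;
    // no mutable pointer is needed anywhere.
    float SumElements(const TfLiteTensor* t, int num_elements) {
      const float* data = GetTensorData<float>(t);  // const overload
      float sum = 0.f;
      for (int i = 0; i < num_elements; ++i) sum += data[i];
      return sum;
    }

    }  // namespace tflite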
diff --git a/tensorflow/contrib/lite/kernels/kernel_util.cc b/tensorflow/contrib/lite/kernels/kernel_util.cc
index 955e8c5..239b533 100644
--- a/tensorflow/contrib/lite/kernels/kernel_util.cc
+++ b/tensorflow/contrib/lite/kernels/kernel_util.cc
@@ -22,9 +22,12 @@ limitations under the License.
 
 namespace tflite {
 
-TfLiteStatus GetQuantizedConvolutionMultipler(
-    TfLiteContext* context, TfLiteTensor* input, TfLiteTensor* filter,
-    TfLiteTensor* bias, TfLiteTensor* output, double* multiplier) {
+TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
+                                              const TfLiteTensor* input,
+                                              const TfLiteTensor* filter,
+                                              const TfLiteTensor* bias,
+                                              TfLiteTensor* output,
+                                              double* multiplier) {
   const double input_product_scale = input->params.scale * filter->params.scale;
   const double bias_scale = bias->params.scale;
   const double output_scale = output->params.scale;
@@ -87,13 +90,13 @@ void CalculateActivationRangeFloat(TfLiteFusedActivation activation,
   }
 }
 
-bool HaveSameShapes(TfLiteTensor* input1, TfLiteTensor* input2) {
+bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
   return TfLiteIntArrayEqual(input1->dims, input2->dims);
 }
 
 TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
-                                        TfLiteTensor* input1,
-                                        TfLiteTensor* input2,
+                                        const TfLiteTensor* input1,
+                                        const TfLiteTensor* input2,
                                         TfLiteIntArray** output_shape) {
   int64_t dims1 = NumDimensions(input1);
   int64_t dims2 = NumDimensions(input2);
diff --git a/tensorflow/contrib/lite/kernels/kernel_util.h b/tensorflow/contrib/lite/kernels/kernel_util.h
index e225443..de0e368 100644
--- a/tensorflow/contrib/lite/kernels/kernel_util.h
+++ b/tensorflow/contrib/lite/kernels/kernel_util.h
@@ -24,8 +24,8 @@ inline int NumDimensions(const TfLiteTensor* t) { return t->dims->size; }
 inline int SizeOfDimension(const TfLiteTensor* t, int dim) {
   return t->dims->data[dim];
 }
-inline TfLiteTensor* GetInput(TfLiteContext* context, TfLiteNode* node,
-                              int index) {
+inline const TfLiteTensor* GetInput(TfLiteContext* context, TfLiteNode* node,
+                                    int index) {
   return &context->tensors[node->inputs->data[index]];
 }
 inline TfLiteTensor* GetOutput(TfLiteContext* context, TfLiteNode* node,
@@ -78,9 +78,12 @@ inline void SetTensorToDynamic(TfLiteTensor* tensor) {
 // Calculates the multiplication factor for a quantized convolution (or
 // quantized depthwise convolution) involving the given tensors. Returns an
 // error if the scales of the tensors are not compatible.
-TfLiteStatus GetQuantizedConvolutionMultipler(
-    TfLiteContext* context, TfLiteTensor* input, TfLiteTensor* filter,
-    TfLiteTensor* bias, TfLiteTensor* output, double* multiplier);
+TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
+                                              const TfLiteTensor* input,
+                                              const TfLiteTensor* filter,
+                                              const TfLiteTensor* bias,
+                                              TfLiteTensor* output,
+                                              double* multiplier);
 
 // Calculates the useful range of an activation layer given its activation
 // tensor.
@@ -92,13 +95,13 @@ void CalculateActivationRangeFloat(TfLiteFusedActivation activation,
                                    float* activation_max);
 
 // Return true if the given tensors have the same shape.
-bool HaveSameShapes(TfLiteTensor* input1, TfLiteTensor* input2);
+bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2);
 
 // Calculate the output_shape that is necessary for element-wise operations
 // with broadcasting involving the two input tensors.
 TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
-                                        TfLiteTensor* input1,
-                                        TfLiteTensor* input2,
+                                        const TfLiteTensor* input1,
+                                        const TfLiteTensor* input2,
                                         TfLiteIntArray** output_shape);
 
 }  // namespace tflite
diff --git a/tensorflow/contrib/lite/kernels/l2norm.cc b/tensorflow/contrib/lite/kernels/l2norm.cc
index e67f4e0..7cea63d 100644
--- a/tensorflow/contrib/lite/kernels/l2norm.cc
+++ b/tensorflow/contrib/lite/kernels/l2norm.cc
@@ -40,7 +40,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   TF_LITE_ENSURE(context, NumDimensions(input) <= 4);
@@ -64,7 +64,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 
 template <KernelType kernel_type>
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   if (output->type == kTfLiteFloat32) {
diff --git a/tensorflow/contrib/lite/kernels/local_response_norm.cc b/tensorflow/contrib/lite/kernels/local_response_norm.cc
index c1c70d0..c15a517 100644
--- a/tensorflow/contrib/lite/kernels/local_response_norm.cc
+++ b/tensorflow/contrib/lite/kernels/local_response_norm.cc
@@ -38,7 +38,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
@@ -60,7 +60,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
      reinterpret_cast<TfLiteLocalResponseNormParams*>(node->builtin_data);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   if (output->type == kTfLiteFloat32) {
diff --git a/tensorflow/contrib/lite/kernels/lsh_projection.cc b/tensorflow/contrib/lite/kernels/lsh_projection.cc
index 0ee3577..25d2dc2 100644
--- a/tensorflow/contrib/lite/kernels/lsh_projection.cc
+++ b/tensorflow/contrib/lite/kernels/lsh_projection.cc
@@ -77,16 +77,16 @@ TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* hash = GetInput(context, node, 0);
+  const TfLiteTensor* hash = GetInput(context, node, 0);
   TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2);
   // Support up to 32 bits.
   TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32);
 
-  TfLiteTensor* input = GetInput(context, node, 1);
+  const TfLiteTensor* input = GetInput(context, node, 1);
   TF_LITE_ENSURE(context, NumDimensions(input) >= 1);
 
   if (NumInputs(node) == 3) {
-    TfLiteTensor* weight = GetInput(context, node, 2);
+    const TfLiteTensor* weight = GetInput(context, node, 2);
     TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1);
     TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0),
                       SizeOfDimension(input, 0));
@@ -173,9 +173,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
      reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data);
   int32_t* out_buf = GetOutput(context, node, 0)->data.i32;
-  TfLiteTensor* hash = GetInput(context, node, 0);
-  TfLiteTensor* input = GetInput(context, node, 1);
-  TfLiteTensor* weight =
+  const TfLiteTensor* hash = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 1);
+  const TfLiteTensor* weight =
      NumInputs(node) == 2 ? nullptr : GetInput(context, node, 2);
 
   switch (params->type) {
diff --git a/tensorflow/contrib/lite/kernels/lstm.cc b/tensorflow/contrib/lite/kernels/lstm.cc
index a1521ef..8d447a2 100644
--- a/tensorflow/contrib/lite/kernels/lstm.cc
+++ b/tensorflow/contrib/lite/kernels/lstm.cc
@@ -100,13 +100,13 @@ TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
     TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[1], n_input);
   }
 
-  TfLiteTensor* input_to_forget_weights =
+  const TfLiteTensor* input_to_forget_weights =
      GetInput(context, node, kInputToForgetWeightsTensor);
   TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[0], n_cell);
   TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[1], n_input);
 
-  TfLiteTensor* input_to_cell_weights =
+  const TfLiteTensor* input_to_cell_weights =
      GetInput(context, node, kInputToCellWeightsTensor);
   TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[0], n_cell);
@@ -122,7 +122,7 @@ TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
                       n_output);
   }
 
-  TfLiteTensor* recurrent_to_forget_weights =
+  const TfLiteTensor* recurrent_to_forget_weights =
      GetInput(context, node, kRecurrentToForgetWeightsTensor);
   TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[0],
@@ -130,7 +130,7 @@ TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
   TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[1],
                     n_output);
 
-  TfLiteTensor* recurrent_to_cell_weights =
+  const TfLiteTensor* recurrent_to_cell_weights =
      GetInput(context, node, kRecurrentToCellWeightsTensor);
   TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[0], n_cell);
@@ -188,16 +188,16 @@ TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
     TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->data[0], n_cell);
   }
 
-  TfLiteTensor* forget_gate_bias =
+  const TfLiteTensor* forget_gate_bias =
       GetInput(context, node, kForgetGateBiasTensor);
   TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->size, 1);
   TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->data[0], n_cell);
 
-  TfLiteTensor* cell_bias = GetInput(context, node, kCellGateBiasTensor);
+  const TfLiteTensor* cell_bias = GetInput(context, node, kCellGateBiasTensor);
   TF_LITE_ENSURE_EQ(context, cell_bias->dims->size, 1);
   TF_LITE_ENSURE_EQ(context, cell_bias->dims->data[0], n_cell);
 
-  TfLiteTensor* output_gate_bias =
+  const TfLiteTensor* output_gate_bias =
       GetInput(context, node, kOutputGateBiasTensor);
   TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->size, 1);
   TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->data[0], n_cell);
@@ -241,18 +241,18 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 
   // Inferring batch size, number of outputs and number of cells from the
   // input tensors.
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TF_LITE_ENSURE(context, input->dims->size > 1);
   const int n_batch = input->dims->data[0];
   const int n_input = input->dims->data[1];
 
-  TfLiteTensor* input_to_output_weights =
+  const TfLiteTensor* input_to_output_weights =
       GetInput(context, node, kInputToOutputWeightsTensor);
   const int n_cell = input_to_output_weights->dims->data[0];
   TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[1], n_input);
 
-  TfLiteTensor* recurrent_to_output_weights =
+  const TfLiteTensor* recurrent_to_output_weights =
       GetInput(context, node, kRecurrentToOutputWeightsTensor);
   TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->data[0],
@@ -322,24 +322,24 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 // The LSTM Op engine.
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
 
   TfLiteTensor* input_to_input_weights =
       GetOptionalInputTensor(context, node, kInputToInputWeightsTensor);
-  TfLiteTensor* input_to_forget_weights =
+  const TfLiteTensor* input_to_forget_weights =
       GetInput(context, node, kInputToForgetWeightsTensor);
-  TfLiteTensor* input_to_cell_weights =
+  const TfLiteTensor* input_to_cell_weights =
      GetInput(context, node, kInputToCellWeightsTensor);
-  TfLiteTensor* input_to_output_weights =
+  const TfLiteTensor* input_to_output_weights =
       GetInput(context, node, kInputToOutputWeightsTensor);
 
   TfLiteTensor* recurrent_to_input_weights =
       GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor);
-  TfLiteTensor* recurrent_to_forget_weights =
+  const TfLiteTensor* recurrent_to_forget_weights =
       GetInput(context, node, kRecurrentToForgetWeightsTensor);
-  TfLiteTensor* recurrent_to_cell_weights =
+  const TfLiteTensor* recurrent_to_cell_weights =
       GetInput(context, node, kRecurrentToCellWeightsTensor);
-  TfLiteTensor* recurrent_to_output_weights =
+  const TfLiteTensor* recurrent_to_output_weights =
       GetInput(context, node, kRecurrentToOutputWeightsTensor);
 
   TfLiteTensor* cell_to_input_weights =
@@ -351,10 +351,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
   TfLiteTensor* input_gate_bias =
       GetOptionalInputTensor(context, node, kInputGateBiasTensor);
-  TfLiteTensor* forget_gate_bias =
+  const TfLiteTensor* forget_gate_bias =
       GetInput(context, node, kForgetGateBiasTensor);
-  TfLiteTensor* cell_bias = GetInput(context, node, kCellGateBiasTensor);
-  TfLiteTensor* output_gate_bias =
+  const TfLiteTensor* cell_bias = GetInput(context, node, kCellGateBiasTensor);
+  const TfLiteTensor* output_gate_bias =
      GetInput(context, node, kOutputGateBiasTensor);
 
   TfLiteTensor* projection_weights =
diff --git a/tensorflow/contrib/lite/kernels/maximum_minimum.cc b/tensorflow/contrib/lite/kernels/maximum_minimum.cc
index 5a28d66..8d67621 100644
--- a/tensorflow/contrib/lite/kernels/maximum_minimum.cc
+++ b/tensorflow/contrib/lite/kernels/maximum_minimum.cc
@@ -41,8 +41,8 @@ struct OpContext {
     input2 = GetInput(context, node, kInputTensor2);
     output = GetOutput(context, node, kOutputTensor);
   }
-  TfLiteTensor* input1;
-  TfLiteTensor* input2;
+  const TfLiteTensor* input1;
+  const TfLiteTensor* input2;
   TfLiteTensor* output;
 };
 
diff --git a/tensorflow/contrib/lite/kernels/mean.cc b/tensorflow/contrib/lite/kernels/mean.cc
index 98f80e3..03e5db2 100644
--- a/tensorflow/contrib/lite/kernels/mean.cc
+++ b/tensorflow/contrib/lite/kernels/mean.cc
@@ -40,8 +40,8 @@ struct MeanContext {
     output = GetOutput(context, node, 0);
   }
   TfLiteMeanParams* params;
-  TfLiteTensor* input;
-  TfLiteTensor* axis;
+  const TfLiteTensor* input;
+  const TfLiteTensor* axis;
   TfLiteTensor* output;
 };
 
diff --git a/tensorflow/contrib/lite/kernels/mfcc.cc b/tensorflow/contrib/lite/kernels/mfcc.cc
index 018db0d..3f5bc4d 100644
--- a/tensorflow/contrib/lite/kernels/mfcc.cc
+++ b/tensorflow/contrib/lite/kernels/mfcc.cc
@@ -67,8 +67,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* inputWav = GetInput(context, node, kInputTensorWav);
-  TfLiteTensor* inputRate = GetInput(context, node, kInputTensorRate);
+  const TfLiteTensor* inputWav = GetInput(context, node, kInputTensorWav);
+  const TfLiteTensor* inputRate = GetInput(context, node, kInputTensorRate);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   TF_LITE_ENSURE_EQ(context, NumDimensions(inputWav), 3);
@@ -94,8 +94,8 @@ template <KernelType kernel_type>
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data);
 
-  TfLiteTensor* inputWav = GetInput(context, node, kInputTensorWav);
-  TfLiteTensor* inputRate = GetInput(context, node, kInputTensorRate);
+  const TfLiteTensor* inputWav = GetInput(context, node, kInputTensorWav);
+  const TfLiteTensor* inputRate = GetInput(context, node, kInputTensorRate);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   const int32 sample_rate = *GetTensorData<int32>(inputRate);
diff --git a/tensorflow/contrib/lite/kernels/mul.cc b/tensorflow/contrib/lite/kernels/mul.cc
index 5457501..6c4c3a1 100644
--- a/tensorflow/contrib/lite/kernels/mul.cc
+++ b/tensorflow/contrib/lite/kernels/mul.cc
@@ -57,8 +57,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);
@@ -80,7 +80,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 template <KernelType kernel_type>
 void EvalFloat(TfLiteContext* context, TfLiteNode* node,
                TfLiteMulParams* params, const OpData* data,
-               TfLiteTensor* input1, TfLiteTensor* input2,
+               const TfLiteTensor* input1, const TfLiteTensor* input2,
                TfLiteTensor* output) {
   float output_activation_min, output_activation_max;
   CalculateActivationRangeFloat(params->activation, &output_activation_min,
@@ -109,7 +109,7 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
 template <KernelType kernel_type>
 void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                    TfLiteMulParams* params, const OpData* data,
-                   TfLiteTensor* input1, TfLiteTensor* input2,
+                   const TfLiteTensor* input1, const TfLiteTensor* input2,
                    TfLiteTensor* output) {
   auto input1_offset = -input1->params.zero_point;
   auto input2_offset = -input2->params.zero_point;
@@ -149,8 +149,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data);
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   if (output->type == kTfLiteFloat32) {
diff --git a/tensorflow/contrib/lite/kernels/neg.cc b/tensorflow/contrib/lite/kernels/neg.cc
index 692da81..b8b53f3 100644
--- a/tensorflow/contrib/lite/kernels/neg.cc
+++ b/tensorflow/contrib/lite/kernels/neg.cc
@@ -27,7 +27,7 @@ constexpr int kOutputTensor = 0;
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   output->type = input->type;
 
@@ -44,7 +44,7 @@ void Negate(const T* in_data, int num_elements, T* out_data) {
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   const int num_elements = NumElements(input);
   switch (input->type) {
diff --git a/tensorflow/contrib/lite/kernels/pad.cc b/tensorflow/contrib/lite/kernels/pad.cc
index 9e1e465..b1eb6f7 100644
--- a/tensorflow/contrib/lite/kernels/pad.cc
+++ b/tensorflow/contrib/lite/kernels/pad.cc
@@ -46,8 +46,8 @@ struct PadContext {
     dims = NumDimensions(input);
   }
   TfLiteTensor* constant_values;
-  TfLiteTensor* input;
-  TfLiteTensor* paddings;
+  const TfLiteTensor* input;
+  const TfLiteTensor* paddings;
   TfLiteTensor* output;
   int dims;
 };
diff --git a/tensorflow/contrib/lite/kernels/pooling.cc b/tensorflow/contrib/lite/kernels/pooling.cc
index 0bf27c3..645d9f4 100644
--- a/tensorflow/contrib/lite/kernels/pooling.cc
+++ b/tensorflow/contrib/lite/kernels/pooling.cc
@@ -69,7 +69,7 @@ TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
   TfLiteTensor* output = GetOutput(context, node, 0);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
   TF_LITE_ENSURE_EQ(context, input->type, output->type);
 
@@ -122,7 +122,7 @@ TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
 template <KernelType kernel_type>
 void AverageEvalFloat(TfLiteContext* context, TfLiteNode* node,
                       TfLitePoolParams* params, OpData* data,
-                      TfLiteTensor* input, TfLiteTensor* output) {
+                      const TfLiteTensor* input, TfLiteTensor* output) {
   float activation_min, activation_max;
   CalculateActivationRangeFloat(params->activation, &activation_min,
                                 &activation_max);
@@ -143,7 +143,7 @@ void AverageEvalFloat(TfLiteContext* context, TfLiteNode* node,
 template <KernelType kernel_type>
 void AverageEvalQuantized(TfLiteContext* context, TfLiteNode* node,
                           TfLitePoolParams* params, OpData* data,
-                          TfLiteTensor* input, TfLiteTensor* output) {
+                          const TfLiteTensor* input, TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
   CalculateActivationRangeUint8(params->activation, output, &activation_min,
@@ -165,8 +165,8 @@ void AverageEvalQuantized(TfLiteContext* context, TfLiteNode* node,
 
 template <KernelType kernel_type>
 void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node,
-                  TfLitePoolParams* params, OpData* data, TfLiteTensor* input,
-                  TfLiteTensor* output) {
+                  TfLitePoolParams* params, OpData* data,
+                  const TfLiteTensor* input, TfLiteTensor* output) {
   float activation_min, activation_max;
   CalculateActivationRangeFloat(params->activation, &activation_min,
                                 &activation_max);
@@ -187,7 +187,7 @@ void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node,
 template <KernelType kernel_type>
 void MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node,
                       TfLitePoolParams* params, OpData* data,
-                      TfLiteTensor* input, TfLiteTensor* output) {
+                      const TfLiteTensor* input, TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
   CalculateActivationRangeUint8(params->activation, output, &activation_min,
@@ -209,8 +209,8 @@ void MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node,
 
 template <KernelType kernel_type>
 void L2EvalFloat(TfLiteContext* context, TfLiteNode* node,
-                 TfLitePoolParams* params, OpData* data, TfLiteTensor* input,
-                 TfLiteTensor* output) {
+                 TfLitePoolParams* params, OpData* data,
+                 const TfLiteTensor* input, TfLiteTensor* output) {
   float activation_min, activation_max;
   CalculateActivationRangeFloat(params->activation, &activation_min,
                                 &activation_max);
@@ -236,7 +236,7 @@ TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
   TfLiteTensor* output = GetOutput(context, node, 0);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32:
       AverageEvalFloat<kernel_type>(context, node, params, data, input, output);
@@ -258,7 +258,7 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
   TfLiteTensor* output = GetOutput(context, node, 0);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32:
       MaxEvalFloat<kernel_type>(context, node, params, data, input, output);
@@ -279,7 +279,7 @@ TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) {
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
   TfLiteTensor* output = GetOutput(context, node, 0);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32:
       L2EvalFloat<kernel_type>(context, node, params, data, input, output);
diff --git a/tensorflow/contrib/lite/kernels/reshape.cc b/tensorflow/contrib/lite/kernels/reshape.cc
index 438f70d..3287040 100644
--- a/tensorflow/contrib/lite/kernels/reshape.cc
+++ b/tensorflow/contrib/lite/kernels/reshape.cc
@@ -35,7 +35,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   // Tensorflow's Reshape allows one of the shape components to have the
@@ -70,7 +70,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   memcpy(output->data.raw, input->data.raw, input->bytes);
diff --git a/tensorflow/contrib/lite/kernels/resize_bilinear.cc b/tensorflow/contrib/lite/kernels/resize_bilinear.cc
index 9e3e19c..e4bd0f5 100644
--- a/tensorflow/contrib/lite/kernels/resize_bilinear.cc
+++ b/tensorflow/contrib/lite/kernels/resize_bilinear.cc
@@ -36,8 +36,10 @@ constexpr int kInputTensor = 0;
 constexpr int kSizeTensor = 1;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus ResizeOutputTensor(TfLiteContext* context, TfLiteTensor* input,
-                                TfLiteTensor* size, TfLiteTensor* output) {
+TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
+                                const TfLiteTensor* input,
+                                const TfLiteTensor* size,
+                                TfLiteTensor* output) {
   TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
   output_size->data[0] = input->dims->data[0];
   const int32* size_data = GetTensorData<int32>(size);
@@ -51,8 +53,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* size = GetInput(context, node, kSizeTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   // TODO(ahentz): Our current implementations rely on the inputs being 4D.
@@ -78,9 +80,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
-  TfLiteTensor* size = GetInput(context, node, kSizeTensor);
+  const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
 
   if (IsDynamicTensor(output)) {
     TF_LITE_ENSURE_OK(context,
diff --git a/tensorflow/contrib/lite/kernels/select.cc b/tensorflow/contrib/lite/kernels/select.cc
index 029ad9a..9bc8a1a 100644
--- a/tensorflow/contrib/lite/kernels/select.cc
+++ b/tensorflow/contrib/lite/kernels/select.cc
@@ -33,10 +33,10 @@ TfLiteStatus SelectPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input_condition =
+  const TfLiteTensor* input_condition =
       GetInput(context, node, kInputTensorCondition);
-  TfLiteTensor* input_x = GetInput(context, node, kInputTensorX);
-  TfLiteTensor* input_y = GetInput(context, node, kInputTensorY);
+  const TfLiteTensor* input_x = GetInput(context, node, kInputTensorX);
+  const TfLiteTensor* input_y = GetInput(context, node, kInputTensorY);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   // Input must be bool.
@@ -62,10 +62,10 @@ TfLiteStatus SelectPrepare(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus SelectEval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input_condition =
+  const TfLiteTensor* input_condition =
       GetInput(context, node, kInputTensorCondition);
-  TfLiteTensor* input_x = GetInput(context, node, kInputTensorX);
-  TfLiteTensor* input_y = GetInput(context, node, kInputTensorY);
+  const TfLiteTensor* input_x = GetInput(context, node, kInputTensorX);
+  const TfLiteTensor* input_y = GetInput(context, node, kInputTensorY);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   bool is_rank_one = !HaveSameShapes(input_condition, input_x);
diff --git a/tensorflow/contrib/lite/kernels/slice.cc b/tensorflow/contrib/lite/kernels/slice.cc
index 82baf53..b28934e 100644
--- a/tensorflow/contrib/lite/kernels/slice.cc
+++ b/tensorflow/contrib/lite/kernels/slice.cc
@@ -39,8 +39,9 @@ const int kMaxDim = 4;
 
 template <typename T>
 TfLiteStatus CalculateOutputShapeVector(
-    TfLiteContext* context, TfLiteTensor* input, TfLiteTensor* begin,
-    TfLiteTensor* size, std::vector<int64_t>* output_shape_vector) {
+    TfLiteContext* context, const TfLiteTensor* input,
+    const TfLiteTensor* begin, const TfLiteTensor* size,
+    std::vector<int64_t>* output_shape_vector) {
   for (int idx = 0; idx < NumDimensions(input); ++idx) {
     T size_value = GetTensorData<T>(size)[idx];
     if (size_value < 0) {
@@ -62,8 +63,8 @@ TfLiteStatus CalculateOutputShapeVector(
 }
 
 template <typename T>
-void GetBeginAndSizeVectors(int dimensions, TfLiteTensor* begin,
-                            TfLiteTensor* size, std::vector<int>* begins,
+void GetBeginAndSizeVectors(int dimensions, const TfLiteTensor* begin,
+                            const TfLiteTensor* size, std::vector<int>* begins,
                             std::vector<int>* sizes) {
   for (int idx = dimensions - 1; idx >= 0; --idx) {
     begins->push_back(GetTensorData<T>(begin)[idx]);
@@ -71,9 +72,10 @@ void GetBeginAndSizeVectors(int dimensions, TfLiteTensor* begin,
   }
 }
 
-TfLiteStatus ResizeOutputShape(TfLiteContext* context, TfLiteTensor* input,
-                               TfLiteTensor* begin, TfLiteTensor* size,
-                               TfLiteTensor* output) {
+TfLiteStatus ResizeOutputShape(TfLiteContext* context,
+                               const TfLiteTensor* input,
+                               const TfLiteTensor* begin,
+                               const TfLiteTensor* size, TfLiteTensor* output) {
   std::vector<int64_t> output_shape_vector;
 
   if (begin->type == kTfLiteInt32) {
@@ -98,9 +100,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* begin = GetInput(context, node, kBeginTensor);
-  TfLiteTensor* size = GetInput(context, node, kSizeTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* begin = GetInput(context, node, kBeginTensor);
+  const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   // Ensure validity of input tensor and its dimension.
@@ -124,9 +126,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* begin = GetInput(context, node, kBeginTensor);
-  TfLiteTensor* size = GetInput(context, node, kSizeTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* begin = GetInput(context, node, kBeginTensor);
+  const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   if (IsDynamicTensor(output)) {
diff --git a/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc b/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc
index d8c9e35..1e35869 100644
--- a/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc
+++ b/tensorflow/contrib/lite/kernels/space_to_batch_nd.cc
@@ -40,9 +40,9 @@ struct SpaceToBatchNDContext {
     paddings = GetInput(context, node, 2);
     output = GetOutput(context, node, 0);
   }
-  TfLiteTensor* input;
-  TfLiteTensor* block_shape;
-  TfLiteTensor* paddings;
+  const TfLiteTensor* input;
+  const TfLiteTensor* block_shape;
+  const TfLiteTensor* paddings;
   TfLiteTensor* output;
 };
 
diff --git a/tensorflow/contrib/lite/kernels/space_to_depth.cc b/tensorflow/contrib/lite/kernels/space_to_depth.cc
index cb2e509..aafce89 100644
--- a/tensorflow/contrib/lite/kernels/space_to_depth.cc
+++ b/tensorflow/contrib/lite/kernels/space_to_depth.cc
@@ -42,7 +42,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
@@ -76,7 +76,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteSpaceToDepthParams*>(node->builtin_data);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
 #define TF_LITE_SPACE_TO_DEPTH(type, scalar) \
diff --git a/tensorflow/contrib/lite/kernels/split.cc b/tensorflow/contrib/lite/kernels/split.cc
index b524c79..c6b94c2 100644
--- a/tensorflow/contrib/lite/kernels/split.cc
+++ b/tensorflow/contrib/lite/kernels/split.cc
@@ -34,8 +34,8 @@ struct OpContext {
     input = GetInput(context, node, 1);
   }
   TfLiteSplitParams* params;
-  TfLiteTensor* axis;
-  TfLiteTensor* input;
+  const TfLiteTensor* axis;
+  const TfLiteTensor* input;
 };
 
 TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) {
@@ -46,8 +46,8 @@ TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus ResizeOutputTensors(TfLiteContext* context, TfLiteNode* node,
-                                 TfLiteTensor* axis, TfLiteTensor* input,
-                                 int num_splits) {
+                                 const TfLiteTensor* axis,
+                                 const TfLiteTensor* input, int num_splits) {
   int axis_value = GetTensorData<int>(axis)[0];
   if (axis_value < 0) {
     axis_value += NumDimensions(input);
diff --git a/tensorflow/contrib/lite/kernels/squeeze.cc b/tensorflow/contrib/lite/kernels/squeeze.cc
index 29447ab..09a5662 100644
--- a/tensorflow/contrib/lite/kernels/squeeze.cc
+++ b/tensorflow/contrib/lite/kernels/squeeze.cc
@@ -26,13 +26,12 @@ namespace builtin {
 namespace squeeze {
 
 struct SqueezeContext {
-  SqueezeContext(TfLiteContext* context, TfLiteNode* node) {
-    params = reinterpret_cast<TfLiteSqueezeParams*>(node->builtin_data);
-    input = GetInput(context, node, 0);
-    output = GetOutput(context, node, 0);
-  }
+  SqueezeContext(TfLiteContext* context, TfLiteNode* node)
+      : params(reinterpret_cast<TfLiteSqueezeParams*>(node->builtin_data)),
+        input(GetInput(context, node, 0)),
+        output(GetOutput(context, node, 0)) {}
   TfLiteSqueezeParams* params;
-  TfLiteTensor* input;
+  const TfLiteTensor* const input;
   TfLiteTensor* output;
 };
 
diff --git a/tensorflow/contrib/lite/kernels/strided_slice.cc b/tensorflow/contrib/lite/kernels/strided_slice.cc
index 40ac436..9417be3 100644
--- a/tensorflow/contrib/lite/kernels/strided_slice.cc
+++ b/tensorflow/contrib/lite/kernels/strided_slice.cc
@@ -49,10 +49,10 @@ struct StridedSliceContext {
     dims = NumDimensions(input);
   }
   const TfLiteStridedSliceParams* params;
-  TfLiteTensor* input;
-  TfLiteTensor* begin;
-  TfLiteTensor* end;
-  TfLiteTensor* strides;
+  const TfLiteTensor* input;
+  const TfLiteTensor* begin;
+  const TfLiteTensor* end;
+  const TfLiteTensor* strides;
   TfLiteTensor* output;
   int dims;
 };
diff --git a/tensorflow/contrib/lite/kernels/sub.cc b/tensorflow/contrib/lite/kernels/sub.cc
index 7c60a4f..9531ecb 100644
--- a/tensorflow/contrib/lite/kernels/sub.cc
+++ b/tensorflow/contrib/lite/kernels/sub.cc
@@ -57,8 +57,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
 
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   TF_LITE_ENSURE_EQ(context, input1->type, input2->type);
@@ -80,7 +80,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 template <KernelType kernel_type>
 void EvalFloat(TfLiteContext* context, TfLiteNode* node,
                TfLiteSubParams* params, const OpData* data,
-               TfLiteTensor* input1, TfLiteTensor* input2,
+               const TfLiteTensor* input1, const TfLiteTensor* input2,
                TfLiteTensor* output) {
   float output_activation_min, output_activation_max;
   CalculateActivationRangeFloat(params->activation, &output_activation_min,
@@ -109,7 +109,7 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
 template <KernelType kernel_type>
 void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                    TfLiteSubParams* params, const OpData* data,
-                   TfLiteTensor* input1, TfLiteTensor* input2,
+                   const TfLiteTensor* input1, const TfLiteTensor* input2,
                    TfLiteTensor* output) {
   auto input1_offset = -input1->params.zero_point;
   auto input2_offset = -input2->params.zero_point;
@@ -164,8 +164,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data);
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
-  TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
-  TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
 
   if (output->type == kTfLiteFloat32) {
diff --git a/tensorflow/contrib/lite/kernels/svdf.cc b/tensorflow/contrib/lite/kernels/svdf.cc
index 13da51c..7888127 100644
--- a/tensorflow/contrib/lite/kernels/svdf.cc
+++ b/tensorflow/contrib/lite/kernels/svdf.cc
@@ -58,9 +58,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, node->outputs->size, 2);
 
   TfLiteTensor* input = &context->tensors[node->inputs->data[kInputTensor]];
-  TfLiteTensor* weights_feature =
+  const TfLiteTensor* weights_feature =
       GetInput(context, node, kWeightsFeatureTensor);
-  TfLiteTensor* weights_time = GetInput(context, node, kWeightsTimeTensor);
+  const TfLiteTensor* weights_time =
+      GetInput(context, node, kWeightsTimeTensor);
 
   // Check all the parameters of tensor match within themselves and match the
   // input configuration.
@@ -123,10 +124,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* weights_feature =
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* weights_feature =
       GetInput(context, node, kWeightsFeatureTensor);
-  TfLiteTensor* weights_time = GetInput(context, node, kWeightsTimeTensor);
+  const TfLiteTensor* weights_time =
+      GetInput(context, node, kWeightsTimeTensor);
 
   TfLiteTensor* state = GetOutput(context, node, kStateTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
diff --git a/tensorflow/contrib/lite/kernels/topk_v2.cc b/tensorflow/contrib/lite/kernels/topk_v2.cc
index ad9b744..b331fc8 100644
--- a/tensorflow/contrib/lite/kernels/topk_v2.cc
+++ b/tensorflow/contrib/lite/kernels/topk_v2.cc
@@ -30,7 +30,7 @@ constexpr int kOutputIndexes = 1;
 
 namespace {
 TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
+  const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
   // INT32 number of top results is supported.
   TF_LITE_ENSURE_EQ(context, top_k->type, kTfLiteInt32);
   // Check that the tensor contains only one value.
@@ -38,7 +38,7 @@ TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumElements(top_k), 1);
   const int32 k = top_k->data.i32[0];
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   const int num_dimensions = NumDimensions(input);
   // Check that input has one or more dimensions.
   TF_LITE_ENSURE_MSG(context, input->dims->size >= 1,
@@ -162,11 +162,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output_values = GetOutput(context, node, kOutputValues);
   TF_LITE_ENSURE_EQ(context, input->type, output_values->type);
 
-  TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
+  const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
   TF_LITE_ENSURE_EQ(context, top_k->type, kTfLiteInt32);
 
   // Set output dynamic if the input is not const.
@@ -187,11 +187,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   if (IsDynamicTensor(output_values)) {
     TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
   }
-  TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
+  const TfLiteTensor* top_k = GetInput(context, node, kInputTopK);
   const int32 k = top_k->data.i32[0];
   // The tensor can have more than 2 dimensions or even be a vector, the code
   // anyway calls the internal dimension as row;
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   const int32 row_size = input->dims->data[input->dims->size - 1];
   int32 num_rows = 1;
   for (int i = 0; i < input->dims->size - 1; ++i) {
diff --git a/tensorflow/contrib/lite/kernels/transpose.cc b/tensorflow/contrib/lite/kernels/transpose.cc
index d3c10a9..8316a23 100644
--- a/tensorflow/contrib/lite/kernels/transpose.cc
+++ b/tensorflow/contrib/lite/kernels/transpose.cc
@@ -37,8 +37,8 @@ struct TransposeContext {
     perm = GetInput(context, node, 1);
     output = GetOutput(context, node, 0);
   }
-  TfLiteTensor* input;
-  TfLiteTensor* perm;
+  const TfLiteTensor* input;
+  const TfLiteTensor* perm;
   TfLiteTensor* output;
 };
 
diff --git a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
index 5987bf6..46d65ca 100644
--- a/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
+++ b/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
@@ -100,13 +100,13 @@ TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
     TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[1], n_input);
   }
 
-  TfLiteTensor* input_to_forget_weights =
+  const TfLiteTensor* input_to_forget_weights =
       GetInput(context, node, kInputToForgetWeightsTensor);
   TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[0], n_cell);
   TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[1], n_input);
 
-  TfLiteTensor* input_to_cell_weights =
+  const TfLiteTensor* input_to_cell_weights =
       GetInput(context, node, kInputToCellWeightsTensor);
   TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[0], n_cell);
@@ -122,7 +122,7 @@ TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
                       n_output);
   }
 
-  TfLiteTensor* recurrent_to_forget_weights =
+  const TfLiteTensor* recurrent_to_forget_weights =
       GetInput(context, node, kRecurrentToForgetWeightsTensor);
   TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[0],
@@ -130,7 +130,7 @@ TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
   TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[1],
                     n_output);
 
-  TfLiteTensor* recurrent_to_cell_weights =
+  const TfLiteTensor* recurrent_to_cell_weights =
       GetInput(context, node, kRecurrentToCellWeightsTensor);
   TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[0], n_cell);
@@ -188,16 +188,16 @@ TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
     TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->data[0], n_cell);
   }
 
-  TfLiteTensor* forget_gate_bias =
+  const TfLiteTensor* forget_gate_bias =
       GetInput(context, node, kForgetGateBiasTensor);
   TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->size, 1);
   TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->data[0], n_cell);
 
-  TfLiteTensor* cell_bias = GetInput(context, node, kCellGateBiasTensor);
+  const TfLiteTensor* cell_bias = GetInput(context, node, kCellGateBiasTensor);
   TF_LITE_ENSURE_EQ(context, cell_bias->dims->size, 1);
   TF_LITE_ENSURE_EQ(context, cell_bias->dims->data[0], n_cell);
 
-  TfLiteTensor* output_gate_bias =
+  const TfLiteTensor* output_gate_bias =
       GetInput(context, node, kOutputGateBiasTensor);
   TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->size, 1);
   TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->data[0], n_cell);
@@ -241,19 +241,19 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 
   // Inferring batch size, number of outputs and sequence length and
   // number of cells from the input tensors.
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TF_LITE_ENSURE(context, input->dims->size > 1);
   const int max_time = input->dims->data[0];
   const int n_batch = input->dims->data[1];
   const int n_input = input->dims->data[2];
 
-  TfLiteTensor* input_to_output_weights =
+  const TfLiteTensor* input_to_output_weights =
       GetInput(context, node, kInputToOutputWeightsTensor);
   const int n_cell = input_to_output_weights->dims->data[0];
   TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[1], n_input);
 
-  TfLiteTensor* recurrent_to_output_weights =
+  const TfLiteTensor* recurrent_to_output_weights =
       GetInput(context, node, kRecurrentToOutputWeightsTensor);
   TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 2);
   TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->data[0],
@@ -324,24 +324,24 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 // The LSTM Op engine.
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
 
   TfLiteTensor* input_to_input_weights =
       GetOptionalInputTensor(context, node, kInputToInputWeightsTensor);
-  TfLiteTensor* input_to_forget_weights =
+  const TfLiteTensor* input_to_forget_weights =
       GetInput(context, node, kInputToForgetWeightsTensor);
-  TfLiteTensor* input_to_cell_weights =
+  const TfLiteTensor* input_to_cell_weights =
       GetInput(context, node, kInputToCellWeightsTensor);
-  TfLiteTensor* input_to_output_weights =
+  const TfLiteTensor* input_to_output_weights =
       GetInput(context, node, kInputToOutputWeightsTensor);
 
   TfLiteTensor* recurrent_to_input_weights =
       GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor);
-  TfLiteTensor* recurrent_to_forget_weights =
+  const TfLiteTensor* recurrent_to_forget_weights =
      GetInput(context, node, kRecurrentToForgetWeightsTensor);
-  TfLiteTensor* recurrent_to_cell_weights =
+  const TfLiteTensor* recurrent_to_cell_weights =
      GetInput(context, node, kRecurrentToCellWeightsTensor);
-  TfLiteTensor* recurrent_to_output_weights =
+  const TfLiteTensor* recurrent_to_output_weights =
      GetInput(context, node, kRecurrentToOutputWeightsTensor);
 
   TfLiteTensor* cell_to_input_weights =
@@ -353,10 +353,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
   TfLiteTensor* input_gate_bias =
       GetOptionalInputTensor(context, node, kInputGateBiasTensor);
-  TfLiteTensor* forget_gate_bias =
+  const TfLiteTensor* forget_gate_bias =
       GetInput(context, node, kForgetGateBiasTensor);
-  TfLiteTensor* cell_bias = GetInput(context, node, kCellGateBiasTensor);
-  TfLiteTensor* output_gate_bias =
+  const TfLiteTensor* cell_bias = GetInput(context, node, kCellGateBiasTensor);
+  const TfLiteTensor* output_gate_bias =
      GetInput(context, node, kOutputGateBiasTensor);
 
   TfLiteTensor* projection_weights =
diff --git a/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc b/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc
index 5ae635b..3eb2810 100644
--- a/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc
+++ b/tensorflow/contrib/lite/kernels/unidirectional_sequence_rnn.cc
@@ -54,11 +54,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, node->inputs->size, 4);
   TF_LITE_ENSURE_EQ(context, node->outputs->size, 2);
 
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* input_weights = GetInput(context, node, kWeightsTensor);
-  TfLiteTensor* recurrent_weights =
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input_weights = GetInput(context, node, kWeightsTensor);
+  const TfLiteTensor* recurrent_weights =
       GetInput(context, node, kRecurrentWeightsTensor);
-  TfLiteTensor* bias = GetInput(context, node, kBiasTensor);
+  const TfLiteTensor* bias = GetInput(context, node, kBiasTensor);
 
   // Check all the parameters of tensor match within themselves and match the
   // input configuration.
@@ -260,11 +260,11 @@ TfLiteStatus EvalQuantized(const TfLiteTensor* input,
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteSequenceRNNParams*>(node->builtin_data);
-  TfLiteTensor* input = GetInput(context, node, kInputTensor);
-  TfLiteTensor* input_weights = GetInput(context, node, kWeightsTensor);
-  TfLiteTensor* recurrent_weights =
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* input_weights = GetInput(context, node, kWeightsTensor);
+  const TfLiteTensor* recurrent_weights =
      GetInput(context, node, kRecurrentWeightsTensor);
-  TfLiteTensor* bias = GetInput(context, node, kBiasTensor);
+  const TfLiteTensor* bias = GetInput(context, node, kBiasTensor);
   TfLiteTensor* hidden_state = GetOutput(context, node, kHiddenStateTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
diff --git a/tensorflow/contrib/lite/models/smartreply/ops/extract_feature.cc b/tensorflow/contrib/lite/models/smartreply/ops/extract_feature.cc
index f97a648..29c8ad2 100644
--- a/tensorflow/contrib/lite/models/smartreply/ops/extract_feature.cc
+++ b/tensorflow/contrib/lite/models/smartreply/ops/extract_feature.cc
@@ -61,7 +61,7 @@ bool IsValidNgram(const tflite::StringRef& strref) {
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TfLiteIntArray* outputSize1 = TfLiteIntArrayCreate(1);
   TfLiteIntArray* outputSize2 = TfLiteIntArrayCreate(1);
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   int dim = input->dims->data[0];
   if (dim == 0) {
     // TFLite non-string output should have size greater than 0.
@@ -76,7 +76,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-  TfLiteTensor* input = GetInput(context, node, 0);
+  const TfLiteTensor* input = GetInput(context, node, 0);
   int num_strings = tflite::GetStringCount(input);
   TfLiteTensor* label = GetOutput(context, node, 0);
   TfLiteTensor* weight = GetOutput(context, node, 1);
-- 
2.7.4
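
[Editorial note, appended after the patch trailer and not part of the patch itself: the user-visible effect of this change is that kernels and custom operators now read their inputs through const TfLiteTensor* and keep mutable access only for outputs. The snippet below is a minimal sketch of that pattern written against the GetInput/GetOutput/GetTensorData helpers changed above; the "Square" op, its tensor indices, and the float-only restriction are hypothetical and chosen only for illustration.]

#include "tensorflow/contrib/lite/context.h"
#include "tensorflow/contrib/lite/kernels/internal/tensor.h"
#include "tensorflow/contrib/lite/kernels/kernel_util.h"

namespace tflite {

// Hypothetical element-wise Square custom op with one input and one output.
TfLiteStatus SquarePrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  // After this patch, GetInput hands back a pointer to const data; only
  // GetOutput still yields a mutable tensor.
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32);
  // Output shape mirrors the input shape.
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

TfLiteStatus SquareEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  // The call on the const tensor resolves to the const GetTensorData
  // specializations added to internal/tensor.h by this change.
  const float* in = GetTensorData<float>(input);
  float* out = GetTensorData<float>(output);
  for (int i = 0; i < NumElements(input); ++i) {
    out[i] = in[i] * in[i];
  }
  return kTfLiteOk;
}

}  // namespace tflite

[Registration and interpreter wiring are unchanged by the patch; only the pointer types in Prepare/Eval move to const, which the compiler then enforces.]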