From a4973345351a14a786987cd7f648a99c029fdc1d Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower"
Date: Sat, 13 Jan 2018 15:28:01 -0800
Subject: [PATCH] Rename RELU1 to RELU_N1_TO_1 to indicate that the image of
 the Op is in between -1 and 1.

PiperOrigin-RevId: 181864303
---
 .../contrib/lite/g3doc/tf_ops_compatibility.md   |  6 +++---
 tensorflow/contrib/lite/kernels/activations.cc   |  2 +-
 .../contrib/lite/kernels/activations_test.cc     |  2 +-
 tensorflow/contrib/lite/kernels/add_test.cc      |  9 +++++----
 tensorflow/contrib/lite/kernels/mul_test.cc      |  5 +++--
 tensorflow/contrib/lite/kernels/register.cc      |  4 ++--
 tensorflow/contrib/lite/model.cc                 |  4 ++--
 tensorflow/contrib/lite/nnapi_delegate.cc        |  2 +-
 tensorflow/contrib/lite/schema/schema.fbs        |  4 ++--
 .../contrib/lite/schema/schema_generated.h       | 16 ++++++++--------
 tensorflow/contrib/lite/toco/tflite/operator.cc  |  2 +-
 .../contrib/lite/toco/tflite/operator_test.cc    |  2 +-
 tensorflow/contrib/lite/toco/tflite/types.cc     |  4 ++--
 .../contrib/lite/toco/tflite/types_test.cc       |  2 +-
 14 files changed, 33 insertions(+), 31 deletions(-)

diff --git a/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md b/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
index 9ade04eb8c..8e5e694a5c 100644
--- a/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
+++ b/tensorflow/contrib/lite/g3doc/tf_ops_compatibility.md
@@ -329,18 +329,18 @@
 Inputs {
   0: a tensor
 }
 Outputs {
-  0: a tensor equivalent to max(0, min(input, 1)
+  0: a tensor equivalent to max(0, input)
 }
 ```
 
-**RELU1**
+**RELU_N1_TO_1**
 
 ```
 Inputs {
   0: a tensor
 }
 Outputs {
-  0: a tensor equivalent to max(-1, min(input, 6)
+  0: a tensor equivalent to max(-1, min(input, 1))
 }
 ```
diff --git a/tensorflow/contrib/lite/kernels/activations.cc b/tensorflow/contrib/lite/kernels/activations.cc
index 7ab60a33e5..8ac93bc8c8 100644
--- a/tensorflow/contrib/lite/kernels/activations.cc
+++ b/tensorflow/contrib/lite/kernels/activations.cc
@@ -349,7 +349,7 @@ TfLiteRegistration* Register_RELU() {
   return &r;
 }
 
-TfLiteRegistration* Register_RELU1() {
+TfLiteRegistration* Register_RELU_N1_TO_1() {
   static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                  activations::GenericPrepare,
                                  activations::Relu1Eval};
diff --git a/tensorflow/contrib/lite/kernels/activations_test.cc b/tensorflow/contrib/lite/kernels/activations_test.cc
index 33ca56e745..68d49944e5 100644
--- a/tensorflow/contrib/lite/kernels/activations_test.cc
+++ b/tensorflow/contrib/lite/kernels/activations_test.cc
@@ -102,7 +102,7 @@ TEST(FloatActivationsOpTest, Relu) {
 }
 
 TEST(FloatActivationsOpTest, Relu1) {
-  FloatActivationsOpModel m(BuiltinOperator_RELU1,
+  FloatActivationsOpModel m(BuiltinOperator_RELU_N1_TO_1,
                             /*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}});
   m.SetInput({
       0.0, -0.6, 0.2, -0.4,  //
diff --git a/tensorflow/contrib/lite/kernels/add_test.cc b/tensorflow/contrib/lite/kernels/add_test.cc
index ddf45bb576..306dfc3e80 100644
--- a/tensorflow/contrib/lite/kernels/add_test.cc
+++ b/tensorflow/contrib/lite/kernels/add_test.cc
@@ -77,9 +77,10 @@ TEST(FloatAddOpModel, NoActivation) {
   EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
 }
 
-TEST(FloatAddOpModel, ActivationRELU1) {
+TEST(FloatAddOpModel, ActivationRELU_N1_TO_1) {
   FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
-                    {TensorType_FLOAT32, {}}, ActivationFunctionType_RELU1);
+                    {TensorType_FLOAT32, {}},
+                    ActivationFunctionType_RELU_N1_TO_1);
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
   m.Invoke();
@@ -122,7 +123,7 @@ TEST(QuantizedAddOpModel, QuantizedTestsNoActivation) {
   }
 }
 
-TEST(QuantizedAddOpModel, QuantizedTestsActivationRELU1) {
+TEST(QuantizedAddOpModel, QuantizedTestsActivationRELU_N1_TO_1) {
   float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
   std::vector<std::vector<float>> inputs1 = {{-0.8, 0.2, 0.9, 0.7},
                                              {-0.8, 0.2, 0.7, 0.3}};
@@ -133,7 +134,7 @@ TEST(QuantizedAddOpModel, QuantizedTestsActivationRELU_N1_TO_1) {
   for (int i = 0; i < inputs1.size(); ++i) {
     QuantizedAddOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
                           {TensorType_UINT8, {}, -1.0, 1.0},
-                          ActivationFunctionType_RELU1);
+                          ActivationFunctionType_RELU_N1_TO_1);
     m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[i]);
     m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[i]);
     m.Invoke();
diff --git a/tensorflow/contrib/lite/kernels/mul_test.cc b/tensorflow/contrib/lite/kernels/mul_test.cc
index 4255cfe18a..8838b300c0 100644
--- a/tensorflow/contrib/lite/kernels/mul_test.cc
+++ b/tensorflow/contrib/lite/kernels/mul_test.cc
@@ -78,9 +78,10 @@ TEST(FloatMulOpTest, NoActivation) {
               ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.21, 0.4})));
 }
 
-TEST(FloatMulOpTest, ActivationRELU1) {
+TEST(FloatMulOpTest, ActivationRELU_N1_TO_1) {
   FloatMulOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
-                    {TensorType_FLOAT32, {}}, ActivationFunctionType_RELU1);
+                    {TensorType_FLOAT32, {}},
+                    ActivationFunctionType_RELU_N1_TO_1);
   m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
   m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 5});
   m.Invoke();
diff --git a/tensorflow/contrib/lite/kernels/register.cc b/tensorflow/contrib/lite/kernels/register.cc
index ecaf4d7042..de14afc546 100644
--- a/tensorflow/contrib/lite/kernels/register.cc
+++ b/tensorflow/contrib/lite/kernels/register.cc
@@ -20,7 +20,7 @@ namespace ops {
 namespace builtin {
 
 TfLiteRegistration* Register_RELU();
-TfLiteRegistration* Register_RELU1();
+TfLiteRegistration* Register_RELU_N1_TO_1();
 TfLiteRegistration* Register_RELU6();
 TfLiteRegistration* Register_TANH();
 TfLiteRegistration* Register_LOGISTIC();
@@ -57,7 +57,7 @@ TfLiteRegistration* Register_MEAN();
 
 BuiltinOpResolver::BuiltinOpResolver() {
   AddBuiltin(BuiltinOperator_RELU, Register_RELU());
-  AddBuiltin(BuiltinOperator_RELU1, Register_RELU1());
+  AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
   AddBuiltin(BuiltinOperator_RELU6, Register_RELU6());
   AddBuiltin(BuiltinOperator_TANH, Register_TANH());
   AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC());
diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc
index 0cd6c3e8dd..fe2a8bb723 100644
--- a/tensorflow/contrib/lite/model.cc
+++ b/tensorflow/contrib/lite/model.cc
@@ -230,7 +230,7 @@ void* ParseOpData(const Operator* op, BuiltinOperator op_type,
         return kTfLiteActNone;
       case ActivationFunctionType_RELU:
         return kTfLiteActRelu;
-      case ActivationFunctionType_RELU1:
+      case ActivationFunctionType_RELU_N1_TO_1:
         return kTfLiteActRelu1;
       case ActivationFunctionType_RELU6:
         return kTfLiteActRelu6;
@@ -286,7 +286,7 @@ void* ParseOpData(const Operator* op, BuiltinOperator op_type,
     case BuiltinOperator_TANH:
     case BuiltinOperator_LOGISTIC:
     case BuiltinOperator_RELU:
-    case BuiltinOperator_RELU1:
+    case BuiltinOperator_RELU_N1_TO_1:
     case BuiltinOperator_RELU6:
     case BuiltinOperator_CONCAT_EMBEDDINGS:
       break;
diff --git a/tensorflow/contrib/lite/nnapi_delegate.cc b/tensorflow/contrib/lite/nnapi_delegate.cc
index 0be7cd96c9..ec42152e5c 100644
--- a/tensorflow/contrib/lite/nnapi_delegate.cc
+++ b/tensorflow/contrib/lite/nnapi_delegate.cc
@@ -329,7 +329,7 @@ void AddOpsAndParams(tflite::Interpreter* interpreter,
     case tflite::BuiltinOperator_RESIZE_BILINEAR:
     case tflite::BuiltinOperator_CALL:
     case tflite::BuiltinOperator_SKIP_GRAM:
-    case tflite::BuiltinOperator_RELU1:
+    case tflite::BuiltinOperator_RELU_N1_TO_1:
     case tflite::BuiltinOperator_GATHER:
     case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
     case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
diff --git a/tensorflow/contrib/lite/schema/schema.fbs b/tensorflow/contrib/lite/schema/schema.fbs
index 54ef48f4ed..0a2c63e2b2 100644
--- a/tensorflow/contrib/lite/schema/schema.fbs
+++ b/tensorflow/contrib/lite/schema/schema.fbs
@@ -89,7 +89,7 @@ enum BuiltinOperator : byte {
   MAX_POOL_2D = 17,
   MUL = 18,
   RELU = 19,
-  RELU1 = 20,
+  RELU_N1_TO_1 = 20,
   RELU6 = 21,
   RESHAPE = 22,
   RESIZE_BILINEAR = 23,
@@ -149,7 +149,7 @@ enum Padding : byte { SAME, VALID }
 enum ActivationFunctionType : byte {
   NONE = 0,
   RELU = 1,
-  RELU1 = 2,
+  RELU_N1_TO_1 = 2,
   RELU6 = 3,
   TANH = 4,
   SIGN_BIT = 5,
diff --git a/tensorflow/contrib/lite/schema/schema_generated.h b/tensorflow/contrib/lite/schema/schema_generated.h
index 0774a216f4..b237a61203 100755
--- a/tensorflow/contrib/lite/schema/schema_generated.h
+++ b/tensorflow/contrib/lite/schema/schema_generated.h
@@ -170,7 +170,7 @@ enum BuiltinOperator {
   BuiltinOperator_MAX_POOL_2D = 17,
   BuiltinOperator_MUL = 18,
   BuiltinOperator_RELU = 19,
-  BuiltinOperator_RELU1 = 20,
+  BuiltinOperator_RELU_N1_TO_1 = 20,
   BuiltinOperator_RELU6 = 21,
   BuiltinOperator_RESHAPE = 22,
   BuiltinOperator_RESIZE_BILINEAR = 23,
@@ -214,7 +214,7 @@ inline BuiltinOperator (&EnumValuesBuiltinOperator())[38] {
       BuiltinOperator_MAX_POOL_2D,
       BuiltinOperator_MUL,
       BuiltinOperator_RELU,
-      BuiltinOperator_RELU1,
+      BuiltinOperator_RELU_N1_TO_1,
       BuiltinOperator_RELU6,
       BuiltinOperator_RESHAPE,
       BuiltinOperator_RESIZE_BILINEAR,
@@ -259,7 +259,7 @@ inline const char **EnumNamesBuiltinOperator() {
       "MAX_POOL_2D",
       "MUL",
       "RELU",
-      "RELU1",
+      "RELU_N1_TO_1",
       "RELU6",
       "RESHAPE",
       "RESIZE_BILINEAR",
@@ -888,7 +888,7 @@ inline const char *EnumNamePadding(Padding e) {
 enum ActivationFunctionType {
   ActivationFunctionType_NONE = 0,
   ActivationFunctionType_RELU = 1,
-  ActivationFunctionType_RELU1 = 2,
+  ActivationFunctionType_RELU_N1_TO_1 = 2,
   ActivationFunctionType_RELU6 = 3,
   ActivationFunctionType_TANH = 4,
   ActivationFunctionType_SIGN_BIT = 5,
@@ -898,14 +898,14 @@ enum ActivationFunctionType {
 
 inline ActivationFunctionType (&EnumValuesActivationFunctionType())[6] {
   static ActivationFunctionType values[] = {
-      ActivationFunctionType_NONE,  ActivationFunctionType_RELU,
-      ActivationFunctionType_RELU1, ActivationFunctionType_RELU6,
-      ActivationFunctionType_TANH,  ActivationFunctionType_SIGN_BIT};
+      ActivationFunctionType_NONE,         ActivationFunctionType_RELU,
+      ActivationFunctionType_RELU_N1_TO_1, ActivationFunctionType_RELU6,
+      ActivationFunctionType_TANH,         ActivationFunctionType_SIGN_BIT};
   return values;
 }
 
 inline const char **EnumNamesActivationFunctionType() {
-  static const char *names[] = {"NONE", "RELU", "RELU1", "RELU6",
+  static const char *names[] = {"NONE", "RELU", "RELU_N1_TO_1", "RELU6",
                                 "TANH", "SIGN_BIT", nullptr};
   return names;
 }
diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc
index d6335b8253..ae6c716eab 100644
--- a/tensorflow/contrib/lite/toco/tflite/operator.cc
+++ b/tensorflow/contrib/lite/toco/tflite/operator.cc
@@ -738,7 +738,7 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
   ops.emplace_back(
       new SimpleOperator<ReluOperator>("RELU", OperatorType::kRelu));
   ops.emplace_back(
-      new SimpleOperator<Relu1Operator>("RELU1", OperatorType::kRelu1));
+      new SimpleOperator<Relu1Operator>("RELU_N1_TO_1", OperatorType::kRelu1));
SimpleOperator("RELU_N1_TO_1", OperatorType::kRelu1)); ops.emplace_back( new SimpleOperator("RELU6", OperatorType::kRelu6)); ops.emplace_back(new SimpleOperator( diff --git a/tensorflow/contrib/lite/toco/tflite/operator_test.cc b/tensorflow/contrib/lite/toco/tflite/operator_test.cc index 093144f6ac..debce63760 100644 --- a/tensorflow/contrib/lite/toco/tflite/operator_test.cc +++ b/tensorflow/contrib/lite/toco/tflite/operator_test.cc @@ -102,7 +102,7 @@ TEST_F(OperatorTest, SimpleOperators) { OperatorType::kDequantize); CheckSimpleOperator("FLOOR", OperatorType::kFloor); CheckSimpleOperator("RELU", OperatorType::kRelu); - CheckSimpleOperator("RELU1", OperatorType::kRelu1); + CheckSimpleOperator("RELU_N1_TO_1", OperatorType::kRelu1); CheckSimpleOperator("RELU6", OperatorType::kRelu6); CheckSimpleOperator("RESIZE_BILINEAR", OperatorType::kResizeBilinear); diff --git a/tensorflow/contrib/lite/toco/tflite/types.cc b/tensorflow/contrib/lite/toco/tflite/types.cc index a6fa0237bc..5cd1675f54 100644 --- a/tensorflow/contrib/lite/toco/tflite/types.cc +++ b/tensorflow/contrib/lite/toco/tflite/types.cc @@ -146,7 +146,7 @@ PaddingType Padding::Deserialize(int padding) { case FusedActivationFunctionType::kRelu6: return ::tflite::ActivationFunctionType_RELU6; case FusedActivationFunctionType::kRelu1: - return ::tflite::ActivationFunctionType_RELU1; + return ::tflite::ActivationFunctionType_RELU_N1_TO_1; default: LOG(FATAL) << "Unhandled fused activation function type."; } @@ -161,7 +161,7 @@ FusedActivationFunctionType ActivationFunction::Deserialize( return FusedActivationFunctionType::kRelu; case ::tflite::ActivationFunctionType_RELU6: return FusedActivationFunctionType::kRelu6; - case ::tflite::ActivationFunctionType_RELU1: + case ::tflite::ActivationFunctionType_RELU_N1_TO_1: return FusedActivationFunctionType::kRelu1; default: LOG(FATAL) << "Unhandled fused activation function type."; diff --git a/tensorflow/contrib/lite/toco/tflite/types_test.cc b/tensorflow/contrib/lite/toco/tflite/types_test.cc index 174b78f3e6..e982081f76 100644 --- a/tensorflow/contrib/lite/toco/tflite/types_test.cc +++ b/tensorflow/contrib/lite/toco/tflite/types_test.cc @@ -172,7 +172,7 @@ TEST(ActivationFunction, All) { {FusedActivationFunctionType::kRelu6, ::tflite::ActivationFunctionType_RELU6}, {FusedActivationFunctionType::kRelu1, - ::tflite::ActivationFunctionType_RELU1}}; + ::tflite::ActivationFunctionType_RELU_N1_TO_1}}; for (auto x : testdata) { EXPECT_EQ(x.second, ActivationFunction::Serialize(x.first)); EXPECT_EQ(x.first, ActivationFunction::Deserialize(x.second)); -- 2.34.1