0: a tensor
}
Outputs {
- 0: a tensor equivalent to max(0, min(input, 1)
+ 0: a tensor equivalent to max(0, input)
}
```
-**RELU1**
+**RELU_N1_TO_1**
```
Inputs {
0: a tensor
}
Outputs {
- 0: a tensor equivalent to max(-1, min(input, 6)
+ 0: a tensor equivalent to max(-1, min(input, 1))
}
```
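For reference, the rename changes nothing about the math: RELU is still max(0, input), and RELU_N1_TO_1 clamps each element to [-1, 1]. A minimal sketch of that element-wise behavior, using hypothetical helper names rather than the kernel code touched by this change:
```
#include <algorithm>
#include <vector>

// Reference-only sketch of the two activations' element-wise math; these
// helpers are illustrative and are not part of the TFLite kernels.
float Relu(float x) { return std::max(0.0f, x); }  // max(0, x)
float ReluN1To1(float x) {                         // max(-1, min(x, 1))
  return std::max(-1.0f, std::min(x, 1.0f));
}

std::vector<float> ApplyReluN1To1(const std::vector<float>& input) {
  std::vector<float> output(input.size());
  for (size_t i = 0; i < input.size(); ++i) output[i] = ReluN1To1(input[i]);
  return output;
}
```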
return &r;
}
-TfLiteRegistration* Register_RELU1() {
+TfLiteRegistration* Register_RELU_N1_TO_1() {
static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
activations::GenericPrepare,
activations::Relu1Eval};
  return &r;
}
TEST(FloatActivationsOpTest, Relu1) {
- FloatActivationsOpModel m(BuiltinOperator_RELU1,
+ FloatActivationsOpModel m(BuiltinOperator_RELU_N1_TO_1,
/*input=*/{TensorType_FLOAT32, {1, 2, 4, 1}});
m.SetInput({
0.0, -0.6, 0.2, -0.4, //
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
-TEST(FloatAddOpModel, ActivationRELU1) {
+TEST(FloatAddOpModel, ActivationRELU_N1_TO_1) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
- {TensorType_FLOAT32, {}}, ActivationFunctionType_RELU1);
+ {TensorType_FLOAT32, {}},
+ ActivationFunctionType_RELU_N1_TO_1);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
m.Invoke();
}
}
-TEST(QuantizedAddOpModel, QuantizedTestsActivationRELU1) {
+TEST(QuantizedAddOpModel, QuantizedTestsActivationRELU_N1_TO_1) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
std::vector<std::initializer_list<float>> inputs1 = {{-0.8, 0.2, 0.9, 0.7},
{-0.8, 0.2, 0.7, 0.3}};
for (int i = 0; i < inputs1.size(); ++i) {
QuantizedAddOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
{TensorType_UINT8, {}, -1.0, 1.0},
- ActivationFunctionType_RELU1);
+ ActivationFunctionType_RELU_N1_TO_1);
m.QuantizeAndPopulate<uint8_t>(m.input1(), inputs1[i]);
m.QuantizeAndPopulate<uint8_t>(m.input2(), inputs2[i]);
m.Invoke();
ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.21, 0.4})));
}
-TEST(FloatMulOpTest, ActivationRELU1) {
+TEST(FloatMulOpTest, ActivationRELU_N1_TO_1) {
FloatMulOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
- {TensorType_FLOAT32, {}}, ActivationFunctionType_RELU1);
+ {TensorType_FLOAT32, {}},
+ ActivationFunctionType_RELU_N1_TO_1);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 5});
m.Invoke();
namespace builtin {
TfLiteRegistration* Register_RELU();
-TfLiteRegistration* Register_RELU1();
+TfLiteRegistration* Register_RELU_N1_TO_1();
TfLiteRegistration* Register_RELU6();
TfLiteRegistration* Register_TANH();
TfLiteRegistration* Register_LOGISTIC();
BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_RELU, Register_RELU());
- AddBuiltin(BuiltinOperator_RELU1, Register_RELU1());
+ AddBuiltin(BuiltinOperator_RELU_N1_TO_1, Register_RELU_N1_TO_1());
AddBuiltin(BuiltinOperator_RELU6, Register_RELU6());
AddBuiltin(BuiltinOperator_TANH, Register_TANH());
AddBuiltin(BuiltinOperator_LOGISTIC, Register_LOGISTIC());
return kTfLiteActNone;
case ActivationFunctionType_RELU:
return kTfLiteActRelu;
- case ActivationFunctionType_RELU1:
+ case ActivationFunctionType_RELU_N1_TO_1:
return kTfLiteActRelu1;
case ActivationFunctionType_RELU6:
return kTfLiteActRelu6;
case BuiltinOperator_TANH:
case BuiltinOperator_LOGISTIC:
case BuiltinOperator_RELU:
- case BuiltinOperator_RELU1:
+ case BuiltinOperator_RELU_N1_TO_1:
case BuiltinOperator_RELU6:
case BuiltinOperator_CONCAT_EMBEDDINGS:
break;
case tflite::BuiltinOperator_RESIZE_BILINEAR:
case tflite::BuiltinOperator_CALL:
case tflite::BuiltinOperator_SKIP_GRAM:
- case tflite::BuiltinOperator_RELU1:
+ case tflite::BuiltinOperator_RELU_N1_TO_1:
case tflite::BuiltinOperator_GATHER:
case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
MAX_POOL_2D = 17,
MUL = 18,
RELU = 19,
- RELU1 = 20,
+ RELU_N1_TO_1 = 20,
RELU6 = 21,
RESHAPE = 22,
RESIZE_BILINEAR = 23,
enum ActivationFunctionType : byte {
NONE = 0,
RELU = 1,
- RELU1 = 2,
+ RELU_N1_TO_1 = 2,
RELU6 = 3,
TANH = 4,
SIGN_BIT = 5,
BuiltinOperator_MAX_POOL_2D = 17,
BuiltinOperator_MUL = 18,
BuiltinOperator_RELU = 19,
- BuiltinOperator_RELU1 = 20,
+ BuiltinOperator_RELU_N1_TO_1 = 20,
BuiltinOperator_RELU6 = 21,
BuiltinOperator_RESHAPE = 22,
BuiltinOperator_RESIZE_BILINEAR = 23,
BuiltinOperator_MAX_POOL_2D,
BuiltinOperator_MUL,
BuiltinOperator_RELU,
- BuiltinOperator_RELU1,
+ BuiltinOperator_RELU_N1_TO_1,
BuiltinOperator_RELU6,
BuiltinOperator_RESHAPE,
BuiltinOperator_RESIZE_BILINEAR,
"MAX_POOL_2D",
"MUL",
"RELU",
- "RELU1",
+ "RELU_N1_TO_1",
"RELU6",
"RESHAPE",
"RESIZE_BILINEAR",
enum ActivationFunctionType {
ActivationFunctionType_NONE = 0,
ActivationFunctionType_RELU = 1,
- ActivationFunctionType_RELU1 = 2,
+ ActivationFunctionType_RELU_N1_TO_1 = 2,
ActivationFunctionType_RELU6 = 3,
ActivationFunctionType_TANH = 4,
ActivationFunctionType_SIGN_BIT = 5,
inline ActivationFunctionType (&EnumValuesActivationFunctionType())[6] {
static ActivationFunctionType values[] = {
- ActivationFunctionType_NONE, ActivationFunctionType_RELU,
- ActivationFunctionType_RELU1, ActivationFunctionType_RELU6,
- ActivationFunctionType_TANH, ActivationFunctionType_SIGN_BIT};
+ ActivationFunctionType_NONE, ActivationFunctionType_RELU,
+ ActivationFunctionType_RELU_N1_TO_1, ActivationFunctionType_RELU6,
+ ActivationFunctionType_TANH, ActivationFunctionType_SIGN_BIT};
return values;
}
inline const char **EnumNamesActivationFunctionType() {
- static const char *names[] = {"NONE", "RELU", "RELU1", "RELU6",
+ static const char *names[] = {"NONE", "RELU", "RELU_N1_TO_1", "RELU6",
"TANH", "SIGN_BIT", nullptr};
return names;
}
ops.emplace_back(
new SimpleOperator<ReluOperator>("RELU", OperatorType::kRelu));
ops.emplace_back(
- new SimpleOperator<Relu1Operator>("RELU1", OperatorType::kRelu1));
+ new SimpleOperator<Relu1Operator>("RELU_N1_TO_1", OperatorType::kRelu1));
ops.emplace_back(
new SimpleOperator<Relu6Operator>("RELU6", OperatorType::kRelu6));
ops.emplace_back(new SimpleOperator<ResizeBilinearOperator>(
OperatorType::kDequantize);
CheckSimpleOperator<FloorOperator>("FLOOR", OperatorType::kFloor);
CheckSimpleOperator<ReluOperator>("RELU", OperatorType::kRelu);
- CheckSimpleOperator<Relu1Operator>("RELU1", OperatorType::kRelu1);
+ CheckSimpleOperator<Relu1Operator>("RELU_N1_TO_1", OperatorType::kRelu1);
CheckSimpleOperator<Relu6Operator>("RELU6", OperatorType::kRelu6);
CheckSimpleOperator<ResizeBilinearOperator>("RESIZE_BILINEAR",
OperatorType::kResizeBilinear);
case FusedActivationFunctionType::kRelu6:
return ::tflite::ActivationFunctionType_RELU6;
case FusedActivationFunctionType::kRelu1:
- return ::tflite::ActivationFunctionType_RELU1;
+ return ::tflite::ActivationFunctionType_RELU_N1_TO_1;
default:
LOG(FATAL) << "Unhandled fused activation function type.";
}
return FusedActivationFunctionType::kRelu;
case ::tflite::ActivationFunctionType_RELU6:
return FusedActivationFunctionType::kRelu6;
- case ::tflite::ActivationFunctionType_RELU1:
+ case ::tflite::ActivationFunctionType_RELU_N1_TO_1:
return FusedActivationFunctionType::kRelu1;
default:
LOG(FATAL) << "Unhandled fused activation function type.";
{FusedActivationFunctionType::kRelu6,
::tflite::ActivationFunctionType_RELU6},
{FusedActivationFunctionType::kRelu1,
- ::tflite::ActivationFunctionType_RELU1}};
+ ::tflite::ActivationFunctionType_RELU_N1_TO_1}};
for (auto x : testdata) {
EXPECT_EQ(x.second, ActivationFunction::Serialize(x.first));
EXPECT_EQ(x.first, ActivationFunction::Deserialize(x.second));