From 8ea5452e7bcef13723e445a09b37eb3c1d37f5c3 Mon Sep 17 00:00:00 2001
From: Parichay Kapoor
Date: Mon, 20 Jul 2020 14:09:38 +0900
Subject: [PATCH] [Loss] Update MSR to MSE

Update the name of mean squared error from MSR to MSE

Signed-off-by: Parichay Kapoor
---
 Applications/Classification/res/Classification.ini         |  4 ++--
 Applications/Classification/res/Classification_func.ini    |  4 ++--
 Applications/Classification/res/Classification_new.ini     |  4 ++--
 Applications/LogisticRegression/res/LogisticRegression.ini |  2 +-
 Applications/ReinforcementLearning/DeepQ/jni/DeepQ.ini     |  4 ++--
 Applications/Tizen_CAPI/Tizen_CAPI_config.ini              |  2 +-
 Applications/Training/res/Training.ini                     |  4 ++--
 Applications/mnist/res/mnist.ini                           |  2 +-
 README.md                                                  |  4 ++--
 nntrainer/include/layer.h                                  |  4 ++--
 nntrainer/src/loss_layer.cpp                               |  4 ++--
 nntrainer/src/parse_util.cpp                               |  4 ++--
 test/tizen_capi/test_conf.ini                              |  4 ++--
 test/unittest/unittest_nntrainer_layers.cpp                | 10 +++++-----
 14 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/Applications/Classification/res/Classification.ini b/Applications/Classification/res/Classification.ini
index 6eaf73c..bf5558e 100644
--- a/Applications/Classification/res/Classification.ini
+++ b/Applications/Classification/res/Classification.ini
@@ -7,8 +7,8 @@ Decay_steps = 1000     # decay step for the exponential decayed learning rate
 Epoch = 30000          # Epoch
 Optimizer = adam       # Optimizer : sgd (stochastic gradien decent),
                        #             adam (Adamtive Moment Estimation)
-Cost = cross           # Cost(loss) function : msr (mean square root error)
-                       #                       cross ( for cross entropy )
+Cost = cross           # Cost(loss) function : mse (mean squared error)
+                       #                       cross (cross entropy)
 Model = "model.bin"    # model path to save / read
 minibatch = 32         # mini batch size
 beta1 = 0.9            # beta 1 for adam
diff --git a/Applications/Classification/res/Classification_func.ini b/Applications/Classification/res/Classification_func.ini
index a91c6c1..db9fb42 100644
--- a/Applications/Classification/res/Classification_func.ini
+++ b/Applications/Classification/res/Classification_func.ini
@@ -7,8 +7,8 @@ Decay_steps = 1000     # decay step for the exponential decayed learning rate
 Epoch = 30000          # Epoch
 Optimizer = adam       # Optimizer : sgd (stochastic gradien decent),
                        #             adam (Adamtive Moment Estimation)
-Cost = cross           # Cost(loss) function : msr (mean square root error)
-                       #                       cross ( for cross entropy )
+Cost = cross           # Cost(loss) function : mse (mean squared error)
+                       #                       cross (cross entropy)
 Model = "model.bin"    # model path to save / read
 minibatch = 32         # mini batch size
 beta1 = 0.9            # beta 1 for adam
diff --git a/Applications/Classification/res/Classification_new.ini b/Applications/Classification/res/Classification_new.ini
index ff074e5..0b53e87 100644
--- a/Applications/Classification/res/Classification_new.ini
+++ b/Applications/Classification/res/Classification_new.ini
@@ -8,8 +8,8 @@ Epoch = 30000          # Epoch
 Optimizer = sgd        # Optimizer : sgd (stochastic gradien decent),
                        #             adam (Adamtive Moment Estimation)
 Activation = sigmoid   # activation : sigmoid, tanh
-Cost = cross           # Cost(loss) function : msr (mean square root error)
-                       #                       categorical ( for logistic regression )
+Cost = cross           # Cost(loss) function : mse (mean squared error)
+                       #                       cross (cross entropy)
 Model = "model.bin"    # model path to save / read
 minibatch = 32         # mini batch size
 beta1 = 0.9            # beta 1 for adam
diff --git a/Applications/LogisticRegression/res/LogisticRegression.ini b/Applications/LogisticRegression/res/LogisticRegression.ini
index c0b8103..2f065bb 100644
--- a/Applications/LogisticRegression/res/LogisticRegression.ini
+++ b/Applications/LogisticRegression/res/LogisticRegression.ini
@@ -5,7 +5,7 @@ Learning_rate = 0.001  # Learning Rate
 Epoch = 100            # Epoch
 Optimizer = sgd        # Optimizer : sgd (stochastic gradien decent),
                        #             adam (Adamtive Moment Estimation)
-Cost = cross           # Cost(loss) function : msr (mean square root error)
+Cost = cross           # Cost(loss) function : mse (mean squared error)
                        #                       cross ( cross entropy )
 Model = "model.bin"    # model path to save / read
 minibatch = 1          # mini batch size
diff --git a/Applications/ReinforcementLearning/DeepQ/jni/DeepQ.ini b/Applications/ReinforcementLearning/DeepQ/jni/DeepQ.ini
index ec9d2c0..482f5ed 100644
--- a/Applications/ReinforcementLearning/DeepQ/jni/DeepQ.ini
+++ b/Applications/ReinforcementLearning/DeepQ/jni/DeepQ.ini
@@ -6,8 +6,8 @@
 Epoch = 10000          # Epoch
 Optimizer = adam       # Optimizer : sgd (stochastic gradien decent),
                        #             adam (Adamtive Moment Estimation)
-Cost = msr             # Cost(loss) function : msr (mean square root error)
-                       #                       categorical ( for logistic regression )
+Cost = mse             # Cost(loss) function : mse (mean squared error)
+                       #                       cross (cross entropy)
 Model = "model.bin"    # model path to save / read
 minibatch = 32         # mini batch size
 beta1 = 0.9            # beta 1 for adam
diff --git a/Applications/Tizen_CAPI/Tizen_CAPI_config.ini b/Applications/Tizen_CAPI/Tizen_CAPI_config.ini
index ba96f76..9318a70 100644
--- a/Applications/Tizen_CAPI/Tizen_CAPI_config.ini
+++ b/Applications/Tizen_CAPI/Tizen_CAPI_config.ini
@@ -7,7 +7,7 @@ Decay_steps = 1000     # decay step for the exponential decayed learning rate
 Epoch = 10             # Epoch
 Optimizer = adam       # Optimizer : sgd (stochastic gradien decent),
                        #             adam (Adamtive Moment Estimation)
-Cost = cross           # Cost(loss) function : msr (mean square root error)
+Cost = cross           # Cost(loss) function : mse (mean squared error)
                        #                       cross ( for cross entropy )
 Model = "model.bin"    # model path to save / read
 minibatch = 32         # mini batch size
diff --git a/Applications/Training/res/Training.ini b/Applications/Training/res/Training.ini
index 886c135..8b02b78 100644
--- a/Applications/Training/res/Training.ini
+++ b/Applications/Training/res/Training.ini
@@ -5,8 +5,8 @@ Learning_rate = 0.01   # Learning Rate
 Epoch = 100            # Epoch
 Optimizer = sgd        # Optimizer : sgd (stochastic gradien decent),
                        #             adam (Adamtive Moment Estimation)
-Cost = cross           # Cost(loss) function : msr (mean square root error)
-                       #                       cross ( for Cross Entropy )
+Cost = cross           # Cost(loss) function : mse (mean squared error)
+                       #                       cross (cross entropy)
 Model = "model.bin"    # model path to save / read
 minibatch = 1          # mini batch size
 # beta1 = 0.9          # beta 1 for adam
diff --git a/Applications/mnist/res/mnist.ini b/Applications/mnist/res/mnist.ini
index 71a98c7..c161ed9 100644
--- a/Applications/mnist/res/mnist.ini
+++ b/Applications/mnist/res/mnist.ini
@@ -5,7 +5,7 @@ Learning_rate = 1e-4   # Learning Rate
 Epoch = 1500           # Epoch
 Optimizer = adam       # Optimizer : sgd (stochastic gradien decent),
                        #             adam (Adamtive Moment Estimation)
-Cost = cross           # Cost(loss) function : msr (mean square root error)
+Cost = cross           # Cost(loss) function : mse (mean squared error)
                        #                       cross ( for cross entropy )
 Model = "model.bin"    # model path to save / read
 minibatch = 32         # mini batch size
diff --git a/README.md b/README.md
index 48127ce..7032d1d 100644
--- a/README.md
+++ b/README.md
@@ -44,7 +44,7 @@ The most important role of this component is to activate forward / backward prop
 - **_Optimizer:_** Optimizer for the Network Model - sgd, adam
 - **_Activation:_** Activation Function - sigmoid , tanh
 - **_Cost:_** Cost Function -
-   msr(mean square root error), categorical (for logistic regression), cross (cross entropy)
+   mse(mean squared error), cross (cross entropy)
 - **_Model:_** Name of Model. Weight Data is saved in the name of this.
 - **_minibach:_** mini batch size
 - **_beta1,beta2,epsilon:_** hyper parameters for the adam optimizer
@@ -86,7 +86,7 @@ The following dependencies are needed to compile / build / run.
 
 You can use [docker image](https://hub.docker.com/r/lunapocket/nntrainer-build-env) to easily set up and try building.
 
-To run the docker 
+To run the docker
 
 ```bash
 $ docker pull lunapocket/nntrainer-build-env:ubuntu-18.04
diff --git a/nntrainer/include/layer.h b/nntrainer/include/layer.h
index 18b84f3..50186b7 100644
--- a/nntrainer/include/layer.h
+++ b/nntrainer/include/layer.h
@@ -36,7 +36,7 @@ namespace nntrainer {
 
 /**
  * @brief     Enumeration of cost(loss) function type
- *            0. MSR ( Mean Squared Roots )
+ *            0. MSE ( Mean Squared Error )
  *            1. ENTROPY ( Cross Entropy )
  *            2. ENTROPY_SIGMOID (Cross Entropy amalgamated with sigmoid for
  * stability)
@@ -45,7 +45,7 @@ namespace nntrainer {
  *            4. Unknown
  */
 typedef enum {
-  COST_MSR,
+  COST_MSE,
   COST_ENTROPY,
   COST_ENTROPY_SIGMOID,
   COST_ENTROPY_SOFTMAX,
diff --git a/nntrainer/src/loss_layer.cpp b/nntrainer/src/loss_layer.cpp
index ec752c5..89e1e0a 100644
--- a/nntrainer/src/loss_layer.cpp
+++ b/nntrainer/src/loss_layer.cpp
@@ -51,7 +51,7 @@ Tensor LossLayer::forwarding(Tensor output, Tensor label, int &status) {
   Tensor l;
 
   switch (cost) {
-  case COST_MSR: {
+  case COST_MSE: {
     // y2 <- y2 - y;
     y2.subtract_i(y);
 
@@ -123,7 +123,7 @@ Tensor LossLayer::backwarding(Tensor derivative, int iteration) {
   Tensor y = input;
 
   switch (cost) {
-  case COST_MSR:
+  case COST_MSE:
     ret_derivative = y.subtract(y2).multiply(2).divide(y.getDim().getDataLen());
     break;
   case COST_ENTROPY_SIGMOID:
diff --git a/nntrainer/src/parse_util.cpp b/nntrainer/src/parse_util.cpp
index b4c1a53..cf0c486 100644
--- a/nntrainer/src/parse_util.cpp
+++ b/nntrainer/src/parse_util.cpp
@@ -73,10 +73,10 @@ unsigned int parseType(std::string ll, InputType t) {
 
   /**
    * @brief     Cost Function String from configure file
-   *            "msr"  : Mean Squared Roots
+   *            "mse"  : Mean Squared Error
    *            "caterogical" : Categorical Cross Entropy
    */
-  std::array<std::string, 2> cost_string = {"msr", "cross"};
+  std::array<std::string, 2> cost_string = {"mse", "cross"};
 
   /**
    * @brief     Network Type String from configure file
diff --git a/test/tizen_capi/test_conf.ini b/test/tizen_capi/test_conf.ini
index 29cd287..f542b27 100644
--- a/test/tizen_capi/test_conf.ini
+++ b/test/tizen_capi/test_conf.ini
@@ -5,8 +5,8 @@ Learning_rate = 0.01   # Learning Rate
 Epoch = 100            # Epoch
 Optimizer = sgd        # Optimizer : sgd (stochastic gradien decent),
                        #             adam (Adamtive Moment Estimation)
-Cost = cross           # Cost(loss) function : msr (mean square root error)
-                       #                       cross ( for Cross Entropy )
+Cost = cross           # Cost(loss) function : mse (mean squared error)
+                       #                       cross (Cross Entropy)
 Model = "model.bin"    # model path to save / read
 Minibatch = 1          # mini batch size
 # beta1 = 0.9          # beta 1 for adam
diff --git a/test/unittest/unittest_nntrainer_layers.cpp b/test/unittest/unittest_nntrainer_layers.cpp
index 468a20c..270ba31 100644
--- a/test/unittest/unittest_nntrainer_layers.cpp
+++ b/test/unittest/unittest_nntrainer_layers.cpp
@@ -479,7 +479,7 @@ TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_01_p) {
 TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_02_p) {
 
   addActivation(nntrainer::ACT_SIGMOID);
-  addLoss(nntrainer::COST_MSR);
+  addLoss(nntrainer::COST_MSE);
   setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
 
   /** Verify forwarding value */
@@ -500,7 +500,7 @@ TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_02_p) {
 TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_03_p) {
 
   addActivation(nntrainer::ACT_SOFTMAX);
-  addLoss(nntrainer::COST_MSR);
+  addLoss(nntrainer::COST_MSE);
   setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
 
   /** Verify forwarding value */
@@ -520,7 +520,7 @@
  */
 TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_04_p) {
 
-  addLoss(nntrainer::COST_MSR);
+  addLoss(nntrainer::COST_MSE);
   setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
 
   /** Verify forwarding value */
@@ -541,7 +541,7 @@ TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_04_p) {
 TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_05_p) {
 
   addActivation(nntrainer::ACT_SIGMOID);
-  addLoss(nntrainer::COST_MSR);
+  addLoss(nntrainer::COST_MSE);
   setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
 
   /** Verify forwarding value */
@@ -562,7 +562,7 @@ TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_05_p) {
 TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_06_p) {
 
   addActivation(nntrainer::ACT_SOFTMAX);
-  addLoss(nntrainer::COST_MSR);
+  addLoss(nntrainer::COST_MSE);
   setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
 
   /** Verify forwarding value */
-- 
2.7.4
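
For reference, a configuration file selects the renamed loss with `Cost = mse` once this patch is applied. The fragment below is illustrative only: the key names mirror the Applications/*.ini files touched above, and the values are placeholders rather than a recommended setup.

```ini
# Illustrative fragment; key names follow the .ini files patched above.
Learning_rate = 0.01   # Learning Rate
Epoch = 100            # Epoch
Optimizer = sgd        # sgd (stochastic gradient descent) or adam
Cost = mse             # mse (mean squared error) or cross (cross entropy)
Model = "model.bin"    # model path to save / read
minibatch = 32         # mini batch size
```

Since `parseType()` in parse_util.cpp now matches the `Cost` value against `cost_string = {"mse", "cross"}`, the old spelling `msr` no longer resolves to a known cost type.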
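The two `COST_MSE` branches in loss_layer.cpp implement the usual mean-squared-error forward and backward formulas. Below is a minimal standalone sketch of that math in plain C++; the `mseForward`/`mseBackward` helpers are hypothetical names used for illustration and do not reflect nntrainer's actual `Tensor` API.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

// Forward pass: L = mean((label - output)^2). The COST_MSE forwarding
// branch above starts the same way: y2.subtract_i(y) forms (label - output).
double mseForward(const std::vector<double> &output,
                  const std::vector<double> &label) {
  assert(output.size() == label.size() && !output.empty());
  double sum = 0.0;
  for (std::size_t i = 0; i < output.size(); ++i) {
    const double diff = label[i] - output[i];
    sum += diff * diff;
  }
  return sum / static_cast<double>(output.size());
}

// Backward pass: dL/dy_i = 2 * (y_i - label_i) / N, the elementwise form of
// y.subtract(y2).multiply(2).divide(y.getDim().getDataLen()) above.
std::vector<double> mseBackward(const std::vector<double> &output,
                                const std::vector<double> &label) {
  assert(output.size() == label.size() && !output.empty());
  std::vector<double> grad(output.size());
  for (std::size_t i = 0; i < output.size(); ++i)
    grad[i] =
      2.0 * (output[i] - label[i]) / static_cast<double>(output.size());
  return grad;
}

int main() {
  const std::vector<double> y = {0.2, 0.7, 0.1}; // model output
  const std::vector<double> t = {0.0, 1.0, 0.0}; // one-hot label
  std::printf("loss   = %f\n", mseForward(y, t));     // 0.046667
  std::printf("dL/dy0 = %f\n", mseBackward(y, t)[0]); // 0.133333
  return 0;
}
```

Note that the backward pass divides by the full data length (`getDataLen()` in the patched code), so for batched tensors the normalization is over every element rather than only the batch dimension.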