Epoch = 30000 # Epoch
Optimizer = adam # Optimizer : sgd (stochastic gradient descent),
# adam (Adaptive Moment Estimation)
-Cost = cross # Cost(loss) function : msr (mean square root error)
- # cross ( for cross entropy )
+Cost = cross # Cost(loss) function : mse (mean squared error)
+ # cross (cross entropy)
Model = "model.bin" # model path to save / read
minibatch = 32 # mini batch size
beta1 = 0.9 # beta 1 for adam
Epoch = 30000 # Epoch
Optimizer = adam # Optimizer : sgd (stochastic gradient descent),
# adam (Adaptive Moment Estimation)
-Cost = cross # Cost(loss) function : msr (mean square root error)
- # cross ( for cross entropy )
+Cost = cross # Cost(loss) function : mse (mean squared error)
+ # cross (cross entropy)
Model = "model.bin" # model path to save / read
minibatch = 32 # mini batch size
beta1 = 0.9 # beta 1 for adam
Optimizer = sgd # Optimizer : sgd (stochastic gradient descent),
# adam (Adaptive Moment Estimation)
Activation = sigmoid # activation : sigmoid, tanh
-Cost = cross # Cost(loss) function : msr (mean square root error)
- # categorical ( for logistic regression )
+Cost = cross # Cost(loss) function : mse (mean squared error)
+ # cross (cross entropy)
Model = "model.bin" # model path to save / read
minibatch = 32 # mini batch size
beta1 = 0.9 # beta 1 for adam
Epoch = 100 # Epoch
Optimizer = sgd # Optimizer : sgd (stochastic gradient descent),
# adam (Adaptive Moment Estimation)
-Cost = cross # Cost(loss) function : msr (mean square root error)
+Cost = cross # Cost(loss) function : mse (mean squared error)
# cross ( cross entropy )
Model = "model.bin" # model path to save / read
minibatch = 1 # mini batch size
Optimizer = adam # Optimizer : sgd (stochastic gradient descent),
# adam (Adaptive Moment Estimation)
-Cost = msr # Cost(loss) function : msr (mean square root error)
- # categorical ( for logistic regression )
+Cost = mse # Cost(loss) function : mse (mean squared error)
+ # cross (cross entropy)
Model = "model.bin" # model path to save / read
minibatch = 32 # mini batch size
beta1 = 0.9 # beta 1 for adam
Epoch = 10 # Epoch
Optimizer = adam # Optimizer : sgd (stochastic gradient descent),
# adam (Adaptive Moment Estimation)
-Cost = cross # Cost(loss) function : msr (mean square root error)
+Cost = cross # Cost(loss) function : mse (mean squared error)
# cross ( for cross entropy )
Model = "model.bin" # model path to save / read
minibatch = 32 # mini batch size
Epoch = 100 # Epoch
Optimizer = sgd # Optimizer : sgd (stochastic gradient descent),
# adam (Adaptive Moment Estimation)
-Cost = cross # Cost(loss) function : msr (mean square root error)
- # cross ( for Cross Entropy )
+Cost = cross # Cost(loss) function : mse (mean squared error)
+ # cross (cross entropy)
Model = "model.bin" # model path to save / read
minibatch = 1 # mini batch size
# beta1 = 0.9 # beta 1 for adam
Epoch = 1500 # Epoch
Optimizer = adam # Optimizer : sgd (stochastic gradient descent),
# adam (Adaptive Moment Estimation)
-Cost = cross # Cost(loss) function : msr (mean square root error)
+Cost = cross # Cost(loss) function : mse (mean squared error)
# cross ( for cross entropy )
Model = "model.bin" # model path to save / read
minibatch = 32 # mini batch size
- **_Optimizer:_** Optimizer for the Network Model - sgd, adam
- **_Activation:_** Activation Function - sigmoid , tanh
- **_Cost:_** Cost Function -
- msr(mean square root error), categorical (for logistic regression), cross (cross entropy)
+ mse(mean squared error), cross (cross entropy)
- **_Model:_** Name of Model. Weight Data is saved in the name of this.
- **_minibatch:_** mini batch size
- **_beta1,beta2,epsilon:_** hyper parameters for the adam optimizer
You can use [docker image](https://hub.docker.com/r/lunapocket/nntrainer-build-env) to easily set up and try building.
-To run the docker
+To run the docker
```bash
$ docker pull lunapocket/nntrainer-build-env:ubuntu-18.04
/**
* @brief Enumeration of cost(loss) function type
- * 0. MSR ( Mean Squared Roots )
+ * 0. MSE ( Mean Squared Error )
* 1. ENTROPY ( Cross Entropy )
* 2. ENTROPY_SIGMOID (Cross Entropy amalgamated with sigmoid for
* stability)
 * 3. ENTROPY_SOFTMAX (Cross Entropy amalgamated with softmax for
 *    stability)
 * 4. Unknown
*/
typedef enum {
- COST_MSR,
+ COST_MSE,
COST_ENTROPY,
COST_ENTROPY_SIGMOID,
COST_ENTROPY_SOFTMAX,
Tensor l;
switch (cost) {
- case COST_MSR: {
+ case COST_MSE: {
// y2 <- y2 - y;
y2.subtract_i(y);
Tensor y = input;
switch (cost) {
- case COST_MSR:
+ case COST_MSE:
ret_derivative = y.subtract(y2).multiply(2).divide(y.getDim().getDataLen());
break;
case COST_ENTROPY_SIGMOID:
/**
* @brief Cost Function String from configure file
- * "msr" : Mean Squared Roots
+ * "mse" : Mean Squared Error
* "cross" : Cross Entropy
*/
- std::array<std::string, 2> cost_string = {"msr", "cross"};
+ std::array<std::string, 2> cost_string = {"mse", "cross"};
/**
* @brief Network Type String from configure file
Epoch = 100 # Epoch
Optimizer = sgd # Optimizer : sgd (stochastic gradient descent),
# adam (Adaptive Moment Estimation)
-Cost = cross # Cost(loss) function : msr (mean square root error)
- # cross ( for Cross Entropy )
+Cost = cross # Cost(loss) function : mse (mean squared error)
+ # cross (Cross Entropy)
Model = "model.bin" # model path to save / read
Minibatch = 1 # mini batch size
# beta1 = 0.9 # beta 1 for adam
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_02_p) {
addActivation(nntrainer::ACT_SIGMOID);
- addLoss(nntrainer::COST_MSR);
+ addLoss(nntrainer::COST_MSE);
setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
/** Verify forwarding value */
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_03_p) {
addActivation(nntrainer::ACT_SOFTMAX);
- addLoss(nntrainer::COST_MSR);
+ addLoss(nntrainer::COST_MSE);
setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
/** Verify forwarding value */
*/
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_04_p) {
- addLoss(nntrainer::COST_MSR);
+ addLoss(nntrainer::COST_MSE);
setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
/** Verify forwarding value */
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_05_p) {
addActivation(nntrainer::ACT_SIGMOID);
- addLoss(nntrainer::COST_MSR);
+ addLoss(nntrainer::COST_MSE);
setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
/** Verify forwarding value */
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_06_p) {
addActivation(nntrainer::ACT_SOFTMAX);
- addLoss(nntrainer::COST_MSR);
+ addLoss(nntrainer::COST_MSE);
setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
/** Verify forwarding value */