# adam (Adaptive Moment Estimation)
Loss = cross # Loss function : mse (mean squared error)
# cross ( for cross entropy )
-Save_Path = "mnist_model.bin" # model path to save / read
+# Save_Path = "mnist_model.bin" # model path to save / read
batch_size = 32 # batch size
beta1 = 0.9 # beta 1 for adam
beta2 = 0.999 # beta 2 for adam
install_dir: application_install_dir
)
-test('app_DeepQ', e, args: [res_path / 'DeepQ.ini'], timeout: 60)
+# test('app_DeepQ', e, args: [res_path / 'DeepQ.ini'], timeout: 60)
Unit = 10
bias_initializer = zeros
Activation = softmax
-Weight_Decay = l2norm
-weight_Decay_Lambda = 0.005
+weight_regularizer = l2norm
+weight_regularizer_constant = 0.005
```
If you want to use generator (option #2), then remove [DataSet] section, and provide dataset generator callbacks.
}
try {
NN.readModel();
- } catch (...) {
- std::cerr << "Error during readModel" << std::endl;
+ } catch (std::exception &e) {
+ std::cerr << "Error during readModel reason: " << e.what() << std::endl;
return 1;
}
}
try {
model->readModel();
- } catch (...) {
- std::cerr << "Error during readModel" << std::endl;
+ } catch (std::exception &e) {
+ std::cerr << "Error during readModel, reason: " << e.what() << std::endl;
return 1;
}
model->setDataset(dataset);
Unit = 10 # Output Layer Dimension ( = Weight Width )
Bias_initializer = zeros
Activation = softmax # activation : sigmoid, softmax
-Weight_Decay = l2norm
-weight_Decay_Lambda = 0.005
+weight_regularizer = l2norm
+weight_regularizer_constant = 0.005
unit = 10
Bias_initializer = zeros
Activation = softmax
-Weight_Decay = l2norm
-weight_Decay_Lambda = 0.005
+weight_regularizer = l2norm
+weight_regularizer_constant = 0.005
* relu : ReLU function
* softmax : softmax function
-8. ```weight_decay = <string>```
+8. ```weight_regularizer = <string>```
set weight regularizer
* l2norm : L2 regularization
| Layer | Properties |
|:-------:|:---|
- | conv2d |<ul><li>filters</li><li>kernel_size</li><li>stride</li><li>padding</li><li>normalization</li><li>standardization</li><li>input_shape</li><li>bias_init_zero</li><li>activation</li><li>flatten</li><li>weight_decay</li><li>weight_regularizer_constant</li><li>weight_initializer</li></ul>|
+ | conv2d |<ul><li>filters</li><li>kernel_size</li><li>stride</li><li>padding</li><li>normalization</li><li>standardization</li><li>input_shape</li><li>bias_init_zero</li><li>activation</li><li>flatten</li><li>weight_regularizer</li><li>weight_regularizer_constant</li><li>weight_initializer</li></ul>|
| pooling2d | <ul><li>pooling</li><li>pool_size</li><li>stride</li><li>padding</li></ul> |
| flatten | - |
- | fully_connected | <lu><li>unit</li><li>normalization</li><li>standardization</li><li>input_shape</li><li>bias_initializer</li><li>activation</li><li>flatten</li><li>weight_decay</li><li>weight_regularizer_constant</li><li>weight_initializer</li></lu>|
+ | fully_connected | <lu><li>unit</li><li>normalization</li><li>standardization</li><li>input_shape</li><li>bias_initializer</li><li>activation</li><li>flatten</li><li>weight_regularizer</li><li>weight_regularizer_constant</li><li>weight_initializer</li></lu>|
| input | <ul><li>normalization </li><li>standardization</li><li>input_shape</li><li>flatten</li></ul>|
| batch_normalization | <ul><li>epsilon</li><li>flatten</li></ul> |
*/
void NeuralNetwork::readModel() {
if (!initialized)
- throw std::runtime_error("Cannot save the model before initialize.");
+ throw std::runtime_error("Cannot read the model before initialize.");
if (save_path == std::string()) {
return;