#
# @file vgg_keras.py
# @date 08 Oct 2020
-# @brief This is VGG Example using Keras
+# @brief This is VGG16 Example using Keras
# @see https://github.com/nnstreamer/nntrainer
# @author Jijoong Moon <jijoong.moon@samsung.com>
# @bug No known bugs except for NYI items
batch_size =128
Learning = True
Test = False
-num_epoch = 1500
+num_epoch = 1
DEBUG = True
-USE_FIT = False
+USE_FIT = True
def save(filename, *data):
with open(filename, 'ab+') as outfile:
def create_model():
model = models.Sequential()
model.add(tf.keras.Input(shape=(32, 32, 3)))
- model.add(Conv2D(16, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
- model.add(Conv2D(16, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
- model.add(MaxPooling2D(pool_size=(2,2)))
- model.add(Conv2D(32, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
- model.add(Conv2D(32, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
- model.add(MaxPooling2D(pool_size=(2,2)))
- model.add(Conv2D(64, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
model.add(Conv2D(64, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
model.add(Conv2D(64, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(128, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
model.add(Conv2D(128, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
- model.add(Conv2D(128, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
model.add(MaxPooling2D(pool_size=(2,2)))
- model.add(Conv2D(128, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
- model.add(Conv2D(128, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
- model.add(Conv2D(128, (3,3), padding='same', bias_initializer=initializers.Zeros()))
- model.add(BatchNormalization())
- model.add(Activation('relu'))
+ model.add(Conv2D(256, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
+ model.add(Conv2D(256, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
+ model.add(Conv2D(256, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
+ model.add(MaxPooling2D(pool_size=(2,2)))
+ model.add(Conv2D(512, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
+ model.add(Conv2D(512, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
+ model.add(Conv2D(512, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
+ model.add(MaxPooling2D(pool_size=(2,2)))
+ model.add(Conv2D(512, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
+ model.add(Conv2D(512, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
+ model.add(Conv2D(512, (3,3), padding='same', activation='relu', bias_initializer=initializers.Zeros()))
model.add(MaxPooling2D(pool_size=(2,2)))
+
model.add(Flatten())
- model.add(layers.Dense(128, bias_initializer=initializers.Zeros()))
- model.add(BatchNormalization())
- model.add(Activation('relu'))
- model.add(layers.Dense(128, bias_initializer=initializers.Zeros()))
+ model.add(layers.Dense(256, bias_initializer=initializers.Zeros()))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(layers.Dense(100, bias_initializer=initializers.Zeros()))
[Model]
Type = NeuralNetwork # Network Type : Regression, KNN, NeuralNetwork
Learning_rate = 1e-4 # Learning Rate
-Epochs = 1500 # Epochs
+Epochs = 1 # Epochs
Optimizer = adam # Optimizer : sgd (stochastic gradient descent),
                 # adam (Adaptive Moment Estimation)
Loss = cross # Loss function : cross (cross entropy), mse (mean squared error)
input_layers = conv2d_c12_layer
kernel_size = 3,3
bias_initializer=zeros
+Activation=relu
weight_initializer = xavier_uniform
filters = 512
stride = 1,1
padding = 1,1
-[bn_normalization_b1_layer]
-Type = batch_normalization
-input_layers = conv2d_c13_layer
-epsilon = 1.0e-6
-momentum = 0.9
-Activation=relu
-beta_initializer = zeros
-gamma_initializer = ones
-moving_mean_initializer = zeros
-moving_variance_initializer = ones
-
[pooling2d_p5_layer]
Type=pooling2d
-input_layers = bn_normalization_b1_layer
+input_layers = conv2d_c13_layer
pool_size = 2,2
stride =2,2
padding = 0,0
moving_mean_initializer = zeros
moving_variance_initializer = ones
-[fc_f2_layer]
-Type = fully_connected
-input_layers = bn_normalization_b2_layer
-Unit = 256
-weight_initializer = xavier_uniform
-bias_initializer = zeros
-
-[bn_normalization_b3_layer]
-Type = batch_normalization
-input_layers = fc_f2_layer
-epsilon = 1.0e-6
-momentum = 0.9
-Activation = relu
-beta_initializer = zeros
-gamma_initializer = ones
-moving_mean_initializer = zeros
-moving_variance_initializer = ones
-
[fc_f3_layer]
Type = fully_connected
-input_layers = bn_normalization_b3_layer
+input_layers = bn_normalization_b2_layer
Unit = 100 # Output Layer Dimension ( = Weight Width )
weight_initializer = xavier_uniform
bias_initializer = zeros