/**
 * Copyright (C) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *   http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @date   04 December 2019
 * @brief  This is Neural Network Class
 * @see    https://github.com/nnstreamer/nntrainer
 * @author Jijoong Moon <jijoong.moon@samsung.com>
 * @bug    No known bugs except for NYI items
 */
23 #ifndef __NEURALNET_H__
24 #define __NEURALNET_H__
/**
 * @Namespace Namespace of Network
 * @brief     Namespace for Network
 */
/**
 * @brief     Enumeration of Network Type
 *            0. KNN ( k Nearest Neighbor )
 *            1. REG ( Logistic Regression )
 *            2. NEU ( Neural Network )
 *            3. UNKNOWN
 */
typedef enum { NET_KNN, NET_REG, NET_NEU, NET_UNKNOWN } net_type;
/**
 * @brief     Enumeration for input configuration file parsing
 *            0. OPT          ( Optimizer Token )
 *            1. COST         ( Cost Function Token )
 *            2. NET          ( Network Token )
 *            3. ACTI         ( Activation Token )
 *            4. LAYER        ( Layer Token )
 *            5. WEIGHTINI    ( Weight Initialization Token )
 *            6. WEIGHT_DECAY ( Weight Decay Token )
 */
/**
 * @class   NeuralNetwork Class
 * @brief   NeuralNetwork Class which has Network Configuration & Layers
 */
/**
 * @brief     Constructor of NeuralNetwork Class
 */
81 * @brief Constructor of NeuralNetwork Class with Configuration file path
83 NeuralNetwork(std::string config_path);
/**
 * @brief     Destructor of NeuralNetwork Class
 */
97 * @brief Get Optimizer
100 Layers::Optimizer getOptimizer() { return opt; };
103 * @brief Get Learing rate
104 * @retval Learning rate
106 float getLearningRate() { return learning_rate; };
/**
 * @brief     Set loss value
 * @param[in] l loss value
 */
void setLoss(float l);
/**
 * @brief     Initialize Network
 */
120 * @brief forward propagation
121 * @param[in] input Input Tensor X
122 * @retval Output Tensor Y
124 Tensors::Tensor forwarding(Tensors::Tensor input);
127 * @brief forward propagation
128 * @param[in] input Input Tensor X
129 * @param[in] label Input Tensor Y2
130 * @retval Output Tensor Y
132 Tensors::Tensor forwarding(Tensors::Tensor input, Tensors::Tensor output);
135 * @brief back propagation to update W & B
136 * @param[in] input Input Tensor X
137 * @param[in] expectedOutput Lable Tensor Y
138 * @param[in] iteration Epoch Number for ADAM
140 void backwarding(Tensors::Tensor input, Tensors::Tensor expectedOutput, int iteration);
/**
 * @brief     save W & B into file
 */
/**
 * @brief     read W & B from file
 */
153 * @brief set configuration file
154 * @param[in] config_path configuration file path
156 void setConfig(std::string config_path);
162 unsigned int getEpoch() { return epoch; };
165 * @brief Copy Neural Network
166 * @param[in] from NeuralNetwork Object to copy
167 * @retval NeuralNewtork Object copyed
169 NeuralNetwork ©(NeuralNetwork &from);
/**
 * @brief     finalize NeuralNetwork Object
 */
/**
 * @brief     function pointer for activation
 */
float (*activation)(float);
/**
 * @brief     function pointer for derivative of activation
 */
float (*activationPrime)(float);
/**
 * @brief     learning rate
 */
/**
 * @brief     decay_rate for decayed learning rate
 */
/**
 * @brief     decay_step for decayed learning rate
 */
/**
 * @brief     Maximum Epoch
 */
/**
 * @brief     boolean to set the Bias zero
 */
223 * @brief Cost Function type
225 Layers::cost_type cost;
228 * @brief Weight Initialization type
230 Layers::weightIni_type weightini;
/**
 * @brief     Model path to save or read
 */
/**
 * @brief     Configuration file path
 */
245 Layers::Optimizer opt;
/**
 * @brief     Network Type
 */
253 * @brief vector for store layer pointers.
255 std::vector<Layers::Layer *> layers;