[optimizer] Update to camelCase
author     Parichay Kapoor <pk.kapoor@samsung.com>
           Fri, 29 Jan 2021 11:42:12 +0000 (20:42 +0900)
committer  Jijoong Moon <jijoong.moon@samsung.com>
           Wed, 3 Mar 2021 00:46:41 +0000 (09:46 +0900)
Rename apply_gradient(s) to applyGradient(s) to follow the camelCase naming convention.

Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
Applications/SimpleShot/layers/centroid_knn.cpp
nntrainer/layers/layer_internal.h
nntrainer/models/neuralnet.cpp
nntrainer/optimizers/adam.cpp
nntrainer/optimizers/adam.h
nntrainer/optimizers/optimizer.cpp
nntrainer/optimizers/optimizer_internal.h
nntrainer/optimizers/sgd.cpp
nntrainer/optimizers/sgd.h
nntrainer/tensor/weight.h

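Before the per-file hunks, a minimal standalone sketch of the call pattern this rename produces. Only the method names applyGradients / applyGradient (and the delegation to Weight::applyGradient, shown in the weight.h hunk at the end) come from the patch; the scalar Weight, the fixed learning rate, and the stripped-down Optimizer/SGD classes below are simplified stand-ins for illustration, not the real nntrainer classes, which operate on Tensors.

// Standalone sketch, not the real nntrainer code.
#include <iostream>
#include <vector>

struct Weight {
  float var = 1.0f;
  float grad = 0.1f;
  // mirrors Weight::applyGradient(double lr): var += -lr * grad
  void applyGradient(double lr) { var += static_cast<float>(-lr * grad); }
};

class Optimizer {
public:
  virtual ~Optimizer() = default;
  // renamed from apply_gradients: walk the weight list and update each entry
  void applyGradients(std::vector<Weight> &weights, int iteration) {
    double lr = getLearningRate(iteration);
    for (auto &w : weights)
      applyGradient(w, lr, iteration);
  }

protected:
  virtual double getLearningRate(int /*iteration*/) { return 0.01; }
  // renamed from apply_gradient: per-weight hook overridden by SGD/Adam
  virtual void applyGradient(Weight &weight, double updated_lr,
                             int iteration) = 0;
};

class SGD : public Optimizer {
  void applyGradient(Weight &weight, double updated_lr,
                     int /*iteration*/) override {
    weight.applyGradient(updated_lr); // delegate the actual step to the weight
  }
};

int main() {
  std::vector<Weight> weights(2);
  SGD sgd;
  sgd.applyGradients(weights, /*iteration=*/0);
  std::cout << weights[0].var << std::endl; // 1.0 - 0.01 * 0.1 = 0.999
  return 0;
}
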
diff --git a/Applications/SimpleShot/layers/centroid_knn.cpp b/Applications/SimpleShot/layers/centroid_knn.cpp
index 856a976..aaceb86 100644
@@ -4,12 +4,12 @@
  *
  * @file   centroid_knn.cpp
  * @date   09 Jan 2021
- * @details  This file contains the simple nearest neighbor layer, this layer
- * takes centroid and calculate l2 distance
+ * @brief  This file contains the simple nearest neighbor layer
  * @see    https://github.com/nnstreamer/nntrainer
  * @author Jihoon Lee <jhoon.it.lee@samsung.com>
  * @bug    No known bugs except for NYI items
  *
+ * @details This layer takes centroid and calculate l2 distance
  */
 
 #include <iostream>
@@ -88,16 +88,18 @@ int CentroidKNN::initialize(nntrainer::Manager &manager) {
   if (weights.empty()) {
     weights.reserve(2);
     weights.emplace_back(map_dim, nntrainer::WeightInitializer::WEIGHT_ZEROS,
-                         false, "centroidNN:map");
+                         nntrainer::WeightRegularizer::NONE, 1.0f, false,
+                         "centroidNN:map");
     weights.emplace_back(samples_seen,
-                         nntrainer::WeightInitializer::WEIGHT_ZEROS, false,
+                         nntrainer::WeightInitializer::WEIGHT_ZEROS,
+                         nntrainer::WeightRegularizer::NONE, 1.0f, false,
                          "centroidNN:num_samples");
     manager.trackWeights(weights);
   } else {
     weights[0].reset(map_dim, nntrainer::WeightInitializer::WEIGHT_ZEROS,
-                     false);
+                     nntrainer::WeightRegularizer::NONE, 1.0f, false);
     weights[1].reset(samples_seen, nntrainer::WeightInitializer::WEIGHT_ZEROS,
-                     false);
+                     nntrainer::WeightRegularizer::NONE, 1.0f, false);
   }
 
   return ML_ERROR_NONE;
diff --git a/nntrainer/layers/layer_internal.h b/nntrainer/layers/layer_internal.h
index b853cc3..9035233 100644
@@ -136,7 +136,7 @@ public:
   virtual void applyGradient(unsigned int iteration,
                              std::shared_ptr<Optimizer> optimizer) {
     if (optimizer)
-      optimizer->apply_gradients(weights, iteration);
+      optimizer->applyGradients(weights, iteration);
   }
 
   /**
diff --git a/nntrainer/models/neuralnet.cpp b/nntrainer/models/neuralnet.cpp
index 44274f7..9f93ab9 100644
@@ -352,7 +352,7 @@ void NeuralNetwork::backwarding(std::shared_ptr<Layer> layer, int iteration,
     layer->calcDerivative();
 
   if (apply_gradient)
-    opt->apply_gradients(layer->getWeightsRef(), iteration);
+    opt->applyGradients(layer->getWeightsRef(), iteration);
 }
 
 /**
diff --git a/nntrainer/optimizers/adam.cpp b/nntrainer/optimizers/adam.cpp
index 6a28b7a..5078c5e 100644
@@ -51,9 +51,8 @@ double Adam::getLearningRate(int iteration) {
   return ll;
 }
 
-void Adam::apply_gradient(Weight &weight, double updated_lr, int iteration) {
+void Adam::applyGradient(Weight &weight, double updated_lr, int iteration) {
 
-  Tensor &x = weight.getVariableRef();
   Tensor &x_grad = weight.getGradientRef();
 
   // This is implementation of adam from original paper.
@@ -89,7 +88,7 @@ void Adam::apply_gradient(Weight &weight, double updated_lr, int iteration) {
 
   x_grad = wv.apply(sqrtEps, x_grad);
   x_grad.multiply_i(wm);
-  x.add_i(x_grad, -updated_lr);
+  weight.applyGradient(updated_lr);
 }
 
 void Adam::setProperty(const PropertyType type, const std::string &value) {
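For context on the adam.cpp hunk above: the removed line updated the variable tensor x directly with x.add_i(x_grad, -updated_lr), while the replacement routes the same final step through Weight::applyGradient. The computation is the standard Adam rule (Kingma & Ba); the reading below is a sketch that assumes wm and wv hold the first- and second-moment estimates and that the bias-correction factor is already folded into updated_lr, neither of which is spelled out in the hunk itself.

m_t = \beta_1 m_{t-1} + (1 - \beta_1)\, g_t
v_t = \beta_2 v_{t-1} + (1 - \beta_2)\, g_t^2
\theta_t = \theta_{t-1} - \mathrm{updated\_lr} \cdot \frac{m_t}{\sqrt{v_t} + \epsilon}
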
diff --git a/nntrainer/optimizers/adam.h b/nntrainer/optimizers/adam.h
index e32e0fe..ab9c792 100644
@@ -36,10 +36,10 @@ public:
     epsilon(ep) {}
 
   /**
-   * @copydoc apply_gradient(Weight &weight, int tensor_idx, double updated_lr,
+   * @copydoc applyGradient(Weight &weight, int tensor_idx, double updated_lr,
    * int iteration)
    */
-  void apply_gradient(Weight &weight, double updated_lr, int iteration);
+  void applyGradient(Weight &weight, double updated_lr, int iteration);
 
   /**
    * @copydoc Optimizer::getType()
diff --git a/nntrainer/optimizers/optimizer.cpp b/nntrainer/optimizers/optimizer.cpp
index 48ca6e1..56683fc 100644
@@ -46,8 +46,8 @@ double Optimizer::getLearningRate(int iteration) {
   return ll;
 }
 
-void Optimizer::apply_gradients(std::vector<Weight> &weight_list,
-                                int iteration) {
+void Optimizer::applyGradients(std::vector<Weight> &weight_list,
+                               int iteration) {
 
   if (weight_list.empty())
     return;
@@ -61,7 +61,7 @@ void Optimizer::apply_gradients(std::vector<Weight> &weight_list,
     /** calculate regularization gradient before applying the gradient */
     weight.calcRegularizationGradient();
 
-    apply_gradient(weight, ll, iteration);
+    applyGradient(weight, ll, iteration);
   }
 }
 
diff --git a/nntrainer/optimizers/optimizer_internal.h b/nntrainer/optimizers/optimizer_internal.h
index f7885b3..aa0a482 100644
@@ -108,7 +108,7 @@ public:
    * @param[in] params Weight list
    * @param[in] iteration nth epoch number
    */
-  void apply_gradients(std::vector<Weight> &params, int iteration);
+  void applyGradients(std::vector<Weight> &params, int iteration);
 
   /**
    * @brief     Read Training optimizer paramters from file
@@ -176,8 +176,8 @@ private:
    * @param[in] iteration nth epoch number
    * @note weight which is called upon can be assumed to be trainable
    */
-  virtual void apply_gradient(Weight &weight, double updated_lr,
-                              int iteration) = 0;
+  virtual void applyGradient(Weight &weight, double updated_lr,
+                             int iteration) = 0;
 };
 
 } /* namespace nntrainer */
diff --git a/nntrainer/optimizers/sgd.cpp b/nntrainer/optimizers/sgd.cpp
index 047d5a3..964d5e5 100644
@@ -17,7 +17,7 @@ namespace nntrainer {
 
 const std::string SGD::type = "sgd";
 
-void SGD::apply_gradient(Weight &weight, double updated_lr, int iteration) {
+void SGD::applyGradient(Weight &weight, double updated_lr, int iteration) {
   weight.applyGradient(updated_lr);
 }
 
diff --git a/nntrainer/optimizers/sgd.h b/nntrainer/optimizers/sgd.h
index b3cc1d2..2b7e48c 100644
@@ -31,10 +31,10 @@ public:
   SGD(float lr = 0.0001f, Args... args) : Optimizer(lr, args...) {}
 
   /**
-   * @copydoc apply_gradient(Weight &weight, double updated_lr,
+   * @copydoc applyGradient(Weight &weight, double updated_lr,
    * int iteration)
    */
-  void apply_gradient(Weight &weight, double updated_lr, int iteration);
+  void applyGradient(Weight &weight, double updated_lr, int iteration);
 
   /**
    * @copydoc Optimizer::getType()
diff --git a/nntrainer/tensor/weight.h b/nntrainer/tensor/weight.h
index 5e08834..771b2ba 100644
@@ -83,7 +83,8 @@ public:
     const TensorDim &dim,
     const WeightInitializer init = WeightInitializer::WEIGHT_XAVIER_UNIFORM,
     const WeightRegularizer reg = WeightRegularizer::NONE,
-    const float reg_const = 1.0f, bool train = true, bool alloc_now = true, std::string name = "");
+    const float reg_const = 1.0f, bool train = true, bool alloc_now = true,
+    std::string name = "");
 
   /**
    * @copydoc var_grad::initializeVariable(const Tensor &)
@@ -243,9 +244,7 @@ public:
   /**
    * @brief     Apply the gradient to the weight
    */
-  void applyGradient(double lr) {
-    var->add_i(*grad.get(), -lr);
-  }
+  void applyGradient(double lr) { var->add_i(*grad.get(), -lr); }
 
 private:
   WeightInitializer initializer; /**< initializer for this variable */