From: Parichay Kapoor
Date: Fri, 29 Jan 2021 11:42:12 +0000 (+0900)
Subject: [optimizer] Update to camelcase
X-Git-Tag: accepted/tizen/unified/20210305.034114~8
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=962583a46bb998717d605c17965bde3f4d3241b0;p=platform%2Fcore%2Fml%2Fnntrainer.git

[optimizer] Update to camelcase

Update apply_gradient(s) to applyGradient(s)

Signed-off-by: Parichay Kapoor
---

diff --git a/Applications/SimpleShot/layers/centroid_knn.cpp b/Applications/SimpleShot/layers/centroid_knn.cpp
index 856a976..aaceb86 100644
--- a/Applications/SimpleShot/layers/centroid_knn.cpp
+++ b/Applications/SimpleShot/layers/centroid_knn.cpp
@@ -4,12 +4,12 @@
  *
  * @file centroid_knn.cpp
  * @date 09 Jan 2021
- * @details This file contains the simple nearest neighbor layer, this layer
- * takes centroid and calculate l2 distance
+ * @brief This file contains the simple nearest neighbor layer
  * @see https://github.com/nnstreamer/nntrainer
  * @author Jihoon Lee
  * @bug No known bugs except for NYI items
  *
+ * @details This layer takes centroid and calculate l2 distance
  */
 
 #include
@@ -88,16 +88,18 @@ int CentroidKNN::initialize(nntrainer::Manager &manager) {
   if (weights.empty()) {
     weights.reserve(2);
     weights.emplace_back(map_dim, nntrainer::WeightInitializer::WEIGHT_ZEROS,
-                         false, "centroidNN:map");
+                         nntrainer::WeightRegularizer::NONE, 1.0f, false,
+                         "centroidNN:map");
     weights.emplace_back(samples_seen,
-                         nntrainer::WeightInitializer::WEIGHT_ZEROS, false,
+                         nntrainer::WeightInitializer::WEIGHT_ZEROS,
+                         nntrainer::WeightRegularizer::NONE, 1.0f, false,
                          "centroidNN:num_samples");
     manager.trackWeights(weights);
   } else {
     weights[0].reset(map_dim, nntrainer::WeightInitializer::WEIGHT_ZEROS,
-                     false);
+                     nntrainer::WeightRegularizer::NONE, 1.0f, false);
     weights[1].reset(samples_seen, nntrainer::WeightInitializer::WEIGHT_ZEROS,
-                     false);
+                     nntrainer::WeightRegularizer::NONE, 1.0f, false);
   }
 
   return ML_ERROR_NONE;
diff --git a/nntrainer/layers/layer_internal.h b/nntrainer/layers/layer_internal.h
index b853cc3..9035233 100644
--- a/nntrainer/layers/layer_internal.h
+++ b/nntrainer/layers/layer_internal.h
@@ -136,7 +136,7 @@ public:
   virtual void applyGradient(unsigned int iteration,
                              std::shared_ptr<Optimizer> optimizer) {
     if (optimizer)
-      optimizer->apply_gradients(weights, iteration);
+      optimizer->applyGradients(weights, iteration);
   }
 
   /**
diff --git a/nntrainer/models/neuralnet.cpp b/nntrainer/models/neuralnet.cpp
index 44274f7..9f93ab9 100644
--- a/nntrainer/models/neuralnet.cpp
+++ b/nntrainer/models/neuralnet.cpp
@@ -352,7 +352,7 @@ void NeuralNetwork::backwarding(std::shared_ptr<Layer> layer, int iteration,
   layer->calcDerivative();
 
   if (apply_gradient)
-    opt->apply_gradients(layer->getWeightsRef(), iteration);
+    opt->applyGradients(layer->getWeightsRef(), iteration);
 }
 
 /**
diff --git a/nntrainer/optimizers/adam.cpp b/nntrainer/optimizers/adam.cpp
index 6a28b7a..5078c5e 100644
--- a/nntrainer/optimizers/adam.cpp
+++ b/nntrainer/optimizers/adam.cpp
@@ -51,9 +51,8 @@ double Adam::getLearningRate(int iteration) {
   return ll;
 }
 
-void Adam::apply_gradient(Weight &weight, double updated_lr, int iteration) {
+void Adam::applyGradient(Weight &weight, double updated_lr, int iteration) {
 
-  Tensor &x = weight.getVariableRef();
   Tensor &x_grad = weight.getGradientRef();
 
   // This is implementation of adam from original paper.
@@ -89,7 +88,7 @@ void Adam::apply_gradient(Weight &weight, double updated_lr, int iteration) {
   x_grad = wv.apply(sqrtEps, x_grad);
   x_grad.multiply_i(wm);
 
-  x.add_i(x_grad, -updated_lr);
+  weight.applyGradient(updated_lr);
 }
 
 void Adam::setProperty(const PropertyType type, const std::string &value) {
diff --git a/nntrainer/optimizers/adam.h b/nntrainer/optimizers/adam.h
index e32e0fe..ab9c792 100644
--- a/nntrainer/optimizers/adam.h
+++ b/nntrainer/optimizers/adam.h
@@ -36,10 +36,10 @@ public:
     epsilon(ep) {}
 
   /**
-   * @copydoc apply_gradient(Weight &weight, int tensor_idx, double updated_lr,
+   * @copydoc applyGradient(Weight &weight, int tensor_idx, double updated_lr,
    * int iteration)
    */
-  void apply_gradient(Weight &weight, double updated_lr, int iteration);
+  void applyGradient(Weight &weight, double updated_lr, int iteration);
 
   /**
    * @copydoc Optimizer::getType()
diff --git a/nntrainer/optimizers/optimizer.cpp b/nntrainer/optimizers/optimizer.cpp
index 48ca6e1..56683fc 100644
--- a/nntrainer/optimizers/optimizer.cpp
+++ b/nntrainer/optimizers/optimizer.cpp
@@ -46,8 +46,8 @@ double Optimizer::getLearningRate(int iteration) {
   return ll;
 }
 
-void Optimizer::apply_gradients(std::vector<Weight> &weight_list,
-                                int iteration) {
+void Optimizer::applyGradients(std::vector<Weight> &weight_list,
+                               int iteration) {
 
   if (weight_list.empty())
     return;
@@ -61,7 +61,7 @@ void Optimizer::apply_gradients(std::vector<Weight> &weight_list,
     /** calculate regularization gradient before applying the gradient */
     weight.calcRegularizationGradient();
 
-    apply_gradient(weight, ll, iteration);
+    applyGradient(weight, ll, iteration);
   }
 }
 
diff --git a/nntrainer/optimizers/optimizer_internal.h b/nntrainer/optimizers/optimizer_internal.h
index f7885b3..aa0a482 100644
--- a/nntrainer/optimizers/optimizer_internal.h
+++ b/nntrainer/optimizers/optimizer_internal.h
@@ -108,7 +108,7 @@ public:
    * @param[in] params Weight list
    * @param[in] iteration nth epoch number
    */
-  void apply_gradients(std::vector<Weight> &params, int iteration);
+  void applyGradients(std::vector<Weight> &params, int iteration);
 
   /**
    * @brief Read Training optimizer paramters from file
@@ -176,8 +176,8 @@ private:
   * @param[in] iteration nth epoch number
   * @note weight which is called upon can be assumed to be trainable
   */
-  virtual void apply_gradient(Weight &weight, double updated_lr,
-                              int iteration) = 0;
+  virtual void applyGradient(Weight &weight, double updated_lr,
+                             int iteration) = 0;
 };
 
 } /* namespace nntrainer */
diff --git a/nntrainer/optimizers/sgd.cpp b/nntrainer/optimizers/sgd.cpp
index 047d5a3..964d5e5 100644
--- a/nntrainer/optimizers/sgd.cpp
+++ b/nntrainer/optimizers/sgd.cpp
@@ -17,7 +17,7 @@ namespace nntrainer {
 
 const std::string SGD::type = "sgd";
 
-void SGD::apply_gradient(Weight &weight, double updated_lr, int iteration) {
+void SGD::applyGradient(Weight &weight, double updated_lr, int iteration) {
   weight.applyGradient(updated_lr);
 }
 
diff --git a/nntrainer/optimizers/sgd.h b/nntrainer/optimizers/sgd.h
index b3cc1d2..2b7e48c 100644
--- a/nntrainer/optimizers/sgd.h
+++ b/nntrainer/optimizers/sgd.h
@@ -31,10 +31,10 @@ public:
   SGD(float lr = 0.0001f, Args... args) : Optimizer(lr, args...) {}
 
   /**
-   * @copydoc apply_gradient(Weight &weight, double updated_lr,
+   * @copydoc applyGradient(Weight &weight, double updated_lr,
    * int iteration)
    */
-  void apply_gradient(Weight &weight, double updated_lr, int iteration);
+  void applyGradient(Weight &weight, double updated_lr, int iteration);
 
   /**
    * @copydoc Optimizer::getType()
diff --git a/nntrainer/tensor/weight.h b/nntrainer/tensor/weight.h
index 5e08834..771b2ba 100644
--- a/nntrainer/tensor/weight.h
+++ b/nntrainer/tensor/weight.h
@@ -83,7 +83,8 @@ public:
     const TensorDim &dim,
     const WeightInitializer init = WeightInitializer::WEIGHT_XAVIER_UNIFORM,
     const WeightRegularizer reg = WeightRegularizer::NONE,
-    const float reg_const = 1.0f, bool train = true, bool alloc_now = true, std::string name = "");
+    const float reg_const = 1.0f, bool train = true, bool alloc_now = true,
+    std::string name = "");
 
   /**
    * @copydoc var_grad::initializeVariable(const Tensor &)
@@ -243,9 +244,7 @@ public:
   /**
    * @brief Apply the gradient to the weight
    */
-  void applyGradient(double lr) {
-    var->add_i(*grad.get(), -lr);
-  }
+  void applyGradient(double lr) { var->add_i(*grad.get(), -lr); }
 
 private:
   WeightInitializer initializer; /**< initializer for this variable */
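---

For reference, the renamed interface fits together as follows. The sketch below is a standalone, simplified illustration and not part of the patch: Weight and Optimizer here are minimal stand-ins for nntrainer's real classes (a weight is a single float), kept only to show the camelCase call chain applyGradients() dispatching to the per-weight applyGradient() hook, as in optimizer.cpp and sgd.cpp above.

// Standalone sketch, not part of the patch: simplified stand-ins for
// nntrainer's Weight and Optimizer showing the renamed camelCase entry points.
#include <iostream>
#include <vector>

struct Weight {
  float var;  /**< variable value */
  float grad; /**< gradient value */
  /** mirrors Weight::applyGradient(double lr): var -= lr * grad */
  void applyGradient(double lr) { var -= static_cast<float>(lr) * grad; }
};

class Optimizer {
public:
  explicit Optimizer(double lr) : learning_rate(lr) {}
  virtual ~Optimizer() = default;

  /** public entry point, was apply_gradients() before this commit */
  void applyGradients(std::vector<Weight> &weight_list, int iteration) {
    double ll = getLearningRate(iteration);
    for (auto &weight : weight_list)
      applyGradient(weight, ll, iteration); /**< was apply_gradient() */
  }

protected:
  virtual double getLearningRate(int /*iteration*/) { return learning_rate; }
  /** per-weight hook each optimizer overrides, was apply_gradient() */
  virtual void applyGradient(Weight &weight, double updated_lr,
                             int iteration) = 0;

  double learning_rate;
};

class SGD : public Optimizer {
public:
  using Optimizer::Optimizer;

protected:
  void applyGradient(Weight &weight, double updated_lr,
                     int /*iteration*/) override {
    weight.applyGradient(updated_lr); /**< plain SGD step, as in sgd.cpp */
  }
};

int main() {
  std::vector<Weight> weights = {{1.0f, 0.5f}, {2.0f, -0.25f}};
  SGD sgd(0.1);
  sgd.applyGradients(weights, /*iteration=*/0);
  for (const auto &w : weights)
    std::cout << w.var << '\n'; // prints 0.95 then 2.025
  return 0;
}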