From 2f23bd097642a5ea383438d7028a63cf34b9d64b Mon Sep 17 00:00:00 2001
From: Jihoon Lee
Date: Thu, 18 Jun 2020 19:15:19 +0900
Subject: [PATCH] Separate activation to layer

**Changes proposed in this PR:**
- Add activation_layer.[h|cpp]
- Add tests for activation_layer

See also #153, #152

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Jihoon Lee
---
 jni/Android.mk                       |   3 +-
 nntrainer/include/activation_layer.h | 141 ++++++++++++++++++++++++++++++
 nntrainer/include/layer.h            |   7 +-
 nntrainer/meson.build                |   6 +-
 nntrainer/src/activation_layer.cpp   | 154 +++++++++++++++++++++++++++++++
 packaging/nntrainer.spec             |   1 +
 6 files changed, 307 insertions(+), 5 deletions(-)
 create mode 100644 nntrainer/include/activation_layer.h
 create mode 100644 nntrainer/src/activation_layer.cpp

diff --git a/jni/Android.mk b/jni/Android.mk
index 18b8658..2539b35 100644
--- a/jni/Android.mk
+++ b/jni/Android.mk
@@ -36,7 +36,8 @@ NNTRAINER_SRCS := $(NNTRAINER_ROOT)/nntrainer/src/neuralnet.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/parse_util.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/tensor_dim.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/conv2d_layer.cpp \
-                  $(NNTRAINER_ROOT)/nntrainer/src/pooling2d_layer.cpp
+                  $(NNTRAINER_ROOT)/nntrainer/src/pooling2d_layer.cpp \
+                  $(NNTRAINER_ROOT)/nntrainer/src/activation_layer.cpp
 
 NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include
 
diff --git a/nntrainer/include/activation_layer.h b/nntrainer/include/activation_layer.h
new file mode 100644
index 0000000..7eac5e7
--- /dev/null
+++ b/nntrainer/include/activation_layer.h
@@ -0,0 +1,141 @@
+/**
+ * Copyright (C) 2020 Jihoon Lee
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * @file activation_layer.h
+ * @date 17 June 2020
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Jihoon Lee
+ * @bug No known bugs except for NYI items
+ * @brief This is Activation Layer Class for Neural Network
+ *
+ */
+
+#ifndef __ACTIVATION_LAYER_H__
+#define __ACTIVATION_LAYER_H__
+#ifdef __cplusplus
+
+#include <fstream>
+#include <functional>
+#include <layer.h>
+#include <memory>
+#include <string>
+#include <tensor.h>
+#include <vector>
+
+namespace nntrainer {
+
+/**
+ * @class ActivationLayer
+ * @brief Activation Layer
+ */
+class ActivationLayer : public Layer {
+
+public:
+  /**
+   * @brief Constructor of Activation Layer
+   */
+  ActivationLayer() : Layer() { this->type = LAYER_ACTIVATION; };
+
+  /**
+   * @brief Destructor of Activation Layer
+   */
+  ~ActivationLayer(){};
+
+  /**
+   * @brief Initialize the layer
+   *
+   * @param[in] last whether this layer is the last layer
+   * @retval #ML_ERROR_NONE Successful.
+   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+   */
+  int initialize(bool last);
+
+  /**
+   * @brief Read Activation layer params. This is a no-op for now.
+   * @param[in] file input stream file
+   */
+  void read(std::ifstream &file) { /* noop */ };
+
+  /**
+   * @brief Save Activation layer params. This is a no-op for now.
+   * @param[in] file output stream file
+   */
+  void save(std::ofstream &file) { /* noop */ };
+
+  /**
+   * @brief forward propagation with input
+   * @param[in] in Input Tensor from upper layer
+   * @param[out] status Error Status of this function
+   * @retval Tensor with the activation applied: f(in)
+   */
+  Tensor forwarding(Tensor in, int &status);
+
+  /**
+   * @brief back propagation: apply the activation prime to the derivative
+   * @param[in] in Derivative Tensor from the next layer
+   * @param[in] iteration Number of epochs for ADAM
+   * @retval Tensor
+   */
+  Tensor backwarding(Tensor in, int iteration);
+
+  /**
+   * @brief copy layer
+   * @param[in] l layer to copy
+   */
+  void copy(std::shared_ptr<Layer> l);
+
+  /**
+   * @brief setActivation by custom activation function
+   *
+   * @param[in] activation_fn std::function<Tensor(Tensor const &)>
+   *            activation function to be used
+   * @param[in] activation_prime_fn std::function<Tensor(Tensor const &)>
+   *            activation prime function to be used
+   * @retval #ML_ERROR_NONE when successful
+   */
+  int setActivation(
+    std::function<Tensor(Tensor const &)> const &activation_fn,
+    std::function<Tensor(Tensor const &)> const &activation_prime_fn);
+
+  /**
+   * @brief setActivation by custom activation function
+   *
+   * @param[in] activation_fn std::function<float(float const)>
+   *            activation function to be used
+   * @param[in] activation_prime_fn std::function<float(float const)>
+   *            activation prime function to be used
+   * @retval #ML_ERROR_NONE when successful
+   */
+  int
+  setActivation(std::function<float(float const)> const &activation_fn,
+                std::function<float(float const)> const &activation_prime_fn);
+
+  /**
+   * @brief setActivation by preset ActiType
+   *
+   * @param[in] acti_type ActiType to be set
+   */
+  void setActivation(ActiType acti_type);
+
+  /**
+   * @brief set Property of layer
+   * @param[in] values values of property
+   * @retval #ML_ERROR_NONE Successful.
+   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+   */
+  int setProperty(std::vector<std::string> values);
+
+private:
+  static constexpr unsigned int ACTIVATION_PROPERTY = 4;
+
+  std::function<Tensor(Tensor const &)> _act_fn;
+  std::function<Tensor(Tensor const &)> _act_prime_fn;
+};
+
+} // namespace nntrainer
+
+#endif /* __cplusplus */
+#endif /* __ACTIVATION_LAYER_H__ */
diff --git a/nntrainer/include/layer.h b/nntrainer/include/layer.h
index 73dc593..c458c9b 100644
--- a/nntrainer/include/layer.h
+++ b/nntrainer/include/layer.h
@@ -49,7 +49,8 @@ CostType;
  * 0. tanh
  * 1. sigmoid
  * 2. relu
- * 3. Unknown
+ * 3. softmax
+ * 4. Unknown
  */
 typedef enum {
   ACT_TANH,
@@ -68,7 +69,8 @@ typedef enum {
  * 4. Pooling 2D Layer type
  * 5. Flatten Layer type
  * 6. Loss Layer type
- * 7. Unknown
+ * 7. Activation Layer type
+ * 8. Unknown
  */
 typedef enum {
   LAYER_IN,
@@ -78,6 +80,7 @@
   LAYER_POOLING2D,
   LAYER_FLATTEN,
   LAYER_LOSS,
+  LAYER_ACTIVATION,
   LAYER_UNKNOWN
 } LayerType;
 
diff --git a/nntrainer/meson.build b/nntrainer/meson.build
index a43506d..fece32a 100644
--- a/nntrainer/meson.build
+++ b/nntrainer/meson.build
@@ -33,7 +33,8 @@ nntrainer_sources = [
   'src/conv2d_layer.cpp',
   'src/lazy_tensor.cpp',
   'src/pooling2d_layer.cpp',
-  'src/flatten_layer.cpp'
+  'src/flatten_layer.cpp',
+  'src/activation_layer.cpp'
 ]
 
 nntrainer_headers = [
@@ -56,7 +57,8 @@ nntrainer_headers = [
   'include/conv2d_layer.h',
   'include/lazy_tensor.h',
   'include/pooling2d_layer.h',
-  'include/flatten_layer.h'
+  'include/flatten_layer.h',
+  'include/activation_layer.h'
 ]
 
 # Build libraries
diff --git a/nntrainer/src/activation_layer.cpp b/nntrainer/src/activation_layer.cpp
new file mode 100644
index 0000000..b1f13ac
--- /dev/null
+++ b/nntrainer/src/activation_layer.cpp
@@ -0,0 +1,154 @@
+/**
+ * Copyright (C) 2020 Jihoon Lee
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * @file activation_layer.cpp
+ * @date 17 June 2020
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Jihoon Lee
+ * @bug No known bugs except for NYI items
+ * @brief This is Activation Layer Class for Neural Network
+ *
+ */
+
+#include <activation_layer.h>
+#include <fstream>
+#include <functional>
+#include <layer.h>
+#include <nntrainer_error.h>
+#include <nntrainer_log.h>
+#include <parse_util.h>
+#include <stdexcept>
+#include <tensor.h>
+#include <util_func.h>
+#include <vector>
+
+namespace nntrainer {
+
+/**
+ * @brief Initialize the layer
+ *
+ * @param[in] last whether this layer is the last layer
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+int ActivationLayer::initialize(bool last) {
+  if (input_dim.batch() <= 0 || input_dim.height() <= 0 ||
+      input_dim.width() <= 0 || input_dim.channel() <= 0) {
+    ml_loge("Error: Dimension must be greater than 0");
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  this->last_layer = last;
+
+  dim = input_dim;
+  output_dim = dim;
+
+  return ML_ERROR_NONE;
+}
+
+Tensor ActivationLayer::forwarding(Tensor in, int &status) {
+  status = ML_ERROR_NONE;
+
+  input = in;
+  hidden = _act_fn(in);
+
+  return hidden;
+}
+
+Tensor ActivationLayer::backwarding(Tensor derivative, int iteration) {
+  return derivative.multiply(_act_prime_fn(hidden));
+}
+
+/**
+ * @brief copy layer
+ * @param[in] l layer to copy
+ */
+void ActivationLayer::copy(std::shared_ptr<Layer> l) {
+  std::shared_ptr<ActivationLayer> from =
+    std::static_pointer_cast<ActivationLayer>(l);
+  this->input.copy(from->input);
+  this->hidden.copy(from->hidden);
+  this->activation_type = from->activation_type;
+}
+
+int ActivationLayer::setActivation(
+  std::function<Tensor(Tensor const &)> const &activation_fn,
+  std::function<Tensor(Tensor const &)> const &activation_prime_fn) {
+  _act_fn = activation_fn;
+  _act_prime_fn = activation_prime_fn;
+
+  return ML_ERROR_NONE;
+}
+
+int ActivationLayer::setActivation(
+  std::function<float(float const)> const &activation_fn,
+  std::function<float(float const)> const &activation_prime_fn) {
+  _act_fn = [activation_fn](Tensor const &t) { return t.apply(activation_fn); };
+  _act_prime_fn = [activation_prime_fn](Tensor const &t) {
+    return t.apply(activation_prime_fn);
+  };
+
+  return ML_ERROR_NONE;
+}
+
+/**
+ * @brief setActivation by preset ActiType
+ *
+ * @param[in] acti_type ActiType to be set
+ */
+void ActivationLayer::setActivation(ActiType acti_type) {
+  switch (acti_type) {
+  case ActiType::ACT_TANH:
+    this->setActivation(tanhFloat, tanhPrime);
+    break;
+  case ActiType::ACT_SIGMOID:
+    this->setActivation(sigmoid, sigmoidePrime);
+    break;
+  case ActiType::ACT_SOFTMAX:
+    this->setActivation(softmax, softmaxPrime);
+    break;
+  case ActiType::ACT_RELU:
+    this->setActivation(relu, reluPrime);
+    break;
+  case ActiType::ACT_UNKNOWN:
+  default:
+    throw std::runtime_error("Error: Not Supported Activation Type");
+  }
+  this->activation_type = acti_type;
+}
+
+/**
+ * @brief set Property of layer
+ * @param[in] values values of property
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+int ActivationLayer::setProperty(std::vector<std::string> values) {
+  int status = ML_ERROR_NONE;
+
+  if (values.size() != 1) {
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  std::string key;
+  std::string value;
+
+  status = getKeyValue(values[0], key, value);
+  NN_RETURN_STATUS();
+
+  if (parseLayerProperty(key) != ACTIVATION_PROPERTY) {
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  try {
+    this->setActivation((ActiType)parseType(value, TOKEN_ACTI));
+  } catch (const std::exception &ex) {
+    ml_loge("Error: Not supported activation type");
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+  return ML_ERROR_NONE;
+}
+
+} // namespace nntrainer
diff --git a/packaging/nntrainer.spec b/packaging/nntrainer.spec
index 17c2613..7b90adb 100644
--- a/packaging/nntrainer.spec
+++ b/packaging/nntrainer.spec
@@ -190,6 +190,7 @@ cp -r result %{buildroot}%{_datadir}/nntrainer/unittest/
 %{_includedir}/nntrainer/pooling2d_layer.h
 %{_includedir}/nntrainer/flatten_layer.h
 %{_includedir}/nntrainer/loss_layer.h
+%{_includedir}/nntrainer/activation_layer.h
 %{_includedir}/nntrainer/neuralnet.h
 %{_includedir}/nntrainer/tensor.h
 %{_includedir}/nntrainer/lazy_tensor.h
--
2.7.4
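
Editor's note: the unit test this PR mentions is not part of the diff above. As a reviewing aid, here is a minimal sketch of how the new layer could be driven end to end through the property path added in this patch. It is illustrative only: the `Tensor(batch, channel, height, width)` constructor and the `setValue`/`getValue` helpers are assumptions about the surrounding nntrainer codebase, not APIs confirmed by this diff.

```cpp
// Illustrative sketch only -- not part of the patch. Assumes the nntrainer
// headers are on the include path and Tensor provides the constructor and
// accessors named below; consult the real activation_layer unit test.
#include <activation_layer.h>
#include <iostream>

int main() {
  nntrainer::ActivationLayer layer;

  // setProperty() expects exactly one "activation=<type>" entry; it is
  // parsed with getKeyValue() and dispatched to setActivation(ActiType).
  int status = layer.setProperty({"activation=relu"});
  if (status != ML_ERROR_NONE)
    return 1;

  // Assumed Tensor(batch, channel, height, width) constructor and an
  // assumed fill-with-constant helper.
  nntrainer::Tensor in(1, 1, 1, 4);
  in.setValue(-1.0f);

  // forwarding() caches the result in `hidden`; backwarding() multiplies
  // the incoming derivative elementwise by the prime of that cached output.
  nntrainer::Tensor out = layer.forwarding(in, status);
  nntrainer::Tensor grad = layer.backwarding(out, /*iteration=*/1);

  std::cout << "relu(-1.0) = " << out.getValue(0, 0, 0, 0) << std::endl;
  return status == ML_ERROR_NONE ? 0 : 1;
}
```

One design observation that follows from the code: because `backwarding()` evaluates `_act_prime_fn` at the cached output `hidden` rather than at the input, the registered prime functions must be expressed in terms of the activation output (for sigmoid, y * (1 - y)) for the elementwise chain rule to hold.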