From 0970ce35d6595bbbc165ed272efb136a5af4e2b8 Mon Sep 17 00:00:00 2001
From: Parichay Kapoor
Date: Thu, 8 Oct 2020 17:47:29 +0900
Subject: [PATCH] [layer] Update the layer constructors

Update the layer constructors to take arguments
Add a layer_factory creator method
Update model_loader and capi to use the factory creator

**Self evaluation:**
1. Build test: [x]Passed [ ]Failed [ ]Skipped
2. Run test: [x]Passed [ ]Failed [ ]Skipped

Signed-off-by: Parichay Kapoor
---
 api/capi/include/nntrainer_internal.h |  7 ++++
 api/capi/src/nntrainer.cpp            | 28 +++++---------
 api/capi/src/nntrainer_util.cpp       | 14 +++++++
 nntrainer/include/activation_layer.h  |  6 ++-
 nntrainer/include/addition_layer.h    |  9 +++--
 nntrainer/include/bn_layer.h          | 23 ++++++++----
 nntrainer/include/conv2d_layer.h      | 41 +++++++++++---------
 nntrainer/include/fc_layer.h          |  5 ++-
 nntrainer/include/flatten_layer.h     |  3 +-
 nntrainer/include/input_layer.h       | 11 ++++--
 nntrainer/include/layer.h             | 25 ++++++++-----
 nntrainer/include/layer_factory.h     | 70 +++++++++++++++++++++++++++++++++++
 nntrainer/include/loss_layer.h        |  7 ++--
 nntrainer/include/pooling2d_layer.h   | 27 ++++++++------
 nntrainer/meson.build                 |  1 +
 nntrainer/src/activation_layer.cpp    |  8 ----
 nntrainer/src/conv2d_layer.cpp        | 25 +++++++------
 nntrainer/src/model_loader.cpp        | 37 +++++-------------
 nntrainer/src/pooling2d_layer.cpp     |  8 ++--
 packaging/nntrainer.spec              |  1 +
 20 files changed, 226 insertions(+), 130 deletions(-)
 create mode 100644 nntrainer/include/layer_factory.h

diff --git a/api/capi/include/nntrainer_internal.h b/api/capi/include/nntrainer_internal.h
index b0e3d1c..ed1a2c4 100644
--- a/api/capi/include/nntrainer_internal.h
+++ b/api/capi/include/nntrainer_internal.h
@@ -337,4 +337,11 @@ void ml_tizen_set_feature_state(feature_state_t state);
 nntrainer::OptType
 ml_optimizer_to_nntrainer_type(ml_train_optimizer_type_e type);
 
+/**
+ * @brief Convert nntrainer API layer type to neural network layer type
+ * @param[in] type Layer type API enum
+ * @return nntrainer::LayerType layer type
+ */
+nntrainer::LayerType ml_layer_to_nntrainer_type(ml_train_layer_type_e type);
+
 #endif
diff --git a/api/capi/src/nntrainer.cpp b/api/capi/src/nntrainer.cpp
index 05d0287..8c9ff35 100644
--- a/api/capi/src/nntrainer.cpp
+++ b/api/capi/src/nntrainer.cpp
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include <layer_factory.h>
 #include
 #include
 #include
@@ -513,38 +514,27 @@ int ml_train_model_get_layer(ml_train_model_h model, const char *layer_name,
 
 int ml_train_layer_create(ml_train_layer_h *layer, ml_train_layer_type_e type) {
   int status = ML_ERROR_NONE;
-  returnable f;
   ml_train_layer *nnlayer;
 
   check_feature_state();
 
   nnlayer = new ml_train_layer;
   nnlayer->magic = ML_NNTRAINER_MAGIC;
+  nnlayer->in_use = false;
 
-  switch (type) {
-  case ML_TRAIN_LAYER_TYPE_INPUT:
-    status =
-      exception_bounded_make_shared<nntrainer::InputLayer>(nnlayer->layer);
-    break;
-  case ML_TRAIN_LAYER_TYPE_FC:
-    status = exception_bounded_make_shared<nntrainer::FullyConnectedLayer>(
-      nnlayer->layer);
-    break;
-  default:
-    delete nnlayer;
-    ml_loge("Error: Unknown layer type");
-    status = ML_ERROR_INVALID_PARAMETER;
-    return status;
-  }
+  returnable f = [&]() {
+    nnlayer->layer = createLayer(ml_layer_to_nntrainer_type(type));
+    return ML_ERROR_NONE;
+  };
 
+  status = nntrainer_exception_boundary(f);
   if (status != ML_ERROR_NONE) {
     delete nnlayer;
     ml_loge("Error: Create layer failed");
-    return status;
+  } else {
+    *layer = nnlayer;
   }
 
-  nnlayer->in_use = false;
-  *layer = nnlayer;
   return status;
 }
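Note: a minimal sketch of this creation path as driven from the public C API; the property string and error handling are illustrative, and ml_train_layer_set_property is the existing NULL-terminated varargs setter:

    /* Sketch: ML_TRAIN_LAYER_TYPE_FC now routes through createLayer()
     * inside the exception boundary above. */
    ml_train_layer_h layer;
    int status = ml_train_layer_create(&layer, ML_TRAIN_LAYER_TYPE_FC);
    if (status == ML_ERROR_NONE) {
      status = ml_train_layer_set_property(layer, "unit=10", NULL);
      ml_train_layer_destroy(layer);
    }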
diff --git a/api/capi/src/nntrainer_util.cpp b/api/capi/src/nntrainer_util.cpp
index 74bcd29..43716ff 100644
--- a/api/capi/src/nntrainer_util.cpp
+++ b/api/capi/src/nntrainer_util.cpp
@@ -26,3 +26,17 @@ ml_optimizer_to_nntrainer_type(ml_train_optimizer_type_e type) {
     return nntrainer::OptType::unknown;
   }
 }
+
+/**
+ * @brief Convert nntrainer API layer type to neural network layer type
+ */
+nntrainer::LayerType ml_layer_to_nntrainer_type(ml_train_layer_type_e type) {
+  switch (type) {
+  case ML_TRAIN_LAYER_TYPE_FC:
+    return nntrainer::LayerType::LAYER_FC;
+  case ML_TRAIN_LAYER_TYPE_INPUT:
+    return nntrainer::LayerType::LAYER_IN;
+  default:
+    return nntrainer::LayerType::LAYER_UNKNOWN;
+  }
+}
diff --git a/nntrainer/include/activation_layer.h b/nntrainer/include/activation_layer.h
index 26118ea..b811efe 100644
--- a/nntrainer/include/activation_layer.h
+++ b/nntrainer/include/activation_layer.h
@@ -30,7 +30,11 @@ public:
   /**
    * @brief     Constructor of Activation Layer
    */
-  ActivationLayer(ActivationType at = ActivationType::ACT_NONE);
+  template <typename... Args>
+  ActivationLayer(ActivationType at = ActivationType::ACT_NONE, Args... args) :
+    Layer(LayerType::LAYER_ACTIVATION, args...) {
+    setActivation(at);
+  }
 
   /**
    * @brief     Destructor of Activation Layer
diff --git a/nntrainer/include/addition_layer.h b/nntrainer/include/addition_layer.h
index e5407ce..1027401 100644
--- a/nntrainer/include/addition_layer.h
+++ b/nntrainer/include/addition_layer.h
@@ -29,10 +29,11 @@ public:
   /**
    * @brief     Constructor of Addition Layer
    */
-  AdditionLayer() {
-    setType(LayerType::LAYER_ADDITION);
-    num_inputs = 0;
-  };
+  template <typename... Args>
+  AdditionLayer(unsigned int num_inputs_ = 0, Args... args) :
+    Layer(LayerType::LAYER_ADDITION, args...) {
+    num_inputs = num_inputs_;
+  }
 
   /**
    * @brief     Destructor of Addition Layer
diff --git a/nntrainer/include/bn_layer.h b/nntrainer/include/bn_layer.h
index fab3d4b..5198eeb 100644
--- a/nntrainer/include/bn_layer.h
+++ b/nntrainer/include/bn_layer.h
@@ -42,21 +42,28 @@ public:
   /**
    * @brief     Constructor of Batch Normalization Layer
    */
-  BatchNormalizationLayer(float epsilon = 0.001, float momentum = 0.99,
-                          int axis = -1) :
+  template <typename... Args>
+  BatchNormalizationLayer(
+    int axis = -1, float momentum = 0.99,
+    float epsilon = 0.001,
+    WeightInitializer moving_mean_initializer = WeightInitializer::WEIGHT_ZEROS,
+    WeightInitializer moving_variance_initializer =
+      WeightInitializer::WEIGHT_ZEROS,
+    WeightInitializer gamma_initializer = WeightInitializer::WEIGHT_ONES,
+    WeightInitializer beta_initializer = WeightInitializer::WEIGHT_ONES,
+    Args... args) :
+    Layer(LayerType::LAYER_BN, args...),
     epsilon(epsilon),
     momentum(momentum),
     axis(axis),
-    initializers{
-      WeightInitializer::WEIGHT_ZEROS, WeightInitializer::WEIGHT_ONES,
-      WeightInitializer::WEIGHT_ZEROS, WeightInitializer::WEIGHT_ONES} {
-    setType(LayerType::LAYER_BN);
-  };
+    initializers{moving_mean_initializer, moving_variance_initializer,
+                 gamma_initializer, beta_initializer} {}
 
   /**
    * @brief     Destructor of BatchNormalizationLayer
    */
-  ~BatchNormalizationLayer(){};
+  ~BatchNormalizationLayer() {}
 
   /**
    * @brief  Move constructor of Pooling 2D Layer.
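Note: a minimal sketch of the reordered batch-normalization constructor in use; the values are illustrative, and any arguments after the four initializers forward to the Layer base constructor:

    // Illustrative: axis, momentum, epsilon, then the weight initializers.
    BatchNormalizationLayer bn(-1 /* axis */, 0.99f /* momentum */,
                               0.001f /* epsilon */);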
diff --git a/nntrainer/include/conv2d_layer.h b/nntrainer/include/conv2d_layer.h
index 76e665e..18526e1 100644
--- a/nntrainer/include/conv2d_layer.h
+++ b/nntrainer/include/conv2d_layer.h
@@ -31,20 +31,25 @@ public:
   /**
    * @brief     Constructor of Conv 2D Layer
    */
-  Conv2DLayer() :
-    filter_size(0),
-    kernel_size{0, 0},
-    stride{1, 1},
-    padding{0, 0},
-    normalization(false),
-    standardization(false) {
-    setType(LayerType::LAYER_CONV2D);
-  };
+  template <typename... Args>
+  Conv2DLayer(unsigned int filter_size_ = 0,
+              const std::array<unsigned int, CONV2D_DIM> &kernel_size_ = {0, 0},
+              const std::array<unsigned int, CONV2D_DIM> &stride_ = {1, 1},
+              const std::array<unsigned int, CONV2D_DIM> &padding_ = {0, 0},
+              bool normalization_ = false, bool standardization_ = false,
+              Args... args) :
+    Layer(LayerType::LAYER_CONV2D, args...),
+    filter_size(filter_size_),
+    kernel_size(kernel_size_),
+    stride(stride_),
+    padding(padding_),
+    normalization(normalization_),
+    standardization(standardization_) {}
 
   /**
    * @brief     Destructor of Conv 2D Layer
    */
-  ~Conv2DLayer(){};
+  ~Conv2DLayer() {}
 
   /**
    * @brief  Move constructor of Conv 2D Layer.
@@ -117,9 +122,9 @@ public:
 
 private:
   unsigned int filter_size;
-  unsigned int kernel_size[CONV2D_DIM];
-  unsigned int stride[CONV2D_DIM];
-  unsigned int padding[CONV2D_DIM];
+  std::array<unsigned int, CONV2D_DIM> kernel_size;
+  std::array<unsigned int, CONV2D_DIM> stride;
+  std::array<unsigned int, CONV2D_DIM> padding;
 
   bool normalization;
   bool standardization;
@@ -183,9 +188,10 @@ private:
    * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
    */
   int conv2d_gemm(const float *mkernel, TensorDim kdim, Tensor const &in,
-                  TensorDim outdim, unsigned int const *mstride,
-                  unsigned int const *pad, float *out, unsigned int osize,
-                  bool channel_mode);
+                  TensorDim outdim,
+                  const std::array<unsigned int, CONV2D_DIM> &stride,
+                  const std::array<unsigned int, CONV2D_DIM> &pad, float *out,
+                  unsigned int osize, bool channel_mode);
 
   /**
    * @brief     reform the data to 2d matrix
@@ -199,7 +205,8 @@ private:
    * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
    */
   int im2col(Tensor in_padded, TensorDim kdim, float *inCol, TensorDim outdim,
-             unsigned int const *mstride, bool channel_mode);
+             const std::array<unsigned int, CONV2D_DIM> &mstride,
+             bool channel_mode);
 };
 
 } // namespace nntrainer
diff --git a/nntrainer/include/fc_layer.h b/nntrainer/include/fc_layer.h
index 0581a5b..3d4185d 100644
--- a/nntrainer/include/fc_layer.h
+++ b/nntrainer/include/fc_layer.h
@@ -29,7 +29,10 @@ public:
   /**
    * @brief     Constructor of Fully Connected Layer
    */
-  FullyConnectedLayer() : unit(0) { setType(LayerType::LAYER_FC); };
+  template <typename... Args>
+  FullyConnectedLayer(unsigned int unit_ = 0, Args... args) :
+    Layer(LayerType::LAYER_FC, args...),
+    unit(unit_) {}
 
   /**
    * @brief     Destructor of Fully Connected Layer
diff --git a/nntrainer/include/flatten_layer.h b/nntrainer/include/flatten_layer.h
index 54501c2..d10253a 100644
--- a/nntrainer/include/flatten_layer.h
+++ b/nntrainer/include/flatten_layer.h
@@ -29,7 +29,8 @@ public:
   /**
    * @brief     Constructor of Flatten Layer
    */
-  FlattenLayer() { setType(LayerType::LAYER_FLATTEN); };
+  template <typename... Args>
+  FlattenLayer(Args... args) : Layer(LayerType::LAYER_FLATTEN, args...) {}
 
   /**
    * @brief     Destructor of Flatten Layer
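Note: a quick sketch of the array-taking constructors above; the sizes and values are illustrative:

    // Illustrative: 32 filters, 3x3 kernel, stride 1, padding 1; the braced
    // lists initialize the std::array<unsigned int, CONV2D_DIM> parameters.
    Conv2DLayer conv(32, {3, 3}, {1, 1}, {1, 1});
    FullyConnectedLayer fc(10); // 10 output units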
diff --git a/nntrainer/include/input_layer.h b/nntrainer/include/input_layer.h
index ac1ae2d..188bc1c 100644
--- a/nntrainer/include/input_layer.h
+++ b/nntrainer/include/input_layer.h
@@ -38,14 +38,17 @@ public:
   /**
    * @brief     Constructor of InputLayer
    */
-  InputLayer() : normalization(false), standardization(false) {
-    setType(LayerType::LAYER_IN);
-  };
+  template <typename... Args>
+  InputLayer(bool normalization = false, bool standardization = false,
+             Args... args) :
+    Layer(LayerType::LAYER_IN, args...),
+    normalization(normalization),
+    standardization(standardization) {}
 
   /**
    * @brief     Destructor of InputLayer
    */
-  ~InputLayer(){};
+  ~InputLayer() {}
 
   /**
    * @brief  Move constructor of Pooling 2D Layer.
diff --git a/nntrainer/include/layer.h b/nntrainer/include/layer.h
index f3caaf1..8345b1b 100644
--- a/nntrainer/include/layer.h
+++ b/nntrainer/include/layer.h
@@ -84,17 +84,24 @@ public:
   /**
    * @brief     Constructor of Layer Class
    */
-  Layer() :
+  Layer(
+    LayerType type_, ActivationType activation_type_ = ActivationType::ACT_NONE,
+    WeightRegularizerType weight_regularizer_ = WeightRegularizerType::unknown,
+    const float weight_regularizer_constant_ = 1.0f,
+    WeightInitializer weight_initializer_ =
+      WeightInitializer::WEIGHT_XAVIER_UNIFORM,
+    WeightInitializer bias_initializer_ = WeightInitializer::WEIGHT_ZEROS,
+    bool trainable_ = true, bool flatten_ = false) :
     name(std::string()),
-    type(LayerType::LAYER_UNKNOWN),
+    type(type_),
     loss(0.0f),
-    activation_type(ActivationType::ACT_NONE),
-    weight_regularizer(WeightRegularizerType::unknown),
-    weight_regularizer_constant(0.0f),
-    weight_initializer(WeightInitializer::WEIGHT_XAVIER_UNIFORM),
-    bias_initializer(WeightInitializer::WEIGHT_ZEROS),
-    flatten(false),
-    trainable(true),
+    activation_type(activation_type_),
+    weight_regularizer(weight_regularizer_),
+    weight_regularizer_constant(weight_regularizer_constant_),
+    weight_initializer(weight_initializer_),
+    bias_initializer(bias_initializer_),
+    flatten(flatten_),
+    trainable(trainable_),
     num_weights(0),
     num_inputs(1),
     num_outputs(1) {}
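Note: because every derived constructor forwards its trailing arguments to this base constructor, per-layer options and base-layer options combine into one call; a hedged sketch (WeightRegularizerType::l2norm is an assumed enumerator name):

    // Illustrative: the unit count is FC-specific; the activation and
    // regularizer arguments forward through Args... into Layer's list.
    FullyConnectedLayer fc(10, ActivationType::ACT_RELU,
                           WeightRegularizerType::l2norm, 0.001f);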
diff --git a/nntrainer/include/layer_factory.h b/nntrainer/include/layer_factory.h
new file mode 100644
index 0000000..06f6180
--- /dev/null
+++ b/nntrainer/include/layer_factory.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2020 Parichay Kapoor
+ *
+ * @file   layer_factory.h
+ * @date   7 October 2020
+ * @see    https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor
+ * @bug    No known bugs except for NYI items
+ * @brief  This is the layer factory.
+ */
+
+#ifndef __LAYER_FACTORY_H__
+#define __LAYER_FACTORY_H__
+#ifdef __cplusplus
+
+#include <activation_layer.h>
+#include <addition_layer.h>
+#include <bn_layer.h>
+#include <conv2d_layer.h>
+#include <fc_layer.h>
+#include <flatten_layer.h>
+#include <input_layer.h>
+#include <layer.h>
+#include <loss_layer.h>
+#include <pooling2d_layer.h>
+
+namespace nntrainer {
+
+/**
+ * @brief Factory creator with copy constructor
+ */
+// std::unique_ptr<Layer> createLayer(LayerType type,
+//                                    const Layer &layer);
+
+/**
+ * @brief Factory creator with constructor
+ */
+template <typename... Args>
+std::unique_ptr<Layer> createLayer(LayerType type, Args... args) {
+  switch (type) {
+  case LayerType::LAYER_IN:
+    return std::make_unique<InputLayer>(args...);
+  case LayerType::LAYER_FC:
+    return std::make_unique<FullyConnectedLayer>(args...);
+  case LayerType::LAYER_BN:
+    return std::make_unique<BatchNormalizationLayer>(args...);
+  case LayerType::LAYER_CONV2D:
+    return std::make_unique<Conv2DLayer>(args...);
+  case LayerType::LAYER_POOLING2D:
+    return std::make_unique<Pooling2DLayer>(args...);
+  case LayerType::LAYER_FLATTEN:
+    return std::make_unique<FlattenLayer>(args...);
+  case LayerType::LAYER_ACTIVATION:
+    return std::make_unique<ActivationLayer>(args...);
+  case LayerType::LAYER_ADDITION:
+    return std::make_unique<AdditionLayer>(args...);
+  case LayerType::LAYER_LOSS:
+    return std::make_unique<LossLayer>(args...);
+  case LayerType::LAYER_UNKNOWN:
+    /** fallthrough intended */
+  default:
+    throw std::invalid_argument("Unknown type for the layer");
+  }
+}
+
+} /* namespace nntrainer */
+
+#endif /* __cplusplus */
+#endif /* __LAYER_FACTORY_H__ */
diff --git a/nntrainer/include/loss_layer.h b/nntrainer/include/loss_layer.h
index 6256df5..23c61f1 100644
--- a/nntrainer/include/loss_layer.h
+++ b/nntrainer/include/loss_layer.h
@@ -42,9 +42,10 @@ public:
   /**
    * @brief     Constructor of Loss Layer
    */
-  LossLayer() : loss_type(LossType::LOSS_UNKNOWN) {
-    setType(LayerType::LAYER_LOSS);
-  };
+  template <typename... Args>
+  LossLayer(LossType loss_type_ = LossType::LOSS_UNKNOWN, Args... args) :
+    Layer(LayerType::LAYER_LOSS, args...),
+    loss_type(loss_type_) {}
 
   /**
    * @brief     Destructor of Loss Layer
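Note: a minimal sketch of the factory in use, matching how model_loader calls it below. Because every switch branch is instantiated for a given argument pack, any extra arguments must be accepted by all layer constructors; callers here therefore pass the type alone and configure via properties afterwards:

    // Illustrative: std::unique_ptr<Layer> converts to std::shared_ptr<Layer>.
    std::shared_ptr<nntrainer::Layer> layer;
    try {
      layer = nntrainer::createLayer(nntrainer::LayerType::LAYER_FC);
    } catch (const std::invalid_argument &e) {
      // LAYER_UNKNOWN and unhandled enum values land here.
    }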
diff --git a/nntrainer/include/pooling2d_layer.h b/nntrainer/include/pooling2d_layer.h
index e42557f..aedf74a 100644
--- a/nntrainer/include/pooling2d_layer.h
+++ b/nntrainer/include/pooling2d_layer.h
@@ -40,18 +40,23 @@ public:
   /**
    * @brief     Constructor of Pooling 2D Layer
    */
-  Pooling2DLayer() :
-    pool_size{0, 0},
-    stride{1, 1},
-    padding{0, 0},
-    pooling_type(PoolingType::average) {
-    setType(LayerType::LAYER_POOLING2D);
-  };
+  template <typename... Args>
+  Pooling2DLayer(
+    PoolingType pooling_type_ = PoolingType::average,
+    const std::array<unsigned int, POOLING2D_DIM> &pool_size_ = {0, 0},
+    const std::array<unsigned int, POOLING2D_DIM> &stride_ = {1, 1},
+    const std::array<unsigned int, POOLING2D_DIM> &padding_ = {0, 0},
+    Args... args) :
+    Layer(LayerType::LAYER_POOLING2D, args...),
+    pool_size(pool_size_),
+    stride(stride_),
+    padding(padding_),
+    pooling_type(pooling_type_) {}
 
   /**
    * @brief     Destructor of Pooling 2D Layer
    */
-  ~Pooling2DLayer(){};
+  ~Pooling2DLayer() {}
 
   /**
    * @brief  Move constructor of Pooling 2D Layer.
@@ -128,9 +133,9 @@ public:
   void setProperty(const PropertyType type, const std::string &value = "");
 
 private:
-  unsigned int pool_size[POOLING2D_DIM];
-  unsigned int stride[POOLING2D_DIM];
-  unsigned int padding[POOLING2D_DIM];
+  std::array<unsigned int, POOLING2D_DIM> pool_size;
+  std::array<unsigned int, POOLING2D_DIM> stride;
+  std::array<unsigned int, POOLING2D_DIM> padding;
   std::vector<unsigned int> max_idx;
   std::vector<std::vector<unsigned int>> max_idx_global;
   PoolingType pooling_type;
diff --git a/nntrainer/meson.build b/nntrainer/meson.build
index b656cb1..e8cc189 100644
--- a/nntrainer/meson.build
+++ b/nntrainer/meson.build
@@ -67,6 +67,7 @@ nntrainer_headers = [
   'include/flatten_layer.h',
   'include/input_layer.h',
   'include/layer.h',
+  'include/layer_factory.h',
   'include/lazy_tensor.h',
   'include/loss_layer.h',
   'include/model_loader.h',
diff --git a/nntrainer/src/activation_layer.cpp b/nntrainer/src/activation_layer.cpp
index f0c798d..c796fb1 100644
--- a/nntrainer/src/activation_layer.cpp
+++ b/nntrainer/src/activation_layer.cpp
@@ -32,14 +32,6 @@
 namespace nntrainer {
 
 /**
- * @brief     Constructor of Activation Layer
- */
-ActivationLayer::ActivationLayer(ActivationType at) : Layer() {
-  setType(LayerType::LAYER_ACTIVATION);
-  setActivation(at);
-}
-
-/**
  * @brief     Initialize the layer
  *
  * @retval #ML_ERROR_NONE Successful.
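Note: with the members now std::array, the legacy pointer-taking helpers receive .data(), as the conv2d and pooling changes below show; a brief sketch of the pattern (the input tensor is assumed in scope):

    // Illustrative: std::array keeps the fixed size in the type while
    // .data() bridges to unsigned int const * parameters such as zero_pad's.
    std::array<unsigned int, CONV2D_DIM> pad{1, 1};
    Tensor padded = zero_pad(0, input, pad.data());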
diff --git a/nntrainer/src/conv2d_layer.cpp b/nntrainer/src/conv2d_layer.cpp
index 17808f6..57d0876 100644
--- a/nntrainer/src/conv2d_layer.cpp
+++ b/nntrainer/src/conv2d_layer.cpp
@@ -167,7 +167,7 @@ sharedConstTensor Conv2DLayer::forwarding(sharedConstTensor in) {
 
 sharedConstTensor Conv2DLayer::backwarding(sharedConstTensor derivative,
                                            int iteration) {
-  unsigned int same_pad[CONV2D_DIM];
+  std::array<unsigned int, CONV2D_DIM> same_pad;
 
   same_pad[0] = kernel_size[0] - 1;
   same_pad[1] = kernel_size[1] - 1;
@@ -351,7 +351,7 @@ sharedConstTensor Conv2DLayer::backwarding(sharedConstTensor derivative,
     opt->apply_gradients(weight_list, num_weights, iteration);
   }
 
-  return MAKE_SHARED_TENSOR(std::move(strip_pad(ret, padding)));
+  return MAKE_SHARED_TENSOR(std::move(strip_pad(ret, padding.data())));
 }
 
 void Conv2DLayer::copy(std::shared_ptr<Layer> l) {
@@ -415,7 +415,7 @@ void Conv2DLayer::setProperty(const PropertyType type,
     }
     break;
   case PropertyType::kernel_size:
     if (!value.empty()) {
-      status = getValues(CONV2D_DIM, value, (int *)(kernel_size));
+      status = getValues(CONV2D_DIM, value, (int *)(kernel_size.data()));
       throw_status(status);
       if (kernel_size[0] == 0 || kernel_size[1] == 0) {
         throw std::invalid_argument(
@@ -425,7 +425,7 @@ void Conv2DLayer::setProperty(const PropertyType type,
     break;
   case PropertyType::stride:
     if (!value.empty()) {
-      status = getValues(CONV2D_DIM, value, (int *)(stride));
+      status = getValues(CONV2D_DIM, value, (int *)(stride.data()));
       throw_status(status);
       if (stride[0] == 0 || stride[1] == 0) {
         throw std::invalid_argument(
@@ -435,7 +435,7 @@ void Conv2DLayer::setProperty(const PropertyType type,
     break;
   case PropertyType::padding:
     if (!value.empty()) {
-      status = getValues(CONV2D_DIM, value, (int *)(padding));
+      status = getValues(CONV2D_DIM, value, (int *)(padding.data()));
       throw_status(status);
     }
     break;
@@ -498,11 +498,11 @@ int Conv2DLayer::conv2d(float *in, TensorDim indim, const float *kernel,
   return status;
 }
 
-int Conv2DLayer::conv2d_gemm(const float *mkernel, TensorDim kdim,
-                             Tensor const &in, TensorDim outdim,
-                             unsigned int const *mstride,
-                             unsigned int const *pad, float *out,
-                             unsigned int osize, bool channel_mode) {
+int Conv2DLayer::conv2d_gemm(
+  const float *mkernel, TensorDim kdim, Tensor const &in, TensorDim outdim,
+  const std::array<unsigned int, CONV2D_DIM> &mstride,
+  const std::array<unsigned int, CONV2D_DIM> &pad, float *out,
+  unsigned int osize, bool channel_mode) {
   int status = ML_ERROR_NONE;
   std::vector<float> in_col;
 
@@ -512,7 +512,7 @@ int Conv2DLayer::conv2d_gemm(const float *mkernel, TensorDim kdim,
     in_col.resize(kdim.width() * kdim.height() * outdim.width());
   }
 
-  Tensor in_padded = zero_pad(0, in, pad);
+  Tensor in_padded = zero_pad(0, in, pad.data());
   status =
     im2col(in_padded, kdim, in_col.data(), outdim, mstride, channel_mode);
   if (status != ML_ERROR_NONE)
@@ -543,7 +543,8 @@ int Conv2DLayer::conv2d_gemm(const float *mkernel, TensorDim kdim,
 }
 
 int Conv2DLayer::im2col(Tensor in_padded, TensorDim kdim, float *in_col,
-                        TensorDim outdim, unsigned int const *mstride,
+                        TensorDim outdim,
+                        const std::array<unsigned int, CONV2D_DIM> &mstride,
                         bool channel_mode) {
   int status = ML_ERROR_NONE;
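Note: the capi change earlier and the loader change below both map factory exceptions onto status codes; a condensed sketch of that shared pattern as a hypothetical standalone helper (not code from this patch):

    // Hypothetical helper illustrating the exception-to-status mapping.
    template <typename F>
    int status_from(F &&fn) {
      try {
        fn();
        return ML_ERROR_NONE;
      } catch (const std::exception &e) {
        ml_loge("%s %s", typeid(e).name(), e.what());
        return ML_ERROR_INVALID_PARAMETER;
      } catch (...) {
        ml_loge("unknown error type thrown");
        return ML_ERROR_INVALID_PARAMETER;
      }
    }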
diff --git a/nntrainer/src/model_loader.cpp b/nntrainer/src/model_loader.cpp
index 36c3b21..3a8b2d9 100644
--- a/nntrainer/src/model_loader.cpp
+++ b/nntrainer/src/model_loader.cpp
@@ -14,6 +14,7 @@
 #include
 #include
+#include <layer_factory.h>
 #include
 #include
 #include
@@ -169,36 +170,16 @@ int ModelLoader::loadLayerConfigIni(dictionary *ini,
     iniparser_getstring(ini, (layer_name + ":Type").c_str(), unknown);
   LayerType layer_type = (LayerType)parseType(layer_type_str, TOKEN_LAYER);
 
-  switch (layer_type) {
-  case LayerType::LAYER_IN:
-    layer = std::make_shared<InputLayer>();
-    break;
-  case LayerType::LAYER_CONV2D:
-    layer = std::make_shared<Conv2DLayer>();
-    break;
-  case LayerType::LAYER_POOLING2D:
-    layer = std::make_shared<Pooling2DLayer>();
-    break;
-  case LayerType::LAYER_FLATTEN:
-    layer = std::make_shared<FlattenLayer>();
-    break;
-  case LayerType::LAYER_FC:
-    layer = std::make_shared<FullyConnectedLayer>();
-    break;
-  case LayerType::LAYER_BN:
-    layer = std::make_shared<BatchNormalizationLayer>();
-    break;
-  case LayerType::LAYER_ACTIVATION:
-    layer = std::make_shared<ActivationLayer>();
-    break;
-  case LayerType::LAYER_UNKNOWN:
-  default:
-    ml_loge("Error: Unknown layer type from %s, parsed to %d",
-            layer_type_str.c_str(),
-            static_cast<std::underlying_type<LayerType>::type>(layer_type));
+  try {
+    layer = createLayer(layer_type);
+  } catch (const std::exception &e) {
+    ml_loge("%s %s", typeid(e).name(), e.what());
+    status = ML_ERROR_INVALID_PARAMETER;
+  } catch (...) {
+    ml_loge("unknown error type thrown");
     status = ML_ERROR_INVALID_PARAMETER;
-    NN_RETURN_STATUS();
   }
+  NN_RETURN_STATUS();
 
   unsigned int property_end =
     static_cast<unsigned int>(Layer::PropertyType::unknown);
diff --git a/nntrainer/src/pooling2d_layer.cpp b/nntrainer/src/pooling2d_layer.cpp
index f2deb74..6cb6bc9 100644
--- a/nntrainer/src/pooling2d_layer.cpp
+++ b/nntrainer/src/pooling2d_layer.cpp
@@ -63,7 +63,7 @@ sharedConstTensor Pooling2DLayer::forwarding(sharedConstTensor in) {
   hidden.setZero();
 
   for (unsigned int b = 0; b < input_dim.batch(); ++b) {
-    Tensor in_padded = zero_pad(b, input, padding);
+    Tensor in_padded = zero_pad(b, input, padding.data());
     Tensor result = pooling2d(b, in_padded);
     memcpy(hidden.getAddress(b * hidden.getDim().getFeatureLen()),
            result.getData(), result.getDim().getDataLen() * sizeof(float));
@@ -208,7 +208,7 @@ void Pooling2DLayer::setProperty(const PropertyType type,
   }
   case PropertyType::pool_size:
     if (!value.empty()) {
-      status = getValues(POOLING2D_DIM, value, (int *)(pool_size));
+      status = getValues(POOLING2D_DIM, value, (int *)(pool_size.data()));
       throw_status(status);
       if (pool_size[0] == 0 || pool_size[1] == 0) {
         throw std::invalid_argument(
@@ -218,7 +218,7 @@ void Pooling2DLayer::setProperty(const PropertyType type,
     break;
   case PropertyType::stride:
     if (!value.empty()) {
-      status = getValues(POOLING2D_DIM, value, (int *)(stride));
+      status = getValues(POOLING2D_DIM, value, (int *)(stride.data()));
       throw_status(status);
       if (stride[0] == 0 || stride[1] == 0) {
         throw std::invalid_argument(
@@ -228,7 +228,7 @@ void Pooling2DLayer::setProperty(const PropertyType type,
     break;
   case PropertyType::padding:
     if (!value.empty()) {
-      status = getValues(POOLING2D_DIM, value, (int *)(padding));
+      status = getValues(POOLING2D_DIM, value, (int *)(padding.data()));
       throw_status(status);
       if ((int)padding[0] < 0 || (int)padding[1] < 0) {
         throw std::invalid_argument(
diff --git a/packaging/nntrainer.spec b/packaging/nntrainer.spec
index 282c085..aad9840 100644
--- a/packaging/nntrainer.spec
+++ b/packaging/nntrainer.spec
@@ -316,6 +316,7 @@ cp -r result %{buildroot}%{_datadir}/nntrainer/unittest/
 %{_includedir}/nntrainer/databuffer_file.h
 %{_includedir}/nntrainer/databuffer_func.h
 %{_includedir}/nntrainer/layer.h
+%{_includedir}/nntrainer/layer_factory.h
 %{_includedir}/nntrainer/input_layer.h
 %{_includedir}/nntrainer/fc_layer.h
 %{_includedir}/nntrainer/bn_layer.h
--
2.7.4