nntrainer::OptType
ml_optimizer_to_nntrainer_type(ml_train_optimizer_type_e type);
+/**
+ * @brief Convert nntrainer API layer type to neural network layer type
+ * @param[in] type Layer type API enum
+ * @return nntrainer::LayerType layer type
+ */
+nntrainer::LayerType ml_layer_to_nntrainer_type(ml_train_layer_type_e type);
+
#endif
#include <databuffer.h>
#include <databuffer_file.h>
#include <databuffer_func.h>
+#include <layer_factory.h>
#include <neuralnet.h>
#include <nntrainer_error.h>
#include <nntrainer_internal.h>
int ml_train_layer_create(ml_train_layer_h *layer, ml_train_layer_type_e type) {
int status = ML_ERROR_NONE;
- returnable f;
ml_train_layer *nnlayer;
check_feature_state();
nnlayer = new ml_train_layer;
nnlayer->magic = ML_NNTRAINER_MAGIC;
+ nnlayer->in_use = false;
- switch (type) {
- case ML_TRAIN_LAYER_TYPE_INPUT:
- status =
- exception_bounded_make_shared<nntrainer::InputLayer>(nnlayer->layer);
- break;
- case ML_TRAIN_LAYER_TYPE_FC:
- status = exception_bounded_make_shared<nntrainer::FullyConnectedLayer>(
- nnlayer->layer);
- break;
- default:
- delete nnlayer;
- ml_loge("Error: Unknown layer type");
- status = ML_ERROR_INVALID_PARAMETER;
- return status;
- }
+ returnable f = [&]() {
+ nnlayer->layer = createLayer(ml_layer_to_nntrainer_type(type));
+ return ML_ERROR_NONE;
+ };
+ status = nntrainer_exception_boundary(f);
if (status != ML_ERROR_NONE) {
delete nnlayer;
ml_loge("Error: Create layer failed");
- return status;
+ } else {
+ *layer = nnlayer;
}
- nnlayer->in_use = false;
- *layer = nnlayer;
return status;
}
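+
+/*
+ * Usage sketch (illustrative): a typical caller flow for this entry point via
+ * the existing ml-train C API:
+ *
+ *   ml_train_layer_h layer = NULL;
+ *   int status = ml_train_layer_create(&layer, ML_TRAIN_LAYER_TYPE_FC);
+ *   if (status == ML_ERROR_NONE)
+ *     ml_train_layer_destroy(layer);
+ */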
return nntrainer::OptType::unknown;
}
}
+
+/**
+ * @brief Convert nntrainer API layer type to neural network layer type
+ */
+nntrainer::LayerType ml_layer_to_nntrainer_type(ml_train_layer_type_e type) {
+ switch (type) {
+ case ML_TRAIN_LAYER_TYPE_FC:
+ return nntrainer::LayerType::LAYER_FC;
+ case ML_TRAIN_LAYER_TYPE_INPUT:
+ return nntrainer::LayerType::LAYER_IN;
+ default:
+ return nntrainer::LayerType::LAYER_UNKNOWN;
+ }
+}
/**
* @brief Constructor of Activation Layer
*/
- ActivationLayer(ActivationType at = ActivationType::ACT_NONE);
+ template <typename... Args>
+ ActivationLayer(ActivationType at = ActivationType::ACT_NONE, Args... args) :
+ Layer(LayerType::LAYER_ACTIVATION, args...) {
+ setActivation(at);
+ }
/**
* @brief Destructor of Activation Layer
/**
* @brief Constructor of Addition Layer
*/
- AdditionLayer() {
- setType(LayerType::LAYER_ADDITION);
- num_inputs = 0;
- };
+ template <typename... Args>
+ AdditionLayer(unsigned int num_inputs_ = 0, Args... args) :
+ Layer(LayerType::LAYER_ADDITION, args...) {
+ num_inputs = num_inputs_;
+ }
/**
* @brief Destructor of Addition Layer
/**
 * @brief Constructor of Batch Normalization Layer
*/
- BatchNormalizationLayer(float epsilon = 0.001, float momentum = 0.99,
- int axis = -1) :
+ template <typename... Args>
+ BatchNormalizationLayer(
+ int axis = -1, float momentum = 0.99, float epsilon = 0.001,
+ WeightInitializer moving_mean_initializer = WeightInitializer::WEIGHT_ZEROS,
+ WeightInitializer moving_variance_initializer =
+ WeightInitializer::WEIGHT_ONES,
+ WeightInitializer gamma_initializer = WeightInitializer::WEIGHT_ONES,
+ WeightInitializer beta_initializer = WeightInitializer::WEIGHT_ONES,
+ Args... args) :
+ Layer(LayerType::LAYER_BN, args...),
epsilon(epsilon),
momentum(momentum),
axis(axis),
- initializers{
- WeightInitializer::WEIGHT_ZEROS, WeightInitializer::WEIGHT_ONES,
- WeightInitializer::WEIGHT_ZEROS, WeightInitializer::WEIGHT_ONES} {
- setType(LayerType::LAYER_BN);
- };
+ initializers{moving_mean_initializer, moving_variance_initializer,
+ gamma_initializer, beta_initializer} {}
/**
* @brief Destructor of BatchNormalizationLayer
*/
- ~BatchNormalizationLayer(){};
+ ~BatchNormalizationLayer() {}
/**
* @brief Move constructor of Pooling 2D Layer.
/**
* @brief Constructor of Conv 2D Layer
*/
- Conv2DLayer() :
- filter_size(0),
- kernel_size{0, 0},
- stride{1, 1},
- padding{0, 0},
- normalization(false),
- standardization(false) {
- setType(LayerType::LAYER_CONV2D);
- };
+ template <typename... Args>
+ Conv2DLayer(unsigned int filter_size_ = 0,
+ const std::array<unsigned int, CONV2D_DIM> &kernel_size_ = {0, 0},
+ const std::array<unsigned int, CONV2D_DIM> &stride_ = {1, 1},
+ const std::array<unsigned int, CONV2D_DIM> &padding_ = {0, 0},
+ bool normalization_ = false, bool standardization_ = false,
+ Args... args) :
+ Layer(LayerType::LAYER_CONV2D, args...),
+ filter_size(filter_size_),
+ kernel_size(kernel_size_),
+ stride(stride_),
+ padding(padding_),
+ normalization(normalization_),
+ standardization(standardization_) {}
/**
* @brief Destructor of Conv 2D Layer
*/
- ~Conv2DLayer(){};
+ ~Conv2DLayer() {}
/**
* @brief Move constructor of Conv 2D Layer.
private:
unsigned int filter_size;
- unsigned int kernel_size[CONV2D_DIM];
- unsigned int stride[CONV2D_DIM];
- unsigned int padding[CONV2D_DIM];
+ std::array<unsigned int, CONV2D_DIM> kernel_size;
+ std::array<unsigned int, CONV2D_DIM> stride;
+ std::array<unsigned int, CONV2D_DIM> padding;
bool normalization;
bool standardization;
* @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
*/
int conv2d_gemm(const float *mkernel, TensorDim kdim, Tensor const &in,
- TensorDim outdim, unsigned int const *mstride,
- unsigned int const *pad, float *out, unsigned int osize,
- bool channel_mode);
+ TensorDim outdim,
+ const std::array<unsigned int, CONV2D_DIM> &stride,
+ const std::array<unsigned int, CONV2D_DIM> &pad, float *out,
+ unsigned int osize, bool channel_mode);
/**
* @brief reform the data to 2d matrix
* @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
*/
int im2col(Tensor in_padded, TensorDim kdim, float *inCol, TensorDim outdim,
- unsigned int const *mstride, bool channel_mode);
+ const std::array<unsigned int, CONV2D_DIM> &mstride,
+ bool channel_mode);
};
} // namespace nntrainer
/**
* @brief Constructor of Fully Connected Layer
*/
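+ /*
+  * Illustrative note: trailing constructor arguments are forwarded to the
+  * Layer base constructor, so e.g. FullyConnectedLayer(10,
+  * ActivationType::ACT_RELU) sets unit = 10 and passes ACT_RELU through as
+  * the activation type.
+  */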
- FullyConnectedLayer() : unit(0) { setType(LayerType::LAYER_FC); };
+ template <typename... Args>
+ FullyConnectedLayer(unsigned int unit_ = 0, Args... args) :
+ Layer(LayerType::LAYER_FC, args...),
+ unit(unit_) {}
/**
* @brief Destructor of Fully Connected Layer
/**
* @brief Constructor of Flatten Layer
*/
- FlattenLayer() { setType(LayerType::LAYER_FLATTEN); };
+ template <typename... Args>
+ FlattenLayer(Args... args) : Layer(LayerType::LAYER_FLATTEN, args...) {}
/**
* @brief Destructor of Flatten Layer
/**
* @brief Constructor of InputLayer
*/
- InputLayer() : normalization(false), standardization(false) {
- setType(LayerType::LAYER_IN);
- };
+ template <typename... Args>
+ InputLayer(bool normalization_ = false, bool standardization_ = false,
+ Args... args) :
+ Layer(LayerType::LAYER_IN, args...),
+ normalization(normalization_),
+ standardization(standardization_) {}
/**
* @brief Destructor of InputLayer
*/
- ~InputLayer(){};
+ ~InputLayer() {}
/**
* @brief Move constructor of Pooling 2D Layer.
/**
* @brief Constructor of Layer Class
*/
- Layer() :
+ Layer(
+ LayerType type_, ActivationType activation_type_ = ActivationType::ACT_NONE,
+ WeightRegularizerType weight_regularizer_ = WeightRegularizerType::unknown,
+ const float weight_regularizer_constant_ = 1.0f,
+ WeightInitializer weight_initializer_ =
+ WeightInitializer::WEIGHT_XAVIER_UNIFORM,
+ WeightInitializer bias_initializer_ = WeightInitializer::WEIGHT_ZEROS,
+ bool trainable_ = true, bool flatten_ = false) :
name(std::string()),
- type(LayerType::LAYER_UNKNOWN),
+ type(type_),
loss(0.0f),
- activation_type(ActivationType::ACT_NONE),
- weight_regularizer(WeightRegularizerType::unknown),
- weight_regularizer_constant(0.0f),
- weight_initializer(WeightInitializer::WEIGHT_XAVIER_UNIFORM),
- bias_initializer(WeightInitializer::WEIGHT_ZEROS),
- flatten(false),
- trainable(true),
+ activation_type(activation_type_),
+ weight_regularizer(weight_regularizer_),
+ weight_regularizer_constant(weight_regularizer_constant_),
+ weight_initializer(weight_initializer_),
+ bias_initializer(bias_initializer_),
+ flatten(flatten_),
+ trainable(trainable_),
num_weights(0),
num_inputs(1),
num_outputs(1) {}
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file layer_factory.h
+ * @date 7 October 2020
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ * @brief This is the layer factory.
+ */
+
+#ifndef __LAYER_FACTORY_H__
+#define __LAYER_FACTORY_H__
+#ifdef __cplusplus
+
+#include <memory>
+
+#include <activation_layer.h>
+#include <addition_layer.h>
+#include <bn_layer.h>
+#include <conv2d_layer.h>
+#include <fc_layer.h>
+#include <flatten_layer.h>
+#include <input_layer.h>
+#include <layer.h>
+#include <loss_layer.h>
+#include <pooling2d_layer.h>
+
+namespace nntrainer {
+
+/**
+ * @brief Factory creator with copy constructor
+ */
+// std::unique_ptr<Layer> createLayer(LayerType type, const Layer &layer);
+
+/**
+ * @brief Factory creator with constructor
+ */
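+/*
+ * Usage sketch (illustrative): extra arguments are forwarded to the matching
+ * layer constructor, for example:
+ *
+ *   std::unique_ptr<Layer> fc = createLayer(LayerType::LAYER_FC);
+ *   std::unique_ptr<Layer> in = createLayer(LayerType::LAYER_IN);
+ *
+ * Note that every case of the switch below is instantiated for a given
+ * argument pack, so any forwarded arguments must be valid for every layer
+ * constructor; callers needing non-default settings typically create the
+ * layer with the type alone and configure it afterwards via its properties.
+ */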
+template <typename... Args>
+std::unique_ptr<Layer> createLayer(LayerType type, Args... args) {
+ switch (type) {
+ case LayerType::LAYER_IN:
+ return std::make_unique<InputLayer>(args...);
+ case LayerType::LAYER_FC:
+ return std::make_unique<FullyConnectedLayer>(args...);
+ case LayerType::LAYER_BN:
+ return std::make_unique<BatchNormalizationLayer>(args...);
+ case LayerType::LAYER_CONV2D:
+ return std::make_unique<Conv2DLayer>(args...);
+ case LayerType::LAYER_POOLING2D:
+ return std::make_unique<Pooling2DLayer>(args...);
+ case LayerType::LAYER_FLATTEN:
+ return std::make_unique<FlattenLayer>(args...);
+ case LayerType::LAYER_ACTIVATION:
+ return std::make_unique<ActivationLayer>(args...);
+ case LayerType::LAYER_ADDITION:
+ return std::make_unique<AdditionLayer>(args...);
+ case LayerType::LAYER_LOSS:
+ return std::make_unique<LossLayer>(args...);
+ case LayerType::LAYER_UNKNOWN:
+ /** fallthrough intended */
+ default:
+ throw std::invalid_argument("Unknown type for the layer");
+ }
+}
+
+} /* namespace nntrainer */
+
+#endif /* __cplusplus */
+#endif /* __LAYER_FACTORY_H__ */
/**
* @brief Constructor of Loss Layer
*/
- LossLayer() : loss_type(LossType::LOSS_UNKNOWN) {
- setType(LayerType::LAYER_LOSS);
- };
+ template <typename... Args>
+ LossLayer(LossType loss_type_ = LossType::LOSS_UNKNOWN, Args... args) :
+ Layer(LayerType::LAYER_LOSS, args...),
+ loss_type(loss_type_) {}
/**
* @brief Destructor of Loss Layer
/**
* @brief Constructor of Pooling 2D Layer
*/
- Pooling2DLayer() :
- pool_size{0, 0},
- stride{1, 1},
- padding{0, 0},
- pooling_type(PoolingType::average) {
- setType(LayerType::LAYER_POOLING2D);
- };
+ template <typename... Args>
+ Pooling2DLayer(
+ PoolingType pooling_type_ = PoolingType::average,
+ const std::array<unsigned int, POOLING2D_DIM> &pool_size_ = {0, 0},
+ const std::array<unsigned int, POOLING2D_DIM> &stride_ = {1, 1},
+ const std::array<unsigned int, POOLING2D_DIM> &padding_ = {0, 0},
+ Args... args) :
+ Layer(LayerType::LAYER_POOLING2D, args...),
+ pool_size(pool_size_),
+ stride(stride_),
+ padding(padding_),
+ pooling_type(pooling_type_) {}
/**
* @brief Destructor of Pooling 2D Layer
*/
- ~Pooling2DLayer(){};
+ ~Pooling2DLayer() {}
/**
* @brief Move constructor of Pooling 2D Layer.
void setProperty(const PropertyType type, const std::string &value = "");
private:
- unsigned int pool_size[POOLING2D_DIM];
- unsigned int stride[POOLING2D_DIM];
- unsigned int padding[POOLING2D_DIM];
+ std::array<unsigned int, POOLING2D_DIM> pool_size;
+ std::array<unsigned int, POOLING2D_DIM> stride;
+ std::array<unsigned int, POOLING2D_DIM> padding;
std::vector<unsigned int> max_idx;
std::vector<std::vector<unsigned int>> max_idx_global;
PoolingType pooling_type;
'include/flatten_layer.h',
'include/input_layer.h',
'include/layer.h',
+ 'include/layer_factory.h',
'include/lazy_tensor.h',
'include/loss_layer.h',
'include/model_loader.h',
namespace nntrainer {
/**
- * @brief Constructor of Activation Layer
- */
-ActivationLayer::ActivationLayer(ActivationType at) : Layer() {
- setType(LayerType::LAYER_ACTIVATION);
- setActivation(at);
-}
-
-/**
* @brief Initialize the layer
*
* @retval #ML_ERROR_NONE Successful.
sharedConstTensor Conv2DLayer::backwarding(sharedConstTensor derivative,
int iteration) {
- unsigned int same_pad[CONV2D_DIM];
+ std::array<unsigned int, CONV2D_DIM> same_pad;
same_pad[0] = kernel_size[0] - 1;
same_pad[1] = kernel_size[1] - 1;
opt->apply_gradients(weight_list, num_weights, iteration);
}
- return MAKE_SHARED_TENSOR(std::move(strip_pad(ret, padding)));
+ return MAKE_SHARED_TENSOR(std::move(strip_pad(ret, padding.data())));
}
void Conv2DLayer::copy(std::shared_ptr<Layer> l) {
} break;
case PropertyType::kernel_size:
if (!value.empty()) {
- status = getValues(CONV2D_DIM, value, (int *)(kernel_size));
+ status = getValues(CONV2D_DIM, value, (int *)(kernel_size.data()));
throw_status(status);
if (kernel_size[0] == 0 || kernel_size[1] == 0) {
throw std::invalid_argument(
break;
case PropertyType::stride:
if (!value.empty()) {
- status = getValues(CONV2D_DIM, value, (int *)(stride));
+ status = getValues(CONV2D_DIM, value, (int *)(stride.data()));
throw_status(status);
if (stride[0] == 0 || stride[1] == 0) {
throw std::invalid_argument(
break;
case PropertyType::padding:
if (!value.empty()) {
- status = getValues(CONV2D_DIM, value, (int *)(padding));
+ status = getValues(CONV2D_DIM, value, (int *)(padding.data()));
throw_status(status);
}
break;
return status;
}
-int Conv2DLayer::conv2d_gemm(const float *mkernel, TensorDim kdim,
- Tensor const &in, TensorDim outdim,
- unsigned int const *mstride,
- unsigned int const *pad, float *out,
- unsigned int osize, bool channel_mode) {
+int Conv2DLayer::conv2d_gemm(
+ const float *mkernel, TensorDim kdim, Tensor const &in, TensorDim outdim,
+ const std::array<unsigned int, CONV2D_DIM> &mstride,
+ const std::array<unsigned int, CONV2D_DIM> &pad, float *out,
+ unsigned int osize, bool channel_mode) {
int status = ML_ERROR_NONE;
std::vector<float> in_col;
in_col.resize(kdim.width() * kdim.height() * outdim.width());
}
- Tensor in_padded = zero_pad(0, in, pad);
+ Tensor in_padded = zero_pad(0, in, pad.data());
status =
im2col(in_padded, kdim, in_col.data(), outdim, mstride, channel_mode);
if (status != ML_ERROR_NONE)
}
int Conv2DLayer::im2col(Tensor in_padded, TensorDim kdim, float *in_col,
- TensorDim outdim, unsigned int const *mstride,
+ TensorDim outdim,
+ const std::array<unsigned int, CONV2D_DIM> &mstride,
bool channel_mode) {
int status = ML_ERROR_NONE;
#include <databuffer_file.h>
#include <databuffer_func.h>
+#include <layer_factory.h>
#include <model_loader.h>
#include <neuralnet.h>
#include <nntrainer_error.h>
iniparser_getstring(ini, (layer_name + ":Type").c_str(), unknown);
LayerType layer_type = (LayerType)parseType(layer_type_str, TOKEN_LAYER);
- switch (layer_type) {
- case LayerType::LAYER_IN:
- layer = std::make_shared<InputLayer>();
- break;
- case LayerType::LAYER_CONV2D:
- layer = std::make_shared<Conv2DLayer>();
- break;
- case LayerType::LAYER_POOLING2D:
- layer = std::make_shared<Pooling2DLayer>();
- break;
- case LayerType::LAYER_FLATTEN:
- layer = std::make_shared<FlattenLayer>();
- break;
- case LayerType::LAYER_FC:
- layer = std::make_shared<FullyConnectedLayer>();
- break;
- case LayerType::LAYER_BN:
- layer = std::make_shared<BatchNormalizationLayer>();
- break;
- case LayerType::LAYER_ACTIVATION:
- layer = std::make_shared<ActivationLayer>();
- break;
- case LayerType::LAYER_UNKNOWN:
- default:
- ml_loge("Error: Unknown layer type from %s, parsed to %d",
- layer_type_str.c_str(),
- static_cast<std::underlying_type<LayerType>::type>(layer_type));
+ try {
+ layer = createLayer(layer_type);
+ } catch (const std::exception &e) {
+ ml_loge("%s %s", typeid(e).name(), e.what());
+ status = ML_ERROR_INVALID_PARAMETER;
+ } catch (...) {
+ ml_loge("unknown error type thrown");
status = ML_ERROR_INVALID_PARAMETER;
- NN_RETURN_STATUS();
}
+ NN_RETURN_STATUS();
unsigned int property_end =
static_cast<unsigned int>(Layer::PropertyType::unknown);
hidden.setZero();
for (unsigned int b = 0; b < input_dim.batch(); ++b) {
- Tensor in_padded = zero_pad(b, input, padding);
+ Tensor in_padded = zero_pad(b, input, padding.data());
Tensor result = pooling2d(b, in_padded);
memcpy(hidden.getAddress(b * hidden.getDim().getFeatureLen()),
result.getData(), result.getDim().getDataLen() * sizeof(float));
}
case PropertyType::pool_size:
if (!value.empty()) {
- status = getValues(POOLING2D_DIM, value, (int *)(pool_size));
+ status = getValues(POOLING2D_DIM, value, (int *)(pool_size.data()));
throw_status(status);
if (pool_size[0] == 0 || pool_size[1] == 0) {
throw std::invalid_argument(
break;
case PropertyType::stride:
if (!value.empty()) {
- status = getValues(POOLING2D_DIM, value, (int *)(stride));
+ status = getValues(POOLING2D_DIM, value, (int *)(stride.data()));
throw_status(status);
if (stride[0] == 0 || stride[1] == 0) {
throw std::invalid_argument(
break;
case PropertyType::padding:
if (!value.empty()) {
- status = getValues(POOLING2D_DIM, value, (int *)(padding));
+ status = getValues(POOLING2D_DIM, value, (int *)(padding.data()));
throw_status(status);
if ((int)padding[0] < 0 || (int)padding[1] < 0) {
throw std::invalid_argument(
%{_includedir}/nntrainer/databuffer_file.h
%{_includedir}/nntrainer/databuffer_func.h
%{_includedir}/nntrainer/layer.h
+%{_includedir}/nntrainer/layer_factory.h
%{_includedir}/nntrainer/input_layer.h
%{_includedir}/nntrainer/fc_layer.h
%{_includedir}/nntrainer/bn_layer.h