# ignore build directory
/build
-# ignore iniparser downloaded by jni
+# jni build files
/jni/iniparser
+libs/
+obj/
# CTag
/tags
NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include \
$(NNTRAINER_ROOT)/api \
+ $(NNTRAINER_ROOT)/api/ccapi/include \
$(NNTRAINER_ROOT)/api/capi/include/platform
LOCAL_MODULE := nntrainer
std::shared_ptr<nntrainer::DataBufferFromCallback> DB =
std::make_shared<nntrainer::DataBufferFromCallback>();
- DB->setFunc(nntrainer::BUF_TRAIN, getBatch_train);
+ DB->setFunc(nntrainer::BufferType::BUF_TRAIN, getBatch_train);
/**
* @brief Create NN
*/
std::shared_ptr<nntrainer::DataBufferFromCallback> DB =
std::make_shared<nntrainer::DataBufferFromCallback>();
- DB->setFunc(nntrainer::BUF_TRAIN, getBatch_train);
- DB->setFunc(nntrainer::BUF_VAL, getBatch_val);
+ DB->setFunc(nntrainer::BufferType::BUF_TRAIN, getBatch_train);
+ DB->setFunc(nntrainer::BufferType::BUF_VAL, getBatch_val);
/**
* @brief Neural Network Create & Initialization
NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include \
$(NNTRAINER_ROOT)/api \
+ $(NNTRAINER_ROOT)/api/ccapi/include \
$(NNTRAINER_ROOT)/api/capi/include/platform
LOCAL_MODULE := nntrainer
NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include \
$(NNTRAINER_ROOT)/api \
+ $(NNTRAINER_ROOT)/api/ccapi/include \
$(NNTRAINER_ROOT)/api/capi/include/platform
NNTRAINER_APPLICATION := $(NNTRAINER_ROOT)/Applications
*/
std::shared_ptr<nntrainer::DataBufferFromCallback> DB =
std::make_shared<nntrainer::DataBufferFromCallback>();
- DB->setFunc(nntrainer::BUF_TRAIN, getBatch_train);
- DB->setFunc(nntrainer::BUF_VAL, getBatch_val);
+ DB->setFunc(nntrainer::BufferType::BUF_TRAIN, getBatch_train);
+ DB->setFunc(nntrainer::BufferType::BUF_VAL, getBatch_val);
/**
* @brief Neural Network Create & Initialization
NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include \
$(NNTRAINER_ROOT)/api \
+ $(NNTRAINER_ROOT)/api/ccapi/include \
$(NNTRAINER_ROOT)/api/capi/include/platform
LOCAL_MODULE := nntrainer
std::shared_ptr<nntrainer::DataBufferFromCallback> DB =
std::make_shared<nntrainer::DataBufferFromCallback>();
- DB->setFunc(nntrainer::BUF_TRAIN, getBatch_train_file);
- DB->setFunc(nntrainer::BUF_VAL, getBatch_val_file);
+ DB->setFunc(nntrainer::BufferType::BUF_TRAIN, getBatch_train_file);
+ DB->setFunc(nntrainer::BufferType::BUF_VAL, getBatch_val_file);
/**
* @brief Neural Network Create & Initialization
subdir('utils/jni')
subdir('KNN/jni')
subdir('LogisticRegression/jni')
-subdir('MNIST/jni')
+if get_option('enable-ccapi')
+ subdir('MNIST/jni')
+endif
subdir('VGG/jni')
subdir('ReinforcementLearning/DeepQ/jni')
subdir('TransferLearning/CIFAR_Classification/jni')
typedef void *ml_train_dataset_h;
/**
- * @brief Enumeration for the neural network layer type of NNTrainer.
- * @since_tizen 6.0
- */
-typedef enum {
- ML_TRAIN_LAYER_TYPE_INPUT = 0, /**< Input Layer */
- ML_TRAIN_LAYER_TYPE_FC, /**< Fully Connected Layer */
- ML_TRAIN_LAYER_TYPE_UNKNOWN = 999 /**< Unknown Layer */
-} ml_train_layer_type_e;
-
-/**
- * @brief Enumeration for the neural network optimizer type of NNTrainer.
- * @since_tizen 6.0
- */
-typedef enum {
- ML_TRAIN_OPTIMIZER_TYPE_ADAM = 0, /**< Adam Optimizer */
- ML_TRAIN_OPTIMIZER_TYPE_SGD, /**< Stochastic Gradient Descent Optimizer */
- ML_TRAIN_OPTIMIZER_TYPE_UNKNOWN = 999 /**< Unknown Optimizer */
-} ml_train_optimizer_type_e;
-
-/**
* @brief Constructs the neural network model.
* @details Use this function to create neural network model.
* @since_tizen 6.0
#ifndef __NNTRAINER_INTERNAL_H__
#define __NNTRAINER_INTERNAL_H__
-#include <layer.h>
+#include <layer_internal.h>
#include <mutex>
#include <neuralnet.h>
#include <nntrainer.h>
#include <nntrainer_log.h>
-#include <optimizer.h>
+#include <optimizer_internal.h>
#include <string>
#include <unordered_map>
* @author Parichay Kapoor <pk.kapoor@samsung.com>
* @bug No known bugs except for NYI items
*/
-#include <databuffer.h>
-#include <databuffer_file.h>
-#include <databuffer_func.h>
+#include <databuffer_factory.h>
#include <layer_factory.h>
+#include <layer_internal.h>
#include <neuralnet.h>
#include <nntrainer_error.h>
#include <nntrainer_internal.h>
nnlayer->in_use = false;
returnable f = [&]() {
- nnlayer->layer = createLayer(ml_layer_to_nntrainer_type(type));
+ nnlayer->layer = nntrainer::createLayer(ml_layer_to_nntrainer_type(type));
return ML_ERROR_NONE;
};
nnopt->in_use = false;
returnable f = [&]() {
- nnopt->optimizer = createOptimizer(ml_optimizer_to_nntrainer_type(type));
+ nnopt->optimizer =
+ nntrainer::createOptimizer(ml_optimizer_to_nntrainer_type(type));
return ML_ERROR_NONE;
};
if (!train_cb)
return ML_ERROR_INVALID_PARAMETER;
- std::shared_ptr<nntrainer::DataBufferFromCallback> data_buffer;
+ std::shared_ptr<nntrainer::DataBuffer> data_buffer;
+
+ returnable f = [&]() {
+ data_buffer =
+ nntrainer::createDataBuffer(nntrainer::DataBufferType::GENERATOR);
+ return ML_ERROR_NONE;
+ };
- status = exception_bounded_make_shared<nntrainer::DataBufferFromCallback>(
- data_buffer);
+ status = nntrainer_exception_boundary(f);
if (status != ML_ERROR_NONE) {
ml_loge("Error: Create dataset failed");
return status;
}
- returnable f = [&]() {
- return data_buffer->setFunc(nntrainer::BUF_TRAIN, train_cb);
+ f = [&]() {
+ return data_buffer->setFunc(nntrainer::BufferType::BUF_TRAIN, train_cb);
};
status = nntrainer_exception_boundary(f);
return status;
}
- f = [&]() { return data_buffer->setFunc(nntrainer::BUF_VAL, valid_cb); };
+ f = [&]() {
+ return data_buffer->setFunc(nntrainer::BufferType::BUF_VAL, valid_cb);
+ };
status = nntrainer_exception_boundary(f);
if (status != ML_ERROR_NONE) {
return status;
}
- f = [&]() { return data_buffer->setFunc(nntrainer::BUF_TEST, test_cb); };
+ f = [&]() {
+ return data_buffer->setFunc(nntrainer::BufferType::BUF_TEST, test_cb);
+ };
status = nntrainer_exception_boundary(f);
if (status != ML_ERROR_NONE) {
check_feature_state();
- std::shared_ptr<nntrainer::DataBufferFromDataFile> data_buffer;
+ std::shared_ptr<nntrainer::DataBuffer> data_buffer;
+ std::shared_ptr<nntrainer::DataBufferFromDataFile> data_buffer_file;
+
+ returnable f = [&]() {
+ data_buffer = nntrainer::createDataBuffer(nntrainer::DataBufferType::FILE);
+ return ML_ERROR_NONE;
+ };
- status = exception_bounded_make_shared<nntrainer::DataBufferFromDataFile>(
- data_buffer);
+ status = nntrainer_exception_boundary(f);
if (status != ML_ERROR_NONE) {
ml_loge("Error: Create dataset failed");
return status;
}
+ data_buffer_file =
+ std::static_pointer_cast<nntrainer::DataBufferFromDataFile>(data_buffer);
+
if (train_file) {
- status = data_buffer->setDataFile(train_file, nntrainer::DATA_TRAIN);
+ status = data_buffer_file->setDataFile(train_file, nntrainer::DATA_TRAIN);
if (status != ML_ERROR_NONE) {
return status;
}
}
if (valid_file) {
- status = data_buffer->setDataFile(valid_file, nntrainer::DATA_VAL);
+ status = data_buffer_file->setDataFile(valid_file, nntrainer::DATA_VAL);
if (status != ML_ERROR_NONE) {
return status;
}
}
if (test_file) {
- status = data_buffer->setDataFile(test_file, nntrainer::DATA_TEST);
+ status = data_buffer_file->setDataFile(test_file, nntrainer::DATA_TEST);
if (status != ML_ERROR_NONE) {
return status;
}
ml_optimizer_to_nntrainer_type(ml_train_optimizer_type_e type) {
switch (type) {
case ML_TRAIN_OPTIMIZER_TYPE_ADAM:
- return nntrainer::OptType::adam;
+ return nntrainer::OptType::ADAM;
case ML_TRAIN_OPTIMIZER_TYPE_SGD:
- return nntrainer::OptType::sgd;
+ return nntrainer::OptType::SGD;
default:
- return nntrainer::OptType::unknown;
+ return nntrainer::OptType::UNKNOWN;
}
}
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file dataset.h
+ * @date 14 October 2020
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Jijoong Moon <jijoong.moon@samsung.com>
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ * @brief This is dataset interface for c++ API
+ *
+ * @note This is experimental API and not stable.
+ */
+
+#ifndef __ML_TRAIN_DATASET_H__
+#define __ML_TRAIN_DATASET_H__
+
+#if __cplusplus >= MIN_CPP_VERSION
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <nntrainer-api-common.h>
+
+namespace ml {
+namespace train {
+
+/**
+ * @brief Dataset generator callback type declaration
+ */
+typedef std::function<std::remove_pointer<ml_train_datagen_cb>::type>
+ datagen_cb;
+
+/**
+ * @brief Enumeration for dataset type
+ */
+enum class DatasetType {
+ GENERATOR, /** Dataset with generators */
+ FILE, /** Dataset with files */
+ UNKNOWN /** Unknown dataset type */
+};
+
+/**
+ * @brief Enumeration of buffer type
+ */
+enum class BufferType {
+ BUF_TRAIN, /** BUF_TRAIN ( Buffer for training ) */
+ BUF_VAL, /** BUF_VAL ( Buffer for validation ) */
+ BUF_TEST, /** BUF_TEST ( Buffer for test ) */
+ BUF_UNKNOWN /** BUF_UNKNOWN ( unknown ) */
+};
+
+/**
+ * @class Dataset class for input data
+ * @brief Dataset to read and manage input data
+ */
+class Dataset {
+public:
+ /**
+ * @brief Destructor
+ */
+ virtual ~Dataset() = default;
+
+ /**
+ * @brief set function pointer for each type
+ * @param[in] type Buffer Type
+   * @param[in] func callback function pointer
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int setFunc(BufferType type, datagen_cb func) = 0;
+
+ /**
+ * @brief set property
+ * @param[in] values values of property
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int setProperty(std::vector<std::string> values) = 0;
+};
+
+/**
+ * @brief Factory creator with constructor for dataset
+ */
+std::unique_ptr<Dataset>
+createDataset(DatasetType type,
+ const std::vector<std::string> &properties = {});
+
+} // namespace train
+} // namespace ml
+
+#else
+#error "CPP versions c++14 or over are only supported"
+#endif // __cplusplus
+#endif // __ML_TRAIN_DATASET_H__
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file layer.h
+ * @date 14 October 2020
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ * @brief This is layers interface for c++ API
+ *
+ * @note This is experimental API and not stable.
+ */
+
+#ifndef __ML_TRAIN_LAYER_H__
+#define __ML_TRAIN_LAYER_H__
+
+#if __cplusplus >= MIN_CPP_VERSION
+
+#include <string>
+#include <vector>
+
+#include <nntrainer-api-common.h>
+
+namespace ml {
+namespace train {
+
+/**
+ * @brief Enumeration of layer type
+ */
+enum class LayerType {
+ LAYER_IN = ML_TRAIN_LAYER_TYPE_INPUT, /** Input Layer type */
+ LAYER_FC = ML_TRAIN_LAYER_TYPE_FC, /** Fully Connected Layer type */
+ LAYER_BN, /** Batch Normalization Layer type */
+ LAYER_CONV2D, /** Convolution 2D Layer type */
+ LAYER_POOLING2D, /** Pooling 2D Layer type */
+ LAYER_FLATTEN, /** Flatten Layer type */
+  LAYER_ACTIVATION, /** Activation Layer type */
+  LAYER_ADDITION, /** Addition Layer type */
+  LAYER_LOSS, /** Loss Layer type */
+ LAYER_UNKNOWN = ML_TRAIN_LAYER_TYPE_UNKNOWN /** Unknown */
+};
+
+/**
+ * @class Layer Base class for layers
+ * @brief Base class for all layers
+ */
+class Layer {
+public:
+ /**
+ * @brief Destructor of Layer Class
+ */
+ virtual ~Layer() = default;
+
+ /**
+ * @brief set Property of layer
+ * @param[in] values values of property
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int setProperty(std::vector<std::string> values) = 0;
+
+ /**
+ * @brief Property Enumeration
+   * 0. input_shape : string
+   * 1. normalization : bool
+   * 2. standardization : bool
+   * 3. activation : string (type)
+   * 4. epsilon : float
+   * 5. weight_regularizer : string (type)
+   * 6. weight_regularizer_constant : float
+   * 7. unit : int
+   * 8. weight_initializer : string (type)
+   * 9. bias_initializer : string (type)
+   * 10. filters : int
+ * 11. kernel_size : ( n , m )
+ * 12. stride : ( n, m )
+ * 13. padding : ( n, m )
+ * 14. pool_size : ( n,m )
+ * 15. pooling : max, average, global_max, global_average
+ * 16. flatten : bool
+ * 17. name : string (type)
+ * 18. num_inputs : unsigned int (minimum 1)
+ * 19. num_outputs : unsigned int (minimum 1)
+ * 20. momentum : float,
+ * 21. moving_mean_initializer : string (type),
+ * 22. moving_variance_initializer : string (type),
+ * 23. gamma_initializer : string (type),
+ * 24. beta_initializer" : string (type)
+ */
+ enum class PropertyType {
+ input_shape = 0,
+ normalization = 1,
+ standardization = 2,
+ activation = 3,
+ epsilon = 4,
+ weight_regularizer = 5,
+ weight_regularizer_constant = 6,
+ unit = 7,
+ weight_initializer = 8,
+ bias_initializer = 9,
+ filters = 10,
+ kernel_size = 11,
+ stride = 12,
+ padding = 13,
+ pool_size = 14,
+ pooling = 15,
+ flatten = 16,
+ name = 17,
+ num_inputs = 18,
+ num_outputs = 19,
+ momentum = 20,
+ moving_mean_initializer = 21,
+ moving_variance_initializer = 22,
+ gamma_initializer = 23,
+ beta_initializer = 24,
+ unknown
+ };
+
+ /**
+ * @brief setProperty by PropertyType
+ * @note By passing empty string, this can validate if @a type is valid
+ * @param[in] type property type to be passed
+ * @param[in] value value to be passed, if empty string is passed, do nothing
+ * but throws error when @a type is invalid
+ * @exception exception::not_supported when property type is not valid for
+ * the particular layer
+ * @exception std::invalid_argument invalid argument
+ * @note A layer need not support all the properties from PropertyType, but
+ * the supported properties will be a subset of PropertyType.
+ */
+ virtual void setProperty(const PropertyType type,
+ const std::string &value = "") = 0;
+
+ /**
+ * @brief check hyper parameter for the layer
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int checkValidation() = 0;
+
+ /**
+ * @brief get the loss value added by this layer
+ * @retval loss value
+ */
+ virtual float getLoss() = 0;
+
+ /**
+ * @brief set trainable for this layer
+ * @param[in] train to enable/disable train
+ */
+ virtual void setTrainable(bool train) = 0;
+
+ /**
+ * @brief get if the output of this layer must be flatten
+ * @retval flatten value
+ */
+ virtual bool getFlatten() = 0;
+
+ /**
+ * @brief Get name of the layer
+ */
+ virtual std::string getName() noexcept = 0;
+
+ /**
+ * @brief Preset modes for printing summary for the layer
+ */
+ enum class PrintPreset {
+ PRINT_NONE = 0, /**< Print nothing */
+ PRINT_SUMMARY, /**< Print preset including summary information */
+ PRINT_SUMMARY_META, /**< Print summary preset that includes meta information
+ */
+ PRINT_ALL /**< Print everything possible */
+ };
+
+ /**
+ * @brief print using PrintPreset
+ *
+ * @param out oustream
+ * @param preset preset to be used
+ */
+ virtual void printPreset(std::ostream &out,
+ PrintPreset preset = PrintPreset::PRINT_SUMMARY) = 0;
+};
+
+/**
+ * @brief Factory creator with constructor for layer
+ */
+std::unique_ptr<Layer>
+createLayer(LayerType type, const std::vector<std::string> &properties = {});
+
+} // namespace train
+} // namespace ml
+
+#else
+#error "CPP versions c++14 or over are only supported"
+#endif // __cplusplus
+#endif // __ML_TRAIN_LAYER_H__
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file model.h
+ * @date 14 October 2020
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Jijoong Moon <jijoong.moon@samsung.com>
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ * @brief This is model interface for c++ API
+ *
+ * @note This is experimental API and not stable.
+ */
+
+#ifndef __ML_TRAIN_MODEL_H__
+#define __ML_TRAIN_MODEL_H__
+
+#if __cplusplus >= MIN_CPP_VERSION
+
+#include <string>
+#include <vector>
+
+#include <dataset.h>
+#include <layer.h>
+#include <optimizer.h>
+
+/** Define more aliases for the model in the API */
+namespace ml {
+namespace train {
+
+/**
+ * @brief Enumeration of Network Type
+ */
+enum class ModelType {
+ KNN, /** k Nearest Neighbor */
+ REGRESSION, /** Logistic Regression */
+ NEURAL_NET, /** Neural Network */
+ UNKNOWN /** Unknown */
+};
+
+/**
+ * @class Model Class
+ * @brief Model Class containing configuration, layers, optimizer and dataset
+ */
+class Model {
+public:
+ /**
+ * @brief Destructor of Model Class
+ */
+ virtual ~Model() = default;
+
+ /**
+ * @brief Get Loss from the previous ran batch of data
+ * @retval loss value
+ */
+ virtual float getLoss() = 0;
+
+ /**
+ * @brief Get Loss from the previous epoch of training data
+ * @retval loss value
+ */
+ virtual float getTrainingLoss() = 0;
+
+ /**
+ * @brief Get Loss from the previous epoch of validation data
+ * @retval loss value
+ */
+ virtual float getValidationLoss() = 0;
+
+ /**
+ * @brief Get Learning rate
+ * @retval Learning rate
+ */
+ virtual float getLearningRate() = 0;
+
+ /**
+ * @brief Create and load the Network with ini configuration file.
+ * @param[in] config config file path
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int loadFromConfig(std::string config) = 0;
+
+ /**
+ * @brief set Property of Network
+ * @param[in] values values of property
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int setProperty(std::vector<std::string> values) = 0;
+
+ /**
+ * @brief Initialize Network. This should be called after set all
+ * hyperparameters.
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int init() = 0;
+
+ /**
+ * @brief save model and training parameters into file
+ */
+ virtual void saveModel() = 0;
+
+ /**
+ * @brief read model and training parameters from file
+ */
+ virtual void readModel() = 0;
+
+ /**
+ * @brief get Epochs
+ * @retval epochs
+ */
+ virtual unsigned int getEpochs() = 0;
+
+ /**
+ * @brief Run Model train
+ * @param[in] values hyper parameters
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int train(std::vector<std::string> values = {}) = 0;
+
+ /**
+   * @brief Set the dataset for the model to use for train/valid/test
+   * @param[in] dataset dataset to be set
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int setDataset(std::shared_ptr<Dataset> dataset) = 0;
+
+ /**
+ * @brief add layer into neural network model
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int addLayer(std::shared_ptr<Layer> layer) = 0;
+
+ /**
+ * @brief set optimizer for the neural network model
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int setOptimizer(std::shared_ptr<Optimizer> optimizer) = 0;
+
+  /**
+ * @brief get layer by name from neural network model
+ * @param[in] name name of the layer to get
+ * @param[out] layer shared_ptr to hold the layer to get
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int getLayer(const char *name, std::shared_ptr<Layer> *layer) = 0;
+
+ /**
+ * @brief Property Enumeration
+ */
+ enum class PropertyType {
+ loss = 0,
+ loss_type = 1,
+ batch_size = 2,
+ epochs = 3,
+ save_path = 4,
+ continue_train = 5,
+ unknown = 6
+ };
+
+ /**
+ * @brief Print Option when printing model info. The function delegates to the
+ * `print`
+ * @param out std::ostream to print
+ * @param preset preset from `ml_train_summary_type_e`
+ */
+ virtual void printPreset(std::ostream &out, unsigned int preset) = 0;
+};
+
+/**
+ * @brief Factory creator with constructor for model
+ */
+std::unique_ptr<Model>
+createModel(ModelType type, const std::vector<std::string> &properties = {});
+
+} // namespace train
+} // namespace ml
+
+#else
+#error "CPP versions c++14 or over are only supported"
+#endif // __cplusplus
+#endif // __ML_TRAIN_MODEL_H__
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file optimizer.h
+ * @date 14 October 2020
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Jijoong Moon <jijoong.moon@samsung.com>
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ * @brief This is optimizers interface for c++ API
+ *
+ * @note This is experimental API and not stable.
+ */
+
+#ifndef __ML_TRAIN_OPTIMIZER_H__
+#define __ML_TRAIN_OPTIMIZER_H__
+
+#if __cplusplus >= MIN_CPP_VERSION
+
+#include <string>
+#include <vector>
+
+#include <nntrainer-api-common.h>
+
+namespace ml {
+namespace train {
+
+/**
+ * @brief Enumeration of optimizer type
+ */
+enum class OptimizerType {
+ ADAM = ML_TRAIN_OPTIMIZER_TYPE_ADAM, /** adam */
+ SGD = ML_TRAIN_OPTIMIZER_TYPE_SGD, /** sgd */
+ UNKNOWN = ML_TRAIN_OPTIMIZER_TYPE_UNKNOWN /** unknown */
+};
+
+/**
+ * @class Optimizer Base class for optimizers
+ * @brief Base class for all optimizers
+ */
+class Optimizer {
+public:
+ /**
+ * @brief Destructor of Optimizer Class
+ */
+ virtual ~Optimizer() = default;
+
+ /**
+ * @brief get Optimizer Type
+ * @retval Optimizer type
+ */
+ virtual OptimizerType getType() = 0;
+
+ /**
+ * @brief get Learning Rate
+ * @retval Learning rate
+ */
+ virtual float getLearningRate() = 0;
+
+ /**
+ * @brief get Decay Rate for learning rate decay
+ * @retval decay rate
+ */
+ virtual float getDecayRate() = 0;
+
+ /**
+ * @brief get Decay Steps for learning rate decay
+ * @retval decay steps
+ */
+ virtual float getDecaySteps() = 0;
+
+ /**
+ * @brief set Optimizer Parameters
+ * @param[in] values Optimizer Parameter list
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int setProperty(std::vector<std::string> values) = 0;
+
+ /**
+ * @brief Property Enumeration
+ * learning_rate : float ,
+ * decay_rate : float,
+ * decay_steps : float,
+ * beta1 : float,
+ * beta2 : float,
+ * epsilon : float,
+ */
+ enum class PropertyType {
+ learning_rate = 0,
+ decay_rate = 1,
+ decay_steps = 2,
+ beta1 = 3,
+ beta2 = 4,
+ epsilon = 5,
+ continue_train = 6,
+ unknown = 7,
+ };
+
+ /**
+ * @brief setProperty by PropertyType
+ * @note By passing empty string, this can validate if @a type is valid
+ * @param[in] type property type to be passed
+ * @param[in] value value to be passed, if empty string is passed, do nothing
+ * but throws error when @a type is invalid
+ * @exception exception::not_supported when property type is not valid for
+ * the particular layer
+ * @exception std::invalid_argument invalid argument
+ */
+ virtual void setProperty(const PropertyType type,
+ const std::string &value = "") = 0;
+
+ /**
+ * @brief validate the optimizer
+ */
+ virtual void checkValidation() = 0;
+};
+
+/**
+ * @brief Factory creator with constructor for optimizer
+ */
+std::unique_ptr<Optimizer>
+createOptimizer(OptimizerType type,
+ const std::vector<std::string> &properties = {});
+
+} // namespace train
+} // namespace ml
+
+#else
+#error "CPP versions c++14 or over are only supported"
+#endif // __cplusplus
+#endif // __ML_TRAIN_OPTIMIZER_H__
--- /dev/null
+ccapi_inc = []
+ccapi_inc += include_directories('include')
+ccapi_inc += include_directories('..')
+
+ccapi_src = []
+ccapi_src += join_paths(meson.current_source_dir(), 'src', 'factory.cpp')
+
+ccapi_headers = []
+ccapi_headers += join_paths(meson.current_source_dir(), 'include', 'dataset.h')
+ccapi_headers += join_paths(meson.current_source_dir(), 'include', 'layer.h')
+ccapi_headers += join_paths(meson.current_source_dir(), 'include', 'model.h')
+ccapi_headers += join_paths(meson.current_source_dir(), 'include', 'optimizer.h')
+ccapi_headers += join_paths(meson.current_source_dir(), '..', 'nntrainer-api-common.h')
+
+ccapi_deps = [
+ nntrainer_dep
+]
+
+shared_library('ccapi-nntrainer',
+ ccapi_src,
+ dependencies: ccapi_deps,
+ include_directories: ccapi_inc,
+ install: true,
+ install_dir: nntrainer_libdir,
+)
+
+nntrainer_ccapi_lib = static_library('ccapi-nntrainer',
+ ccapi_src,
+ dependencies: ccapi_deps,
+ include_directories: ccapi_inc,
+ install: true,
+ install_dir: nntrainer_libdir,
+)
+
+nntrainer_ccapi_dep = declare_dependency(link_with: nntrainer_ccapi_lib,
+ dependencies: ccapi_deps,
+ include_directories: ccapi_inc,
+)
+
+# configure_file(input: 'ccapi-nntrainer.pc.in', output: 'ccapi-nntrainer.pc',
+# install_dir: join_paths(nntrainer_libdir, 'pkgconfig'),
+# configuration: nntrainer_conf
+# )
+
+install_headers(ccapi_headers,
+ subdir: 'nntrainer'
+)
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file factory.cpp
+ * @date 14 October 2020
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ * @brief This is implementation for factory builder interface for c++ API
+ */
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include <databuffer.h>
+#include <databuffer_factory.h>
+#include <layer.h>
+#include <layer_factory.h>
+#include <model.h>
+#include <neuralnet.h>
+#include <nntrainer_error.h>
+#include <optimizer.h>
+#include <optimizer_factory.h>
+
+namespace ml {
+namespace train {
+
+/**
+ * @brief Factory creator with constructor for layer
+ */
+std::unique_ptr<Layer> createLayer(LayerType type,
+ const std::vector<std::string> &properties) {
+ std::unique_ptr<Layer> layer = nntrainer::createLayer(type);
+
+ if (layer->setProperty(properties) != ML_ERROR_NONE)
+ throw std::invalid_argument("Set properties failed for layer");
+
+ return layer;
+}
+
+/**
+ * @brief Factory creator with constructor for optimizer
+ */
+std::unique_ptr<Optimizer>
+createOptimizer(OptimizerType type,
+ const std::vector<std::string> &properties) {
+ std::unique_ptr<Optimizer> optimizer = nntrainer::createOptimizer(type);
+
+ if (optimizer->setProperty(properties) != ML_ERROR_NONE)
+ throw std::invalid_argument("Set properties failed for optimizer");
+
+ return optimizer;
+}
+
+/**
+ * @brief Factory creator with constructor for model
+ */
+std::unique_ptr<Model> createModel(ModelType type,
+ const std::vector<std::string> &properties) {
+ std::unique_ptr<Model> model;
+ switch (type) {
+ case ModelType::NEURAL_NET:
+ model = std::make_unique<nntrainer::NeuralNetwork>();
+ break;
+ default:
+ throw std::invalid_argument("This type of model is not yet supported");
+ }
+
+ if (model->setProperty(properties) != ML_ERROR_NONE)
+ throw std::invalid_argument("Set properties failed for model");
+
+ return model;
+}
+
+/**
+ * @brief Factory creator with constructor for dataset
+ */
+std::unique_ptr<Dataset>
+createDataset(DatasetType type, const std::vector<std::string> &properties) {
+ std::unique_ptr<Dataset> dataset = nntrainer::createDataBuffer(type);
+
+ if (dataset->setProperty(properties) != ML_ERROR_NONE)
+ throw std::invalid_argument("Set properties failed for dataset");
+
+ return dataset;
+}
+
+} // namespace train
+} // namespace ml
if get_option('enable-capi')
subdir('capi')
endif
+
+
+if get_option('enable-ccapi')
+ subdir('ccapi')
+endif
#ifndef __TIZEN_MACHINELEARNING_NNTRAINER_API_COMMON_H__
#define __TIZEN_MACHINELEARNING_NNTRAINER_API_COMMON_H__
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
/**
* @addtogroup CAPI_ML_NNTRAINER_TRAIN_MODULE
* @{
*/
/**
+ * @brief Enumeration for the neural network layer type of NNTrainer.
+ * @since_tizen 6.0
+ */
+typedef enum {
+ ML_TRAIN_LAYER_TYPE_INPUT = 0, /**< Input Layer */
+ ML_TRAIN_LAYER_TYPE_FC, /**< Fully Connected Layer */
+ ML_TRAIN_LAYER_TYPE_UNKNOWN = 999 /**< Unknown Layer */
+} ml_train_layer_type_e;
+
+/**
+ * @brief Enumeration for the neural network optimizer type of NNTrainer.
+ * @since_tizen 6.0
+ */
+typedef enum {
+ ML_TRAIN_OPTIMIZER_TYPE_ADAM = 0, /**< Adam Optimizer */
+ ML_TRAIN_OPTIMIZER_TYPE_SGD, /**< Stochastic Gradient Descent Optimizer */
+ ML_TRAIN_OPTIMIZER_TYPE_UNKNOWN = 999 /**< Unknown Optimizer */
+} ml_train_optimizer_type_e;
+
+/**
* @brief Dataset generator callback function for train/valid/test data.
*
* @details The user of the API must provide this callback function to supply
/**
* @}
*/
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
#endif /* __TIZEN_MACHINELEARNING_NNTRAINER_API_COMMON_H__ */
NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include \
$(NNTRAINER_ROOT)/api \
+ $(NNTRAINER_ROOT)/api/ccapi/include \
$(NNTRAINER_ROOT)/api/capi/include/platform
INIPARSER_SRCS := $(INIPARSER_ROOT)/src/iniparser.c \
LOCAL_MODULE := nntrainer
LOCAL_SRC_FILES := $(NNTRAINER_SRCS) $(INIPARSER_SRCS)
-LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES) $(INIPARSER_INCLUDES)
+LOCAL_C_INCLUDES := $(NNTRAINER_INCLUDES) $(INIPARSER_INCLUDES)
include $(BUILD_SHARED_LIBRARY)
CAPI_NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include \
$(NNTRAINER_ROOT)/api \
+ $(NNTRAINER_ROOT)/api/ccapi/include \
$(NNTRAINER_ROOT)/api/capi/include \
$(NNTRAINER_ROOT)/api/capi/include/platform
LOCAL_MODULE := capi-nntrainer
LOCAL_SRC_FILES := $(CAPI_NNTRAINER_SRCS)
-LOCAL_C_INCLUDES += $(CAPI_NNTRAINER_INCLUDES)
+LOCAL_C_INCLUDES := $(CAPI_NNTRAINER_INCLUDES)
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+CCAPI_NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include \
+ $(NNTRAINER_ROOT)/api \
+ $(NNTRAINER_ROOT)/api/ccapi/include
+
+LOCAL_SHARED_LIBRARIES := nntrainer
+
+LOCAL_ARM_NEON := true
+LOCAL_CFLAGS += -pthread -fopenmp -fexceptions
+LOCAL_CXXFLAGS += -std=c++14 -frtti -fexceptions
+LOCAL_LDFLAGS += -fuse-ld=bfd -fopenmp
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_LDLIBS := -llog
+
+LOCAL_MODULE := ccapi-nntrainer
+LOCAL_SRC_FILES :=
+LOCAL_C_INCLUDES := $(CCAPI_NNTRAINER_INCLUDES)
include $(BUILD_SHARED_LIBRARY)
'cpp_std=c++14'
]
)
+add_project_arguments('-DMIN_CPP_VERSION=201402', language:['c','cpp'])
cc = meson.get_compiler('c')
cxx = meson.get_compiler('cpp')
option('install-app', type: 'boolean', value: true)
option('use_gym', type: 'boolean', value: false)
option('enable-capi', type: 'boolean', value: true)
+option('enable-ccapi', type: 'boolean', value: true)
option('enable-test', type: 'boolean', value: true)
option('enable-logging', type: 'boolean', value: true)
option('enable-tizen-feature-check', type: 'boolean', value: true)
#define __ACTIVATION_LAYER_H__
#ifdef __cplusplus
-#include <layer.h>
+#include <layer_internal.h>
#include <tensor.h>
namespace nntrainer {
#define __ADAM_H__
#ifdef __cplusplus
-#include <optimizer.h>
+#include <optimizer_internal.h>
namespace nntrainer {
template <typename... Args>
Adam(float lr = 0.001f, double b1 = 0.9f, double b2 = 0.999f,
double ep = 1.0e-7f, Args... args) :
- Optimizer(OptType::adam, lr, args...),
+ Optimizer(OptType::ADAM, lr, args...),
beta1(b1),
beta2(b2),
epsilon(ep) {}
#define __ADDITION_LAYER_H__
#ifdef __cplusplus
-#include <layer.h>
+#include <layer_internal.h>
#include <tensor.h>
namespace nntrainer {
#include <functional>
#include <vector>
-#include <layer.h>
+#include <layer_internal.h>
#include <tensor.h>
namespace nntrainer {
#define __CONV2D_LAYER_H_
#ifdef __cplusplus
-#include <layer.h>
+#include <layer_internal.h>
#include <tensor.h>
namespace nntrainer {
#include <thread>
#include <vector>
+#include <dataset.h>
#include <tensor_dim.h>
namespace nntrainer {
/**
- * @brief Enumeration of buffer type
- * 0. BUF_TRAIN ( Buffer for training )
- * 1. BUF_VAL ( Buffer for validation )
- * 2. BUF_TEST ( Buffer for test )
- * 3. BUF_UNKNOWN
- */
-typedef enum { BUF_TRAIN, BUF_VAL, BUF_TEST, BUF_UNKNOWN } BufferType;
-
-/**
* @brief Enumeration of data type
* 0. DATA_TRAIN ( Data for training )
* 1. DATA_VAL ( Data for validation )
} DataType;
/**
- * @brief Enumeration for data buffer type
- * 0. DATA_BUFFER_GENERATOR
- * 1. DATA_BUFFER_FILE
- * 2. DATA_BUFFER_UNKNOWN
+ * @brief Aliasing from ccapi ml::train
*/
-typedef enum {
- DATA_BUFFER_GENERATOR,
- DATA_BUFFER_FILE,
- DATA_BUFFER_UNKNOWN
-} DataBufferType;
+using DataBufferType = ml::train::DatasetType;
+using BufferType = ml::train::BufferType;
+using datagen_cb = ml::train::datagen_cb;
/**
* @class DataBuffer Data Buffers
* @brief Data Buffer for read and manage data
*/
-class DataBuffer {
+class DataBuffer : public ml::train::Dataset {
public:
/**
* @brief Create Buffer
DataBuffer(DataBufferType type);
/**
- * @brief Destructor
- */
- virtual ~DataBuffer(){};
-
- /**
* @brief Initialize Buffer with data buffer private variables
* @retval #ML_ERROR_NONE Successful.
* @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
*/
int setProperty(std::vector<void *> values);
+ /**
+ * @brief set function pointer for each type
+ * @param[in] type Buffer Type
+ * @param[in] call back function pointer
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ virtual int setFunc(BufferType type, datagen_cb func);
+
enum class PropertyType {
train_data = 0,
val_data = 1,
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file databuffer_factory.h
+ * @date 19 October 2020
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ * @brief This is the databuffer factory.
+ */
+
+#ifndef __DATABUFFER_FACTORY_H__
+#define __DATABUFFER_FACTORY_H__
+#ifdef __cplusplus
+
+#include <databuffer.h>
+#include <databuffer_file.h>
+#include <databuffer_func.h>
+
+namespace nntrainer {
+
+/**
+ * @brief Factory creator with constructor
+ */
+std::unique_ptr<DataBuffer> createDataBuffer(DataBufferType type) {
+ switch (type) {
+ case DataBufferType::GENERATOR:
+ return std::make_unique<DataBufferFromCallback>();
+ case DataBufferType::FILE:
+ return std::make_unique<DataBufferFromDataFile>();
+ case DataBufferType::UNKNOWN:
+ /** fallthrough intended */
+ default:
+ throw std::invalid_argument("Unknown type for the dataset");
+ }
+}
+
+} /* namespace nntrainer */
+
+#endif /* __cplusplus */
+#endif /* __DATABUFFER_FACTORY_H__ */
/**
* @brief Constructor
*/
- DataBufferFromDataFile() : DataBuffer(DataBufferType::DATA_BUFFER_FILE){};
+ DataBufferFromDataFile() : DataBuffer(DataBufferType::FILE) {}
/**
* @brief Destructor
*/
- ~DataBufferFromDataFile(){};
+ ~DataBufferFromDataFile() = default;
/**
* @brief Initialize Buffer with data buffer private variables
namespace nntrainer {
/**
- * @brief Dataset generator callback type declaration
- */
-typedef std::function<std::remove_pointer<ml_train_datagen_cb>::type>
- datagen_cb;
-
-/**
* @class DataBufferFromCallback Data Buffer from callback given by user
* @brief Data Buffer from callback function
*/
/**
* @brief Constructor
*/
- DataBufferFromCallback() :
- DataBuffer(DataBufferType::DATA_BUFFER_GENERATOR){};
+ DataBufferFromCallback() : DataBuffer(DataBufferType::GENERATOR){};
/**
* @brief Destructor
#define NN_EXCEPTION_NOTI(val) \
do { \
switch (type) { \
- case BUF_TRAIN: { \
+ case BufferType::BUF_TRAIN: { \
std::lock_guard<std::mutex> lgtrain(readyTrainData); \
trainReadyFlag = val; \
cv_train.notify_all(); \
} break; \
- case BUF_VAL: { \
+ case BufferType::BUF_VAL: { \
std::lock_guard<std::mutex> lgval(readyValData); \
valReadyFlag = val; \
cv_val.notify_all(); \
} break; \
- case BUF_TEST: { \
+ case BufferType::BUF_TEST: { \
std::lock_guard<std::mutex> lgtest(readyTestData); \
testReadyFlag = val; \
cv_test.notify_all(); \
#define __FC_LAYER_H__
#ifdef __cplusplus
-#include <layer.h>
+#include <layer_internal.h>
#include <tensor.h>
namespace nntrainer {
#define __FLATTEN_LAYER_H__
#ifdef __cplusplus
-#include <layer.h>
+#include <layer_internal.h>
#include <tensor.h>
namespace nntrainer {
#define __INPUT_LAYER_H__
#ifdef __cplusplus
-#include <layer.h>
+#include <layer_internal.h>
#include <tensor.h>
namespace nntrainer {
/**
* Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
*
- * @file optimizer_factory.h
+ * @file layer_factory.h
* @date 7 October 2020
* @see https://github.com/nnstreamer/nntrainer
* @author Parichay Kapoor <pk.kapoor@samsung.com>
#include <fc_layer.h>
#include <flatten_layer.h>
#include <input_layer.h>
-#include <layer.h>
+#include <layer_internal.h>
#include <loss_layer.h>
#include <pooling2d_layer.h>
namespace nntrainer {
/**
- * @brief Factory creator with copy constructor
- */
-// std::unique_ptr<Optimizer> createOptimizer(OptType type, const Optimizer
-// &opt);
-
-/**
* @brief Factory creator with constructor
*/
template <typename... Args>
* See the License for the specific language governing permissions and
* limitations under the License.
*
- * @file layer.h
+ * @file layer_internal.h
* @date 04 December 2019
* @brief This is Layer classes of Neural Network
* @see https://github.com/nnstreamer/nntrainer
* @bug No known bugs except for NYI items
*
*/
-#ifndef __LAYERS_H__
-#define __LAYERS_H__
+#ifndef __LAYER_H__
+#define __LAYER_H__
#ifdef __cplusplus
#include <memory>
#include <set>
#include <vector>
-#include <optimizer.h>
+#include <layer.h>
+#include <optimizer_internal.h>
#include <tensor.h>
#include <tensor_dim.h>
#include <weight.h>
ACT_UNKNOWN /** unknown */
};
-/**
- * @brief Enumeration of layer type
- */
-enum class LayerType {
- LAYER_IN, /** Input Layer type */
- LAYER_FC, /** Fully Connected Layer type */
- LAYER_BN, /** Batch Normalization Layer type */
- LAYER_CONV2D, /** Convolution 2D Layer type */
- LAYER_POOLING2D, /** Pooling 2D Layer type */
- LAYER_FLATTEN, /** Flatten Layer type */
- LAYER_ACTIVATION, /** Loss Layer type */
- LAYER_ADDITION, /** Activation Layer type */
- LAYER_LOSS, /** Addition Layer type */
- LAYER_UNKNOWN /** Unknown */
-};
+using LayerType = ml::train::LayerType;
/**
* @class Layer Base class for layers
* @brief Base class for all layers
*/
-class Layer {
+class Layer : public ml::train::Layer {
/** model classes can call private methods which arent exposed to public */
friend class NeuralNetwork;
friend class ModelLoader;
public:
- enum class PrintPreset {
- PRINT_NONE = 0, /**< Print nothing */
- PRINT_SUMMARY, /**< Print preset including summary information */
- PRINT_SUMMARY_META, /**< Print summary preset that includes meta information
- */
- PRINT_ALL /**< Print everything possible */
- };
-
/**
* @brief Constructor of Layer Class
*/
num_outputs(1) {}
/**
- * @brief Destructor of Layer Class
- */
- virtual ~Layer(){};
-
- /**
* @brief Move constructor of Layer.
* @param[in] Layer &&
*/
int setProperty(std::vector<std::string> values);
/**
- * @brief Property Enumeration
- * 0. input shape : string
- * 1. bias zero : bool
- * 2. normalization : bool
- * 3. standardization : bool
- * 4. activation : string (type)
- * 5. epsilon : float
- * 6. weight_regularizer : string (type)
- * 7. weight_regularizer_constant : float
- * 8. unit : int
- * 9. weight_initializer : string (type)
- * 10. filter_size : int
- * 11. kernel_size : ( n , m )
- * 12. stride : ( n, m )
- * 13. padding : ( n, m )
- * 14. pool_size : ( n,m )
- * 15. pooling : max, average, global_max, global_average
- * 16. flatten : bool
- * 17. name : string (type)
- * 18. num_inputs : unsigned int (minimum 1)
- * 19. num_outputs : unsigned int (minimum 1)
- * 20. momentum : float,
- * 21. moving_mean_initializer : string (type),
- * 22. moving_variance_initializer : string (type),
- * 23. gamma_initializer : string (type),
- * 24. beta_initializer" : string (type)
- */
- enum class PropertyType {
- input_shape = 0,
- normalization = 1,
- standardization = 2,
- activation = 3,
- epsilon = 4,
- weight_regularizer = 5,
- weight_regularizer_constant = 6,
- unit = 7,
- weight_initializer = 8,
- bias_initializer = 9,
- filters = 10,
- kernel_size = 11,
- stride = 12,
- padding = 13,
- pool_size = 14,
- pooling = 15,
- flatten = 16,
- name = 17,
- num_inputs = 18,
- num_outputs = 19,
- momentum = 20,
- moving_mean_initializer = 21,
- moving_variance_initializer = 22,
- gamma_initializer = 23,
- beta_initializer = 24,
- unknown
- };
-
- /**
* @brief setProperty by PropertyType
* @note By passing empty string, this can validate if @a type is valid
* @param[in] type property type to be passed
*/
unsigned int getNumWeights() { return num_weights; }
-#if defined(ENABLE_TEST)
/**
* @brief Set the batch for the layer
* @param batch Batch value to be set
virtual void setBatch(unsigned int batch);
protected:
-#else
-protected:
- /**
- * @brief Set the batch for the layer
- * @param batch Batch value to be set
- * @todo Make this private. Only model should be able to do this.
- */
- virtual void setBatch(unsigned int batch);
-#endif
-
/**
* @brief Print Options when printing layer info
*/
} // namespace nntrainer
#endif /* __cplusplus */
-#endif /* __LAYERS_H__ */
+#endif /* __LAYER_H__ */
#define __LOSS_LAYER_H__
#ifdef __cplusplus
-#include <layer.h>
+#include <layer_internal.h>
#include <tensor.h>
namespace nntrainer {
#define __NEURALNET_H__
#ifdef __cplusplus
+#include <memory>
#include <vector>
#include <activation_layer.h>
#include <fc_layer.h>
#include <flatten_layer.h>
#include <input_layer.h>
-#include <layer.h>
+#include <layer_internal.h>
#include <loss_layer.h>
#include <ml-api-common.h>
-#include <nntrainer-api-common.h>
-#include <optimizer.h>
+#include <optimizer_internal.h>
#include <pooling2d_layer.h>
#include <tensor.h>
+#include <model.h>
+#include <nntrainer-api-common.h>
+
namespace nntrainer {
/**
* @brief Enumeration of Network Type
*/
-enum class NetType {
- NET_KNN, /** k Nearest Neighbor */
- NET_REG, /** Logistic Regression */
- NET_NEU, /** Neural Network */
- NET_UNKNOWN /** Unknown */
-};
+using NetType = ml::train::ModelType;
/**
* @brief Statistics from running or training a model
* @class NeuralNetwork Class
* @brief NeuralNetwork Class which has Network Configuration & Layers
*/
-class NeuralNetwork {
+class NeuralNetwork : public ml::train::Model {
friend class ModelLoader; /** access private members of ModelLoader */
public:
loss(0.0f),
loss_type(LossType::LOSS_UNKNOWN),
weight_initializer(WeightInitializer::WEIGHT_UNKNOWN),
- net_type(NetType::NET_UNKNOWN),
- data_buffer(NULL),
+ net_type(NetType::UNKNOWN),
+ data_buffer(nullptr),
continue_train(false),
iter(0),
initialized(false),
/**
* @brief Run NeuralNetwork train with callback function by user
- * @param[in] train_func callback function to get train data. This provides
- * batch size data per every call.
- * @param[in] val_func callback function to get validation data. This provides
- * batch size data per every call.
- * @param[in] test_func callback function to get test data. This provides
- * batch size data per every call.
- * @param[in] values hyper-parameter list
+ * @param[in] dataset set the dataset
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ int setDataset(std::shared_ptr<ml::train::Dataset> dataset) {
+ return setDataBuffer(std::static_pointer_cast<DataBuffer>(dataset));
+ }
+
+ /**
+ * @brief Set the databuffer for the NeuralNetwork to train with
+ * @param[in] databuffer set the databuffer
* @retval #ML_ERROR_NONE Successful.
* @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
*/
* @retval #ML_ERROR_NONE Successful.
* @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
*/
+ int addLayer(std::shared_ptr<ml::train::Layer> layer) {
+ return addLayer(std::static_pointer_cast<Layer>(layer));
+ }
+
+ /**
+ * @brief add layer into neural network model
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
int addLayer(NodeType layer);
/**
* @retval #ML_ERROR_NONE Successful.
* @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
*/
- int setOptimizer(std::shared_ptr<Optimizer> optimizer);
+ int setOptimizer(std::shared_ptr<ml::train::Optimizer> optimizer);
+
+ /**
+ * @brief get layer by name from neural network model
+ * @param[in] name name of the layer to get
+ * @param[out] layer shared_ptr to hold the layer to get
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+ */
+ int getLayer(const char *name, std::shared_ptr<ml::train::Layer> *layer);
/*
* @brief get layer by name from neural network model
*/
int setLoss(LossType loss);
- enum class PropertyType {
- loss = 0,
- loss_type = 1,
- batch_size = 2,
- epochs = 3,
- save_path = 4,
- continue_train = 5,
- unknown = 6
- };
-
/**
* @brief Print Option when printing model info. The function delegates to the
* `print`
#ifdef __cplusplus
#include <adam.h>
-#include <optimizer.h>
+#include <optimizer_internal.h>
#include <sgd.h>
namespace nntrainer {
template <typename... Args>
std::unique_ptr<Optimizer> createOptimizer(OptType type, Args... args) {
switch (type) {
- case OptType::sgd:
+ case OptType::SGD:
return std::make_unique<SGD>(args...);
- case OptType::adam:
+ case OptType::ADAM:
return std::make_unique<Adam>(args...);
- case OptType::unknown:
+ case OptType::UNKNOWN:
/** fallthrough intended */
default:
throw std::invalid_argument("Unknown type for the optimizer");
* See the License for the specific language governing permissions and
* limitations under the License.
*
- * @file optimizer.h
+ * @file optimizer_internal.h
* @date 08 April 2020
* @brief This is Optimizer classes of Neural Network
* @see https://github.com/nnstreamer/nntrainer
#ifdef __cplusplus
#include <memory>
+#include <optimizer.h>
#include <tensor.h>
#include <weight.h>
* 1. ADAM
* 2. Unknown
*/
-enum class OptType { sgd = 0, adam = 1, unknown = 2 };
+using OptType = ml::train::OptimizerType;
-class Optimizer {
+/**
+ * @class Optimizer Base class for optimizers
+ * @brief Base class for all optimizers
+ */
+class Optimizer : public ml::train::Optimizer {
/** Allow layer to initialize optimizer with itself */
friend class Layer;
}
/**
- * @brief Destructor of Optimizer Class
- */
- virtual ~Optimizer() {}
-
- /**
* @brief copy constructor
* @param[in] rhs Optimizer to be copied
*/
int iteration);
/**
- * @brief Property Enumeration
- * learning_rate : float ,
- * decay_rate : float,
- * decay_steps : float,
- * beta1 : float,
- * beta2 : float,
- * epsilon : float,
- */
- enum class PropertyType {
- learning_rate = 0,
- decay_rate = 1,
- decay_steps = 2,
- beta1 = 3,
- beta2 = 4,
- epsilon = 5,
- continue_train = 6,
- unknown = 7,
- };
-
- /**
* @brief Read Training optimizer paramters from file
* @param[in] file input stream file
*/
#define __POOLING2D_LAYER_H__
#ifdef __cplusplus
-#include <layer.h>
+#include <layer_internal.h>
#include <tensor.h>
#include <vector>
#define __SGD_H__
#ifdef __cplusplus
-#include <optimizer.h>
+#include <optimizer_internal.h>
namespace nntrainer {
*/
template <typename... Args>
SGD(float lr = 0.0001f, Args... args) :
- Optimizer(OptType::sgd, lr, args...) {}
+ Optimizer(OptType::SGD, lr, args...) {}
/**
* @copydoc apply_gradient(Weight &weight, int tensor_idx, double updated_lr,
nntrainer_inc = [
include_directories('./include'),
- include_directories('../api')
+ include_directories('../api'),
+ include_directories('../api/ccapi/include')
]
# pc file is not present for 'ml-api-common' yet
'include/bn_layer.h',
'include/conv2d_layer.h',
'include/databuffer.h',
+ 'include/databuffer_factory.h',
'include/databuffer_file.h',
'include/databuffer_func.h',
'include/databuffer_util.h',
'include/fc_layer.h',
'include/flatten_layer.h',
'include/input_layer.h',
- 'include/layer.h',
+ 'include/layer_internal.h',
'include/layer_factory.h',
'include/lazy_tensor.h',
'include/loss_layer.h',
'include/neuralnet.h',
'include/nntrainer_log.h',
'include/nntrainer_logger.h',
- 'include/optimizer.h',
+ 'include/optimizer_internal.h',
'include/parse_util.h',
'include/pooling2d_layer.h',
'include/sgd.h',
'include/util_func.h',
'include/weight.h',
'include/optimizer_factory.h',
- '../api/nntrainer-api-common.h'
]
# Build libraries
#include <activation_layer.h>
#include <blas_interface.h>
-#include <layer.h>
+#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
-#include <optimizer.h>
+#include <optimizer_internal.h>
#include <parse_util.h>
#include <tensor.h>
#include <util_func.h>
*/
#include <addition_layer.h>
-#include <layer.h>
+#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <parse_util.h>
#include <assert.h>
#include <bn_layer.h>
-#include <layer.h>
+#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <blas_interface.h>
#include <conv2d_layer.h>
-#include <layer.h>
+#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
int DataBuffer::run(BufferType type) {
int status = ML_ERROR_NONE;
switch (type) {
- case BUF_TRAIN:
+ case BufferType::BUF_TRAIN:
if (trainReadyFlag == DATA_ERROR)
return ML_ERROR_INVALID_PARAMETER;
return ML_ERROR_INVALID_PARAMETER;
}
break;
- case BUF_VAL:
+ case BufferType::BUF_VAL:
if (valReadyFlag == DATA_ERROR)
return ML_ERROR_INVALID_PARAMETER;
if (validation[DATA_VAL]) {
return ML_ERROR_INVALID_PARAMETER;
}
break;
- case BUF_TEST:
+ case BufferType::BUF_TEST:
if (testReadyFlag == DATA_ERROR)
return ML_ERROR_INVALID_PARAMETER;
int status = ML_ERROR_NONE;
NN_EXCEPTION_NOTI(DATA_NOT_READY);
switch (type) {
- case BUF_TRAIN: {
+ case BufferType::BUF_TRAIN: {
train_running = false;
if (validation[DATA_TRAIN] && true == train_thread.joinable())
train_thread.join();
this->cur_train_bufsize = 0;
this->rest_train = max_train;
} break;
- case BUF_VAL: {
+ case BufferType::BUF_VAL: {
val_running = false;
if (validation[DATA_VAL] && true == val_thread.joinable())
val_thread.join();
this->cur_val_bufsize = 0;
this->rest_val = max_val;
} break;
- case BUF_TEST: {
+ case BufferType::BUF_TEST: {
test_running = false;
if (validation[DATA_TEST] && true == test_thread.joinable())
test_thread.join();
unsigned int i;
int status = ML_ERROR_NONE;
- for (i = BUF_TRAIN; i <= BUF_TEST; ++i) {
+ for (i = (int)BufferType::BUF_TRAIN; i <= (int)BufferType::BUF_TEST; ++i) {
BufferType type = static_cast<BufferType>(i);
status = this->clear(type);
/// facade that wait for the databuffer to be filled and pass it to outparam
/// note that batch_size is passed as an argument because it can vary by
- /// BUF_TYPE later...
+ /// BufferType later...
auto fill_out_params =
[&](std::mutex &ready_mutex, std::condition_variable &cv, DataStatus &flag,
QueueType &data_q, QueueType &label_q, const unsigned int batch_size,
};
switch (type) {
- case BUF_TRAIN:
+ case BufferType::BUF_TRAIN:
if (!fill_out_params(readyTrainData, cv_train, trainReadyFlag, train_data,
train_data_label, batch_size, cur_train_bufsize))
return false;
break;
- case BUF_VAL:
+ case BufferType::BUF_VAL:
if (!fill_out_params(readyValData, cv_val, valReadyFlag, val_data,
val_data_label, batch_size, cur_val_bufsize))
return false;
break;
- case BUF_TEST:
+ case BufferType::BUF_TEST:
if (!fill_out_params(readyTestData, cv_test, testReadyFlag, test_data,
test_data_label, batch_size, cur_test_bufsize))
return false;
int barWidth = 20;
float max_size = max_train;
switch (type) {
- case BUF_TRAIN:
+ case BufferType::BUF_TRAIN:
max_size = max_train;
break;
- case BUF_VAL:
+ case BufferType::BUF_VAL:
max_size = max_val;
break;
- case BUF_TEST:
+ case BufferType::BUF_TEST:
max_size = max_test;
break;
default:
return status;
}
+int DataBuffer::setFunc(BufferType type, datagen_cb func) {
+ return ML_ERROR_NOT_SUPPORTED;
+}
+
} /* namespace nntrainer */
std::vector<std::vector<float>> *datalabel = NULL;
std::ifstream file;
switch (type) {
- case BUF_TRAIN: {
+ case BufferType::BUF_TRAIN: {
max_size = max_train;
buf_size = train_bufsize;
rest_size = &rest_train;
readyTrainData.unlock();
} break;
- case BUF_VAL: {
+ case BufferType::BUF_VAL: {
max_size = max_val;
buf_size = val_bufsize;
rest_size = &rest_val;
readyValData.unlock();
} break;
- case BUF_TEST: {
+ case BufferType::BUF_TEST: {
max_size = max_test;
buf_size = test_bufsize;
rest_size = &rest_test;
std::string &value) {
int status = ML_ERROR_NONE;
- if (data_buffer_type != DATA_BUFFER_FILE)
+ if (data_buffer_type != DataBufferType::FILE)
return ML_ERROR_INVALID_PARAMETER;
switch (type) {
int status = ML_ERROR_NONE;
switch (type) {
- case BUF_TRAIN:
+ case BufferType::BUF_TRAIN:
if (!func)
return ML_ERROR_INVALID_PARAMETER;
callback_train = func;
if (func)
validation[0] = true;
break;
- case BUF_VAL:
+ case BufferType::BUF_VAL:
callback_val = func;
if (func)
validation[1] = true;
break;
- case BUF_TEST:
+ case BufferType::BUF_TEST:
callback_test = func;
if (func)
validation[2] = true;
datagen_cb callback;
switch (type) {
- case BUF_TRAIN: {
+ case BufferType::BUF_TRAIN: {
buf_size = train_bufsize;
cur_size = &cur_train_bufsize;
running = &train_running;
datalabel = &train_data_label;
callback = callback_train;
} break;
- case BUF_VAL: {
+ case BufferType::BUF_VAL: {
buf_size = val_bufsize;
cur_size = &cur_val_bufsize;
running = &val_running;
datalabel = &val_data_label;
callback = callback_val;
} break;
- case BUF_TEST: {
+ case BufferType::BUF_TEST: {
buf_size = test_bufsize;
cur_size = &cur_test_bufsize;
running = &test_running;
*/
#include <fc_layer.h>
-#include <layer.h>
+#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
*/
#include <flatten_layer.h>
-#include <layer.h>
+#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <parse_util.h>
*
*/
-#include <layer.h>
+#include <layer_internal.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <optimizer_factory.h>
#include <activation_layer.h>
#include <cmath>
-#include <layer.h>
+#include <layer_internal.h>
#include <lazy_tensor.h>
#include <loss_layer.h>
#include <nntrainer_error.h>
iniparser_getstring(ini, "Model:Optimizer", "adam"), TOKEN_OPT);
try {
- model.opt = createOptimizer(opt_type);
+ model.opt = nntrainer::createOptimizer(opt_type);
} catch (std::exception &e) {
ml_loge("%s %s", typeid(e).name(), e.what());
return ML_ERROR_INVALID_PARAMETER;
ini, "Model:Decay_rate",
std::to_string(model.opt->getDecayRate()).c_str()))});
- if (model.opt->getType() == OptType::adam) {
+ if (model.opt->getType() == OptType::ADAM) {
std::shared_ptr<Adam> opt_adam = std::static_pointer_cast<Adam>(model.opt);
optimizer_prop.push_back(
LayerType layer_type = (LayerType)parseType(layer_type_str, TOKEN_LAYER);
try {
- layer = createLayer(layer_type);
+ layer = nntrainer::createLayer(layer_type);
} catch (const std::exception &e) {
ml_loge("%s %s", typeid(e).name(), e.what());
status = ML_ERROR_INVALID_PARAMETER;
status = loss_layer->setLoss(updated_loss_type);
NN_RETURN_STATUS();
- addLayer(loss_layer);
+ addLayer(std::static_pointer_cast<Layer>(loss_layer));
return status;
}
for (unsigned int epoch_idx = 1; epoch_idx <= epochs; ++epoch_idx) {
training.loss = 0.0f;
- status = data_buffer->run(nntrainer::BUF_TRAIN);
+ status = data_buffer->run(nntrainer::BufferType::BUF_TRAIN);
if (status != ML_ERROR_NONE) {
- data_buffer->clear(BUF_TRAIN);
+ data_buffer->clear(BufferType::BUF_TRAIN);
return status;
}
- if (data_buffer->getValidation()[nntrainer::BUF_TEST]) {
- status = data_buffer->run(nntrainer::BUF_TEST);
+ if (data_buffer->getValidation()[(int)nntrainer::BufferType::BUF_TEST]) {
+ status = data_buffer->run(nntrainer::BufferType::BUF_TEST);
if (status != ML_ERROR_NONE) {
- data_buffer->clear(BUF_TEST);
+ data_buffer->clear(BufferType::BUF_TEST);
return status;
}
}
sharedTensor label = MAKE_SHARED_TENSOR(getOutputDimension());
while (true) {
- if (data_buffer->getDataFromBuffer(nntrainer::BUF_TRAIN, in->getData(),
- label->getData())) {
+ if (data_buffer->getDataFromBuffer(nntrainer::BufferType::BUF_TRAIN,
+ in->getData(), label->getData())) {
try {
backwarding(in, label, iter++);
} catch (...) {
- data_buffer->clear(nntrainer::BUF_TRAIN);
+ data_buffer->clear(nntrainer::BufferType::BUF_TRAIN);
ml_loge("Error: training error in #%d/%d.", epoch_idx, epochs);
std::rethrow_exception(std::current_exception());
}
std::cout << "#" << epoch_idx << "/" << epochs;
- data_buffer->displayProgress(count++, nntrainer::BUF_TRAIN, getLoss());
+ data_buffer->displayProgress(count++, nntrainer::BufferType::BUF_TRAIN,
+ getLoss());
training.loss += getLoss();
} else {
- data_buffer->clear(nntrainer::BUF_TRAIN);
+ data_buffer->clear(nntrainer::BufferType::BUF_TRAIN);
break;
}
}
std::cout << "#" << epoch_idx << "/" << epochs
<< " - Training Loss: " << training.loss;
- if (data_buffer->getValidation()[nntrainer::BUF_VAL]) {
+ if (data_buffer->getValidation()[(int)nntrainer::BufferType::BUF_VAL]) {
int right = 0;
validation.loss = 0.0f;
unsigned int tcases = 0;
- status = data_buffer->run(nntrainer::BUF_VAL);
+ status = data_buffer->run(nntrainer::BufferType::BUF_VAL);
if (status != ML_ERROR_NONE) {
- data_buffer->clear(BUF_VAL);
+ data_buffer->clear(BufferType::BUF_VAL);
return status;
}
while (true) {
- if (data_buffer->getDataFromBuffer(nntrainer::BUF_VAL, in->getData(),
- label->getData())) {
+ if (data_buffer->getDataFromBuffer(nntrainer::BufferType::BUF_VAL,
+ in->getData(), label->getData())) {
sharedConstTensor Y = forwarding(in, label);
auto model_out = Y->argmax();
auto label_out = label->argmax();
validation.loss += getLoss();
tcases++;
} else {
- data_buffer->clear(nntrainer::BUF_VAL);
+ data_buffer->clear(nntrainer::BufferType::BUF_VAL);
break;
}
}
return status;
}
-int NeuralNetwork::setOptimizer(std::shared_ptr<Optimizer> optimizer) {
+int NeuralNetwork::setOptimizer(
+ std::shared_ptr<ml::train::Optimizer> optimizer) {
- if (optimizer->getType() == OptType::unknown)
+ if (optimizer->getType() == OptType::UNKNOWN)
return ML_ERROR_INVALID_PARAMETER;
if (initialized) {
return ML_ERROR_NOT_SUPPORTED;
}
- opt = optimizer;
+ opt = std::static_pointer_cast<Optimizer>(optimizer);
return ML_ERROR_NONE;
}
}
}
+int NeuralNetwork::getLayer(const char *name,
+ std::shared_ptr<ml::train::Layer> *layer) {
+ std::shared_ptr<Layer> layer_;
+ int ret = getLayer(name, &layer_);
+ if (ret == ML_ERROR_NONE)
+ *layer = layer_;
+ return ret;
+}
+
int NeuralNetwork::getLayer(const char *name, NodeType *layer) {
int status = ML_ERROR_INVALID_PARAMETER;
std::string name_str(name);
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
-#include <optimizer.h>
+#include <optimizer_internal.h>
#include <parse_util.h>
#include <util_func.h>
OptType loaded_type;
file.read((char *)&loaded_type, sizeof(OptType));
- if (loaded_type >= OptType::unknown)
+ if (loaded_type >= OptType::UNKNOWN)
throw std::runtime_error("Saved file has unknown optimizer");
}
void Optimizer::save(std::ofstream &file) {
- if (type >= OptType::unknown)
+ if (type >= OptType::UNKNOWN)
throw std::runtime_error("Cannot save unknown optimizer");
file.write((char *)&type, sizeof(OptType));
*/
#include <adam.h>
-#include <optimizer.h>
+#include <optimizer_internal.h>
#include <sgd.h>
namespace nntrainer {
*/
std::unique_ptr<Optimizer> createOptimizer(OptType type, const Optimizer &opt) {
switch (type) {
- case OptType::sgd:
+ case OptType::SGD:
return std::make_unique<SGD>(static_cast<const SGD &>(opt));
- case OptType::adam:
+ case OptType::ADAM:
return std::make_unique<Adam>(static_cast<const Adam &>(opt));
- case OptType::unknown:
+ case OptType::UNKNOWN:
/** fallthrough intended */
default:
throw std::invalid_argument("Unknown type for the optimizer");
#include <assert.h>
#include <cstring>
#include <iostream>
-#include <layer.h>
+#include <layer_internal.h>
#include <neuralnet.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
-#include <optimizer.h>
+#include <optimizer_internal.h>
#include <parse_util.h>
#include <pooling2d_layer.h>
#include <regex>
unsigned int i;
/**
* @brief Optimizer String from configure file
- * "sgd" : Stochestic Gradient Descent
* "adam" : Adaptive Moment Estimation
+ * "sgd" : Stochastic Gradient Descent
*/
- std::array<std::string, 2> optimizer_string = {"sgd", "adam"};
+ std::array<std::string, 2> optimizer_string = {"adam", "sgd"};
/**
* @brief Loss Function String from configure file
return (i);
}
}
- ret = (unsigned int)OptType::unknown;
+ ret = (unsigned int)OptType::UNKNOWN;
break;
case TOKEN_LOSS:
for (i = 0; i < loss_string.size(); i++) {
return (i);
}
}
- ret = (unsigned int)NetType::NET_UNKNOWN;
+ ret = (unsigned int)NetType::UNKNOWN;
break;
case TOKEN_ACTI:
for (i = 0; i < activation_string.size(); i++) {
#include <cstring>
#include <limits>
-#include <layer.h>
+#include <layer_internal.h>
#include <lazy_tensor.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
%define use_cblas 1
%define nnstreamer_filter 1
%define use_gym 0
+%define support_ccapi 1
%define nntrainerapplicationdir %{_libdir}/nntrainer/bin
%define test_script $(pwd)/packaging/run_unittests.sh
%define gen_input $(pwd)/test/input_gen/genInput.py
%description -n capi-nntrainer-devel-static
Static library of capi-nntrainer-devel package.
+%if 0%{?support_ccapi}
+%package -n ccapi-nntrainer
+Summary: Tizen Native API for NNTrainer
+Group: Multimedia/Framework
+Requires: %{name} = %{version}-%{release}
+%description -n ccapi-nntrainer
+Tizen Native API wrapper for NNTrainer.
+You can train neural networks efficiently.
+
+%post -n ccapi-nntrainer -p /sbin/ldconfig
+%postun -n ccapi-nntrainer -p /sbin/ldconfig
+
+%package -n ccapi-nntrainer-devel
+Summary: Tizen Native API Devel Kit for NNTrainer
+Group: Multimedia/Framework
+Requires: ccapi-nntrainer = %{version}-%{release}
+%description -n ccapi-nntrainer-devel
+Developmental kit for Tizen Native NNTrainer API.
+
+%package -n ccapi-nntrainer-devel-static
+Summary: Static library for Tizen c++ API
+Group: Multimedia/Framework
+Requires: ccapi-nntrainer-devel = %{version}-%{release}
+%description -n ccapi-nntrainer-devel-static
+Static library of ccapi-nntrainer-devel package.
+%endif
+
%if 0%{?nnstreamer_filter}
%package -n nnstreamer-nntrainer
Summary: NNStreamer NNTrainer support
%define enable_tizen -Denable-tizen=false
%define enable_tizen_feature_check -Denable-tizen-feature-check=true
%define install_app -Dinstall-app=true
+%define enable_ccapi -Denable-ccapi=false
%if %{with tizen}
%define enable_tizen -Denable-tizen=true
-%endif
+
+%if 0%{?support_ccapi}
+%define enable_ccapi -Denable-ccapi=true
+%endif # support_ccapi
+%endif # tizen
# Using cblas for Matrix calculation
%if 0%{?use_cblas}
mkdir -p build
meson --buildtype=plain --prefix=%{_prefix} --sysconfdir=%{_sysconfdir} \
- --libdir=%{_libdir} --bindir=%{nntrainerapplicationdir} --includedir=%{_includedir}\
- %{install_app} %{enable_tizen} %{enable_tizen_feature_check} %{enable_cblas} %{enable_gym} %{enable_nnstreamer_tensor_filter} build
+ --libdir=%{_libdir} --bindir=%{nntrainerapplicationdir} \
+ --includedir=%{_includedir} %{install_app} %{enable_tizen} \
+ %{enable_tizen_feature_check} %{enable_cblas} %{enable_ccapi} \
+ %{enable_gym} %{enable_nnstreamer_tensor_filter} build
ninja -C build %{?_smp_mflags}
export NNSTREAMER_CONF=$(pwd)/test/nnstreamer_filter_nntrainer/nnstreamer-test.ini
export NNSTREAMER_FILTERS=$(pwd)/build/nnstreamer/tensor_filter
pushd build
+
rm -rf model.bin
TF_APP=Applications/TransferLearning/Draw_Classification
./${TF_APP}/jni/nntrainer_training ../${TF_APP}/res/Training.ini ../${TF_APP}/res
+%if 0%{?support_ccapi}
rm -rf model.bin
cp ../Applications/MNIST/jni/mnist_trainingSet.dat .
MNIST_APP=Applications/MNIST
./${MNIST_APP}/jni/nntrainer_mnist ../${MNIST_APP}/res/mnist.ini
+%endif # support_ccapi
+
popd
# unittest for nntrainer plugin for nnstreamer
%files devel
%{_includedir}/nntrainer/databuffer.h
+%{_includedir}/nntrainer/databuffer_factory.h
%{_includedir}/nntrainer/databuffer_file.h
%{_includedir}/nntrainer/databuffer_func.h
%{_includedir}/nntrainer/databuffer_util.h
-%{_includedir}/nntrainer/layer.h
+%{_includedir}/nntrainer/layer_internal.h
%{_includedir}/nntrainer/layer_factory.h
%{_includedir}/nntrainer/input_layer.h
%{_includedir}/nntrainer/fc_layer.h
%{_includedir}/nntrainer/tensor_dim.h
%{_includedir}/nntrainer/nntrainer_log.h
%{_includedir}/nntrainer/nntrainer_logger.h
-%{_includedir}/nntrainer/optimizer.h
+%{_includedir}/nntrainer/optimizer_internal.h
%{_includedir}/nntrainer/util_func.h
%{_includedir}/nntrainer/parse_util.h
%{_includedir}/nntrainer/addition_layer.h
%files -n capi-nntrainer-devel-static
%{_libdir}/libcapi-nntrainer.a
-%{_libdir}/libnnstreamer_filter_nntrainer.a
+
+%if 0%{?support_ccapi}
+%files -n ccapi-nntrainer
+%manifest capi-nntrainer.manifest
+%license LICENSE
+%{_libdir}/libccapi-nntrainer.so
+
+%files -n ccapi-nntrainer-devel
+%{_includedir}/nntrainer/model.h
+%{_includedir}/nntrainer/layer.h
+%{_includedir}/nntrainer/optimizer.h
+%{_includedir}/nntrainer/dataset.h
+# %{_libdir}/pkgconfig/ccapi-nntrainer.pc
+
+%files -n ccapi-nntrainer-devel-static
+%{_libdir}/libccapi-nntrainer.a
+%endif # support_ccapi
%if 0%{?nnstreamer_filter}
%files -n nnstreamer-nntrainer
ASSERT_EQ(status, ML_ERROR_NONE);
status = data_buffer.init();
ASSERT_EQ(status, ML_ERROR_NONE);
- status = data_buffer.run(nntrainer::BUF_TRAIN);
+ status = data_buffer.run(nntrainer::BufferType::BUF_TRAIN);
ASSERT_EQ(status, ML_ERROR_NONE);
- status = data_buffer.run(nntrainer::BUF_TEST);
+ status = data_buffer.run(nntrainer::BufferType::BUF_TEST);
ASSERT_EQ(status, ML_ERROR_NONE);
- status = data_buffer.run(nntrainer::BUF_VAL);
+ status = data_buffer.run(nntrainer::BufferType::BUF_VAL);
ASSERT_EQ(status, ML_ERROR_NONE);
status = data_buffer.clear();
EXPECT_EQ(status, ML_ERROR_NONE);
nntrainer::DataBufferFromDataFile data_buffer;
status = data_buffer.setDataFile("testSet.dat", nntrainer::DATA_TEST);
ASSERT_EQ(status, ML_ERROR_NONE);
- status = data_buffer.clear(nntrainer::BUF_TEST);
+ status = data_buffer.clear(nntrainer::BufferType::BUF_TEST);
EXPECT_EQ(status, ML_ERROR_NONE);
}
nntrainer::DataBufferFromDataFile data_buffer;
status = data_buffer.setDataFile("testSet.dat", nntrainer::DATA_TEST);
ASSERT_EQ(status, ML_ERROR_NONE);
- status = data_buffer.clear(nntrainer::BUF_TEST);
+ status = data_buffer.clear(nntrainer::BufferType::BUF_TEST);
EXPECT_EQ(status, ML_ERROR_NONE);
status = data_buffer.clear();
EXPECT_EQ(status, ML_ERROR_NONE);
nntrainer::DataBufferFromDataFile data_buffer;
status = data_buffer.setDataFile("testSet.dat", nntrainer::DATA_TEST);
ASSERT_EQ(status, ML_ERROR_NONE);
- status = data_buffer.clear(nntrainer::BUF_TEST);
+ status = data_buffer.clear(nntrainer::BufferType::BUF_TEST);
EXPECT_EQ(status, ML_ERROR_NONE);
status = data_buffer.clear();
EXPECT_EQ(status, ML_ERROR_NONE);
}
/**
- * @brief Data buffer clear BUF_UNKNOWN
+ * @brief Data buffer clear BufferType::BUF_UNKNOWN
*/
TEST(nntrainer_DataBuffer, clear_05_n) {
int status = ML_ERROR_NONE;
nntrainer::DataBufferFromDataFile data_buffer;
- status = data_buffer.clear(nntrainer::BUF_UNKNOWN);
+ status = data_buffer.clear(nntrainer::BufferType::BUF_UNKNOWN);
EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
}
*/
TEST(nntrainer_Optimizer, create_01_p) {
std::shared_ptr<nntrainer::Optimizer> op;
- EXPECT_NO_THROW(op = createOptimizer(nntrainer::OptType::adam));
+ EXPECT_NO_THROW(op = nntrainer::createOptimizer(nntrainer::OptType::ADAM));
}
/**
*/
TEST(nntrainer_Optimizer, setType_02_p) {
std::shared_ptr<nntrainer::Optimizer> op;
- EXPECT_NO_THROW(op = createOptimizer(nntrainer::OptType::sgd));
+ EXPECT_NO_THROW(op = nntrainer::createOptimizer(nntrainer::OptType::SGD));
}
/**
*/
TEST(nntrainer_Optimizer, setType_03_n) {
std::shared_ptr<nntrainer::Optimizer> op;
- EXPECT_THROW(op = createOptimizer(nntrainer::OptType::unknown),
+ EXPECT_THROW(op = nntrainer::createOptimizer(nntrainer::OptType::UNKNOWN),
std::invalid_argument);
}
#include <fc_layer.h>
#include <flatten_layer.h>
#include <input_layer.h>
-#include <layer.h>
+#include <layer_internal.h>
#include <loss_layer.h>
#include <nntrainer_error.h>
#include <nntrainer_test_util.h>
}
std::shared_ptr<nntrainer::Optimizer> op;
- EXPECT_NO_THROW(op = createOptimizer(type));
+ EXPECT_NO_THROW(op = nntrainer::createOptimizer(type));
status = op->setProperty(input_str);
EXPECT_EQ(status, ML_ERROR_NONE);
* @brief Input Layer
*/
TEST_F(nntrainer_InputLayer, setOptimizer_01_p) {
- status = setOptimizer(nntrainer::OptType::adam, "learning_rate=0.001 |"
+ status = setOptimizer(nntrainer::OptType::ADAM, "learning_rate=0.001 |"
"beta1=0.9 |"
"beta2=0.9999 |"
"epsilon=1e-7");
* @brief Fully Connected Layer
*/
TEST_F(nntrainer_FullyConnectedLayer, setOptimizer_01_p) {
- status = setOptimizer(nntrainer::OptType::adam, "learning_rate=0.001 |"
+ status = setOptimizer(nntrainer::OptType::ADAM, "learning_rate=0.001 |"
"beta1=0.9 |"
"beta2=0.9999 |"
"epsilon=1e-7");
* @brief FullyConnected Layer
*/
TEST_F(nntrainer_FullyConnectedLayer, setOptimizer_02_p) {
- status = setOptimizer(nntrainer::OptType::sgd, "learning_rate=0.1");
+ status = setOptimizer(nntrainer::OptType::SGD, "learning_rate=0.1");
EXPECT_EQ(status, ML_ERROR_NONE);
}
std::vector<float> weight_data;
std::vector<float> bias_data;
- setOptimizer(nntrainer::OptType::adam, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::ADAM, "learning_rate=1.0");
sharedConstTensor out;
std::vector<float> weight_data;
std::vector<float> bias_data;
- setOptimizer(nntrainer::OptType::adam, "learning_rate=0.0001");
+ setOptimizer(nntrainer::OptType::ADAM, "learning_rate=0.0001");
addLoss(nntrainer::LossType::LOSS_ENTROPY_SOFTMAX);
matchForwarding("tc_fc_1_goldenFCResultSoftmaxCrossAdam.out");
*/
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_01_p) {
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding and backwarding without loss */
matchForwarding("tc_fc_1_goldenFCResultActNone.out");
addActivation(nntrainer::ActivationType::ACT_SIGMOID);
addLoss(nntrainer::LossType::LOSS_MSE);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSigmoidMse.out");
addActivation(nntrainer::ActivationType::ACT_SOFTMAX);
addLoss(nntrainer::LossType::LOSS_MSE);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSoftmaxMse.out");
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_04_p) {
addLoss(nntrainer::LossType::LOSS_MSE);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultActNone.out");
addActivation(nntrainer::ActivationType::ACT_SIGMOID);
addLoss(nntrainer::LossType::LOSS_MSE);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSigmoidMse.out");
addActivation(nntrainer::ActivationType::ACT_SOFTMAX);
addLoss(nntrainer::LossType::LOSS_MSE);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSoftmaxMse.out");
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_07_p) {
addLoss(nntrainer::LossType::LOSS_ENTROPY_SIGMOID);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSigmoidCross.out");
TEST_F(nntrainer_FullyConnectedLayer_TFmatch, forwarding_backwarding_08_p) {
addLoss(nntrainer::LossType::LOSS_ENTROPY_SOFTMAX);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
/** Verify forwarding value */
matchForwarding("tc_fc_1_goldenFCResultSoftmaxCross.out");
virtual void prepareLayer() {
setProperty("input_shape=1:1:12 | epsilon=0.001 | momentum=0.90");
setBatch(3);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1");
}
};
*/
TEST_F(nntrainer_BatchNormalizationLayer, setOptimizer_01_p) {
status = setOptimizer(
- nntrainer::OptType::adam,
+ nntrainer::OptType::ADAM,
"learning_rate=0.001 | beta1=0.9 | beta2=0.9999 | epsilon=1e-7");
EXPECT_EQ(status, ML_ERROR_NONE);
}
virtual void prepareLayer() {
setProperty("input_shape=2:4:5 | epsilon=0.001 | momentum=0.90");
setBatch(3);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1");
}
};
virtual void prepareLayer() {
setProperty("input_shape=2:4:5 | epsilon=0.001 | momentum=0.90");
setBatch(1);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1");
}
};
"stride=1, 1 |"
"padding=0,0");
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
unsigned int filter_size = 2;
std::vector<float> grad_data;
std::vector<float> weight_data;
"stride=1,1 |"
"padding=0,0");
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
unsigned int filter_size = 12;
std::vector<float> grad_data;
std::vector<float> weight_data;
"padding=0,0",
2);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
unsigned int filter_size = 3;
std::vector<float> grad_data;
EXPECT_EQ(status, ML_ERROR_NONE);
std::shared_ptr<nntrainer::Optimizer> op;
- EXPECT_NO_THROW(op = createOptimizer(nntrainer::OptType::sgd));
+ EXPECT_NO_THROW(op = nntrainer::createOptimizer(nntrainer::OptType::SGD));
status = op->setProperty({"learning_rate=1.0"});
EXPECT_EQ(status, ML_ERROR_NONE);
status = layer1.setOptimizer(op);
EXPECT_EQ(status, ML_ERROR_NONE);
std::shared_ptr<nntrainer::Optimizer> op2;
- EXPECT_NO_THROW(op2 = createOptimizer(nntrainer::OptType::sgd));
+ EXPECT_NO_THROW(op2 = nntrainer::createOptimizer(nntrainer::OptType::SGD));
status = op2->setProperty({"learning_rate=1.0"});
EXPECT_EQ(status, ML_ERROR_NONE);
status = layer2.setOptimizer(op2);
EXPECT_EQ(status, ML_ERROR_NONE);
- setOptimizer(nntrainer::OptType::sgd, "learning_rate=1.0");
+ setOptimizer(nntrainer::OptType::SGD, "learning_rate=1.0");
unsigned int filter_size;
std::vector<float> grad_data;