*/
nntrainer::Tensor test =
mainNet.forwarding(nntrainer::Tensor({input}), status);
- float* data = test.getData();
+ float *data = test.getData();
unsigned int len = test.getDim().getDataLen();
std::vector<float> temp(data, data + len);
action.push_back(argmax(temp));
*/
nntrainer::Tensor NQ =
targetNet.forwarding(nntrainer::Tensor(next_inbatch), status);
- float* nqa = NQ.getData();
+ float *nqa = NQ.getData();
/**
 * @brief Update Q values & update mainNetwork
*
*/
-#include <stdio.h>
#include <nntrainer.h>
+#include <stdio.h>
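/* The do { ... } while (0) wrapper makes the status-checking macro below
 * behave like a single statement, so it can safely follow an if/else. */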
#define NN_RETURN_STATUS()         \
  do {                             \
    if (status != ML_ERROR_NONE) { \
      return status;               \
    }                              \
  } while (0)
-int
-main (int argc, char *argv[])
-{
+int main(int argc, char *argv[]) {
int status = ML_ERROR_NONE;
ml_nnopt_h optimizer;
/* model create */
- status = ml_nnmodel_construct (&model);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_construct(&model);
+ NN_RETURN_STATUS();
/* input layer create */
- status = ml_nnlayer_create (&layers[0], ML_LAYER_TYPE_INPUT);
- NN_RETURN_STATUS ();
+ status = ml_nnlayer_create(&layers[0], ML_LAYER_TYPE_INPUT);
+ NN_RETURN_STATUS();
/* set property for input layer */
status =
- ml_nnlayer_set_property (layers[0], "input_shape= 32:1:1:62720",
- "normalization=true", "bias_init_zero=true", NULL);
- NN_RETURN_STATUS ();
+ ml_nnlayer_set_property(layers[0], "input_shape= 32:1:1:62720",
+ "normalization=true", "bias_init_zero=true", NULL);
+ NN_RETURN_STATUS();
/* add input layer into model */
- status = ml_nnmodel_add_layer (model, layers[0]);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_add_layer(model, layers[0]);
+ NN_RETURN_STATUS();
/* create fully connected layer */
- status = ml_nnlayer_create (&layers[1], ML_LAYER_TYPE_FC);
- NN_RETURN_STATUS ();
+ status = ml_nnlayer_create(&layers[1], ML_LAYER_TYPE_FC);
+ NN_RETURN_STATUS();
/* set property for fc layer */
- status = ml_nnlayer_set_property (layers[1], "unit= 10", "activation=softmax",
- "bias_init_zero=true", "weight_decay=l2norm",
- "weight_decay_lambda=0.005", "weight_ini=xavier_uniform", NULL);
- NN_RETURN_STATUS ();
+ status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
+ "bias_init_zero=true", "weight_decay=l2norm",
+ "weight_decay_lambda=0.005",
+ "weight_ini=xavier_uniform", NULL);
+ NN_RETURN_STATUS();
/* add fc layer into model */
- status = ml_nnmodel_add_layer (model, layers[1]);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_add_layer(model, layers[1]);
+ NN_RETURN_STATUS();
/* create optimizer */
- status = ml_nnoptimizer_create (&optimizer, "adam");
- NN_RETURN_STATUS ();
+ status = ml_nnoptimizer_create(&optimizer, "adam");
+ NN_RETURN_STATUS();
/* set property for optimizer */
- status =
- ml_nnoptimizer_set_property (optimizer, "learning_rate=0.0001",
- "decay_rate=0.96", "decay_steps=1000", "beta1=0.9", "beta2=0.9999",
- "epsilon=1e-7", NULL);
- NN_RETURN_STATUS ();
+ status = ml_nnoptimizer_set_property(
+ optimizer, "learning_rate=0.0001", "decay_rate=0.96", "decay_steps=1000",
+ "beta1=0.9", "beta2=0.9999", "epsilon=1e-7", NULL);
+ NN_RETURN_STATUS();
/* compile model with cross entropy loss function */
- status = ml_nnmodel_compile (model, optimizer, "loss=cross", NULL);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_compile(model, optimizer, "loss=cross", NULL);
+ NN_RETURN_STATUS();
- /* train model with data files : epochs = 10 and store model file named "model.bin" */
- status =
- ml_nnmodel_train_with_file (model, "epochs=10", "batch_size=32",
- "train_data=trainingSet.dat", "val_data=trainingSet.dat",
- "label_data=label.dat", "buffer_size=100", "model_file=model.bin", NULL);
- NN_RETURN_STATUS ();
+ /* train model with data files : epochs = 10 and store model file named
+ * "model.bin" */
+ status = ml_nnmodel_train_with_file(
+ model, "epochs=10", "batch_size=32", "train_data=trainingSet.dat",
+ "val_data=trainingSet.dat", "label_data=label.dat", "buffer_size=100",
+ "model_file=model.bin", NULL);
+ NN_RETURN_STATUS();
/* delete layers */
- status = ml_nnlayer_delete (layers[0]);
- NN_RETURN_STATUS ();
+ status = ml_nnlayer_delete(layers[0]);
+ NN_RETURN_STATUS();
- status = ml_nnlayer_delete (layers[1]);
- NN_RETURN_STATUS ();
+ status = ml_nnlayer_delete(layers[1]);
+ NN_RETURN_STATUS();
/* delete optimizer */
- status = ml_nnoptimizer_delete (optimizer);
- NN_RETURN_STATUS ();
+ status = ml_nnoptimizer_delete(optimizer);
+ NN_RETURN_STATUS();
/* delete model */
- status = ml_nnmodel_destruct (model);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_destruct(model);
+ NN_RETURN_STATUS();
return 0;
}
static bool alloc_train = false;
static bool alloc_val = false;
-bool gen_data_train (float *outVec, float *outLabel, int *status);
-bool gen_data_val (float *outVec, float *outLabel, int *status);
-bool file_exists (const char *filename);
+bool gen_data_train(float *outVec, float *outLabel, int *status);
+bool gen_data_val(float *outVec, float *outLabel, int *status);
+bool file_exists(const char *filename);
-bool
-file_exists (const char *filename)
-{
+bool file_exists(const char *filename) {
struct stat buffer;
- return (stat (filename, &buffer) == 0);
+ return (stat(filename, &buffer) == 0);
}
#define NN_RETURN_STATUS() \
* @param[in] max : maximum value
 * @retval min <= random value <= max
*/
-static int
-range_random (int min, int max)
-{
+static int range_random(int min, int max) {
int n = max - min + 1;
int remainder = RAND_MAX % n;
int x;
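  /* Rejection sampling: rand() values in the incomplete last bucket of
   * RAND_MAX are discarded so the modulo below is not biased toward
   * smaller numbers. */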
do {
- x = rand ();
+ x = rand();
} while (x >= RAND_MAX - remainder);
return min + x % n;
}
 * @param[in] id id-th data record to get
* @retval true/false false : end of data
*/
-static bool
-get_data (const char *file_name, float *outVec, float *outLabel,
- int id, int file_length)
-{
+static bool get_data(const char *file_name, float *outVec, float *outLabel,
+ int id, int file_length) {
uint64_t position;
FILE *F;
unsigned int i;
if (id < 0)
return false;
- position = (feature_size + num_class) * id * sizeof (float);
+ position = (feature_size + num_class) * id * sizeof(float);
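  /* Each record in the data file stores feature_size feature floats followed
   * by num_class label floats; position is the byte offset of record `id`. */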
if (position > file_length || position > ULLONG_MAX) {
return false;
}
- F = fopen (file_name, "rb");
+ F = fopen(file_name, "rb");
if (F == NULL) {
- printf ("Cannot open %s\n", file_name);
+ printf("Cannot open %s\n", file_name);
return false;
}
- fseek (F, position, SEEK_SET);
+ fseek(F, position, SEEK_SET);
for (i = 0; i < feature_size; i++) {
float f;
- ret = fread ((void *) (&f), sizeof (float), 1, F);
+ ret = fread((void *)(&f), sizeof(float), 1, F);
if (!ret)
return false;
outVec[i] = f;
}
for (i = 0; i < num_class; i++) {
float f;
- ret = fread ((void *) (&f), sizeof (float), 1, F);
+ ret = fread((void *)(&f), sizeof(float), 1, F);
if (!ret)
return false;
outLabel[i] = f;
}
- fclose (F);
+ fclose(F);
return true;
}
* @param[out] status for error handling
* @retval true/false
*/
-bool
-gen_data_train (float *outVec, float *outLabel, int *status)
-{
+bool gen_data_train(float *outVec, float *outLabel, int *status) {
int memI[mini_batch];
long file_size;
unsigned int count = 0;
const char *file_name = "trainingSet.dat";
- if (!file_exists (file_name)) {
- printf ("%s does not exists\n", file_name);
+ if (!file_exists(file_name)) {
+    printf("%s does not exist\n", file_name);
return false;
}
- file = fopen (file_name, "r");
- fseek (file, 0, SEEK_END);
- file_size = ftell (file);
- fclose (file);
+ file = fopen(file_name, "r");
+ fseek(file, 0, SEEK_END);
+ file_size = ftell(file);
+ fclose(file);
data_size =
- (unsigned int) (file_size / ((num_class +
- feature_size) * sizeof (float)));
+ (unsigned int)(file_size / ((num_class + feature_size) * sizeof(float)));
if (!alloc_train) {
- duplicate = (bool *) malloc (sizeof (bool) * data_size);
+ duplicate = (bool *)malloc(sizeof(bool) * data_size);
for (i = 0; i < data_size; ++i) {
duplicate[i] = false;
}
if (count < mini_batch) {
if (duplicate == NULL) {
- printf ("Error: memory allocation.\n");
+ printf("Error: memory allocation.\n");
return false;
}
- free (duplicate);
+ free(duplicate);
alloc_train = false;
return false;
}
count = 0;
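  /* Pick mini_batch distinct record indices that have not been used yet;
   * duplicate[] marks the indices already handed out. */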
while (count < mini_batch) {
- int nomI = range_random (0, data_size - 1);
+ int nomI = range_random(0, data_size - 1);
if (!duplicate[nomI]) {
memI[count] = nomI;
duplicate[nomI] = true;
float o[feature_size];
float l[num_class];
- get_data (file_name, o, l, memI[i], file_size);
+ get_data(file_name, o, l, memI[i], file_size);
for (j = 0; j < feature_size; ++j)
outVec[i * feature_size + j] = o[j];
* @param[out] status for error handling
* @retval true/false false : end of data
*/
-bool
-gen_data_val (float *outVec, float *outLabel, int *status)
-{
+bool gen_data_val(float *outVec, float *outLabel, int *status) {
int memI[mini_batch];
unsigned int i, j;
const char *file_name = "trainingSet.dat";
- FILE *file = fopen (file_name, "r");
- fseek (file, 0, SEEK_END);
- file_size = ftell (file);
- fclose (file);
+ FILE *file = fopen(file_name, "r");
+ fseek(file, 0, SEEK_END);
+ file_size = ftell(file);
+ fclose(file);
data_size =
- (unsigned int) (file_size / ((num_class +
- feature_size) * sizeof (float)));
+ (unsigned int)(file_size / ((num_class + feature_size) * sizeof(float)));
if (!alloc_val) {
- valduplicate = (bool *) malloc (sizeof (bool) * data_size);
+ valduplicate = (bool *)malloc(sizeof(bool) * data_size);
for (i = 0; i < data_size; ++i) {
valduplicate[i] = false;
}
}
if (count < mini_batch) {
- free (valduplicate);
+ free(valduplicate);
alloc_val = false;
return false;
}
count = 0;
while (count < mini_batch) {
- int nomI = range_random (0, data_size - 1);
+ int nomI = range_random(0, data_size - 1);
if (!valduplicate[nomI]) {
memI[count] = nomI;
valduplicate[nomI] = true;
float o[feature_size];
float l[num_class];
- get_data (file_name, o, l, memI[i], file_size);
+ get_data(file_name, o, l, memI[i], file_size);
for (j = 0; j < feature_size; ++j)
outVec[i * feature_size + j] = o[j];
return true;
}
-int
-main (int argc, char *argv[])
-{
+int main(int argc, char *argv[]) {
int status = ML_ERROR_NONE;
ml_nnopt_h optimizer;
/* model create */
- status = ml_nnmodel_construct (&model);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_construct(&model);
+ NN_RETURN_STATUS();
/* input layer create */
- status = ml_nnlayer_create (&layers[0], ML_LAYER_TYPE_INPUT);
- NN_RETURN_STATUS ();
+ status = ml_nnlayer_create(&layers[0], ML_LAYER_TYPE_INPUT);
+ NN_RETURN_STATUS();
/* set property for input layer */
status =
- ml_nnlayer_set_property (layers[0], "input_shape= 32:1:1:62720",
- "normalization=true", "bias_init_zero=true", NULL);
- NN_RETURN_STATUS ();
+ ml_nnlayer_set_property(layers[0], "input_shape= 32:1:1:62720",
+ "normalization=true", "bias_init_zero=true", NULL);
+ NN_RETURN_STATUS();
/* add input layer into model */
- status = ml_nnmodel_add_layer (model, layers[0]);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_add_layer(model, layers[0]);
+ NN_RETURN_STATUS();
/* create fully connected layer */
- status = ml_nnlayer_create (&layers[1], ML_LAYER_TYPE_FC);
- NN_RETURN_STATUS ();
+ status = ml_nnlayer_create(&layers[1], ML_LAYER_TYPE_FC);
+ NN_RETURN_STATUS();
/* set property for fc layer */
- status = ml_nnlayer_set_property (layers[1], "unit= 10", "activation=softmax",
- "bias_init_zero=true", "weight_decay=l2norm",
- "weight_decay_lambda=0.005", "weight_ini=xavier_uniform", NULL);
- NN_RETURN_STATUS ();
+ status = ml_nnlayer_set_property(layers[1], "unit= 10", "activation=softmax",
+ "bias_init_zero=true", "weight_decay=l2norm",
+ "weight_decay_lambda=0.005",
+ "weight_ini=xavier_uniform", NULL);
+ NN_RETURN_STATUS();
/* add fc layer into model */
- status = ml_nnmodel_add_layer (model, layers[1]);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_add_layer(model, layers[1]);
+ NN_RETURN_STATUS();
/* create optimizer */
- status = ml_nnoptimizer_create (&optimizer, "adam");
- NN_RETURN_STATUS ();
+ status = ml_nnoptimizer_create(&optimizer, "adam");
+ NN_RETURN_STATUS();
/* set property for optimizer */
- status =
- ml_nnoptimizer_set_property (optimizer, "learning_rate=0.0001",
- "decay_rate=0.96", "decay_steps=1000", "beta1=0.9", "beta2=0.9999",
- "epsilon=1e-7", NULL);
- NN_RETURN_STATUS ();
+ status = ml_nnoptimizer_set_property(
+ optimizer, "learning_rate=0.0001", "decay_rate=0.96", "decay_steps=1000",
+ "beta1=0.9", "beta2=0.9999", "epsilon=1e-7", NULL);
+ NN_RETURN_STATUS();
/* compile model with cross entropy loss function */
- status = ml_nnmodel_compile (model, optimizer, "loss=cross", NULL);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_compile(model, optimizer, "loss=cross", NULL);
+ NN_RETURN_STATUS();
/* train model with data files : epochs = 10 and store model file named
* "model.bin" */
- status =
- ml_nnmodel_train_with_generator (model, gen_data_train, gen_data_val,
- NULL, "epochs=10", "batch_size=32", "model_file=model.bin",
- "buffer_size = 32", NULL);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_train_with_generator(
+ model, gen_data_train, gen_data_val, NULL, "epochs=10", "batch_size=32",
+ "model_file=model.bin", "buffer_size = 32", NULL);
+ NN_RETURN_STATUS();
/* delete layers */
- status = ml_nnlayer_delete (layers[0]);
- NN_RETURN_STATUS ();
+ status = ml_nnlayer_delete(layers[0]);
+ NN_RETURN_STATUS();
- status = ml_nnlayer_delete (layers[1]);
- NN_RETURN_STATUS ();
+ status = ml_nnlayer_delete(layers[1]);
+ NN_RETURN_STATUS();
/* delete optimizer */
- status = ml_nnoptimizer_delete (optimizer);
- NN_RETURN_STATUS ();
+ status = ml_nnoptimizer_delete(optimizer);
+ NN_RETURN_STATUS();
/* delete model */
- status = ml_nnmodel_destruct (model);
- NN_RETURN_STATUS ();
+ status = ml_nnmodel_destruct(model);
+ NN_RETURN_STATUS();
return 0;
}
#include <nntrainer.h>
-int
-main (int argc, char *argv[])
-{
+int main(int argc, char *argv[]) {
int status = ML_ERROR_NONE;
ml_nnmodel_h handle = NULL;
const char *config_file = "./Tizen_CAPI_config.ini";
- status = ml_nnmodel_construct (&handle);
+ status = ml_nnmodel_construct(&handle);
if (status != ML_ERROR_NONE)
return status;
- status = ml_nnmodel_compile_with_conf (config_file, handle);
+ status = ml_nnmodel_compile_with_conf(config_file, handle);
if (status != ML_ERROR_NONE)
return status;
- status = ml_nnmodel_train_with_file (handle);
+ status = ml_nnmodel_train_with_file(handle);
if (status != ML_ERROR_NONE)
return status;
- status = ml_nnmodel_destruct (handle);
+ status = ml_nnmodel_destruct(handle);
if (status != ML_ERROR_NONE)
return status;
return status;
snprintf(ad->edj_path, sizeof(ad->edj_path), "%s%s", res_path, EDJ_PATH);
free(res_path);
-
+
view_init(ad);
if (view_routes_to(ad, "home", &ad->home))
/* Take necessary actions when application becomes visible. */
}
-static void app_terminate(void *data) { /* Release all resources. */ }
+static void app_terminate(void *data) { /* Release all resources. */
+}
static void ui_app_lang_changed(app_event_info_h event_info, void *user_data) {
/*APP_EVENT_LANGUAGE_CHANGED*/
ad);
nf_it = elm_naviframe_item_push(ad->naviframe, NULL, NULL, NULL, ad->layout,
- "empty");
+ "empty");
if (nf_it == NULL)
return APP_ERROR_INVALID_PARAMETER;
- if(data != NULL)
+ if (data != NULL)
*data = nf_it;
return APP_ERROR_NONE;
 * @details Use this function to initialize the neural network model
* @since_tizen 6.x
* @param[in] model The NNTrainer model handler from the given description.
- * @param[in] optimizer The NNTrainer optimizer handler from the given description.
+ * @param[in] optimizer The NNTrainer optimizer handler from the given
+ * description.
 * @param[in] ... hyper parameters for compiling the model
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful.
* @retval #ML_ERROR_NONE Successful.
* @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
*/
-int ml_nnoptimizer_set_property(ml_nnopt_h opt,...);
+int ml_nnoptimizer_set_property(ml_nnopt_h opt, ...);
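/*
 * Usage (as in the training sample above): hyper parameters are passed as a
 * NULL-terminated list of "key=value" strings, e.g.
 *   ml_nnoptimizer_set_property(optimizer, "learning_rate=0.0001",
 *                               "beta1=0.9", "beta2=0.9999", NULL);
 */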
- /**
+/**
* @}
*/
#ifdef __cplusplus
std::shared_ptr<nntrainer::Optimizer> Opt;
Opt = nnopt->optimizer;
- returnable f = [&]() {
- return Opt->setProperty(arg_list);
- };
+ returnable f = [&]() { return Opt->setProperty(arg_list); };
status = nntrainer_exception_boundary(f);
stride{1, 1},
padding{0, 0},
normalization(false),
- standardization(false) { setType(LAYER_CONV2D); };
+ standardization(false) {
+ setType(LAYER_CONV2D);
+ };
/**
* @brief Destructor of Conv 2D Layer
/**
* @brief Constructor of InputLayer
*/
- InputLayer() :
- normalization(false),
- standardization(false) { setType(LAYER_IN); };
+ InputLayer() : normalization(false), standardization(false) {
+ setType(LAYER_IN);
+ };
/**
* @brief Destructor of InputLayer
*/
class Layer {
public:
- Layer()
- : last_layer(false),
- bias_init_zero(false),
- type(LAYER_UNKNOWN),
- loss(0.0),
- cost(COST_UNKNOWN),
- activation_type(ACT_NONE),
- bn_follow(false),
- weight_decay(),
- weight_ini_type(WEIGHT_XAVIER_UNIFORM),
- flatten(false),
- trainable(true) {}
+ Layer() :
+ last_layer(false),
+ bias_init_zero(false),
+ type(LAYER_UNKNOWN),
+ loss(0.0),
+ cost(COST_UNKNOWN),
+ activation_type(ACT_NONE),
+ bn_follow(false),
+ weight_decay(),
+ weight_ini_type(WEIGHT_XAVIER_UNIFORM),
+ flatten(false),
+ trainable(true) {}
/**
* @brief Destructor of Layer Class
* @brief get gradients
* @retval shared ptr of vector of all tensors
*/
- std::shared_ptr<std::vector<Tensor>> getGradients() { return getObjFromRef(gradients); }
+ std::shared_ptr<std::vector<Tensor>> getGradients() {
+ return getObjFromRef(gradients);
+ }
/**
* @brief get weights
* @retval shared ptr of vector of all tensors
*/
- std::shared_ptr<std::vector<Tensor>> getWeights() { return getObjFromRef(weights); }
+ std::shared_ptr<std::vector<Tensor>> getWeights() {
+ return getObjFromRef(weights);
+ }
/**
* @brief get if the output of this layer must be flatten
};
protected:
-
/**
* @brief check if current layer's weight decay type is l2norm
* @return bool is weightdecay type is L2 Norm
/**
* @brief Convert vector of reference to vector of objects
*/
- std::shared_ptr<std::vector<Tensor>> getObjFromRef(
- std::vector<std::reference_wrapper<Tensor>> &elements);
+ std::shared_ptr<std::vector<Tensor>>
+ getObjFromRef(std::vector<std::reference_wrapper<Tensor>> &elements);
};
} // namespace nntrainer
decay_rate(1.0),
decay_steps(1.0),
continue_train(false) {
- if (type == OptType::sgd) {
- learning_rate = 0.01;
- }
+ if (type == OptType::sgd) {
+ learning_rate = 0.01;
}
+ }
} OptParam;
class Optimizer {
* @param[in] iteration nth epoch number
*/
void apply_gradients(std::vector<std::reference_wrapper<Tensor>> &weights,
- std::vector<std::reference_wrapper<Tensor>> &gradients, int iteration);
+ std::vector<std::reference_wrapper<Tensor>> &gradients,
+ int iteration);
/**
* @brief Property Enumeration
* @param[in] delimiter delimiter for the string
* @retval output string
*/
-const char* getValues(std::vector<int> values, const char* delimiter = ",");
+const char *getValues(std::vector<int> values, const char *delimiter = ",");
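/* For example, getValues({1, 2, 3}) yields the string "1,2,3". */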
int getValues(int n_str, std::string str, int *value);
dim(d),
strides{{1, 2, 3}},
is_contiguous(true),
- data(new float[d.getDataLen()], std::default_delete<float[]>())
- {
+ data(new float[d.getDataLen()], std::default_delete<float[]>()) {
// todo: initialize appropriate strides
if (buf == nullptr) {
setZero();
// Update K / bias
for (unsigned int i = 0; i < filter_size; ++i) {
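    /* applyIf() folds weight_decay.lambda * filters[i] into the kernel
     * gradient only when L2-norm weight decay is enabled for this layer. */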
Tensor djdw = delK[i]
- .chain()
- .applyIf(this->isWeightDecayL2Norm(), _LIFT(add_i),
- filters[i], weight_decay.lambda)
- .run();
+ .chain()
+ .applyIf(this->isWeightDecayL2Norm(), _LIFT(add_i),
+ filters[i], weight_decay.lambda)
+ .run();
gradients.push_back(djdw);
gradients.push_back(delBias[i]);
return status;
}
-std::shared_ptr<std::vector<Tensor>> Layer::getObjFromRef(std::vector<std::reference_wrapper<Tensor>> &elements) {
+std::shared_ptr<std::vector<Tensor>>
+Layer::getObjFromRef(std::vector<std::reference_wrapper<Tensor>> &elements) {
std::vector<Tensor> ele;
for (auto iter = elements.begin(); iter != elements.end(); ++iter)
ele.push_back(*iter);
NN_INI_RETURN_STATUS();
OptParam popt(opt.getType());
- popt.learning_rate = iniparser_getdouble(ini, "Network:Learning_rate",
- popt.learning_rate);
- popt.decay_steps = iniparser_getint(ini, "Network:Decay_steps",
- popt.decay_steps);
- popt.decay_rate = iniparser_getdouble(ini, "Network:Decay_rate",
- popt.decay_rate);
+ popt.learning_rate =
+ iniparser_getdouble(ini, "Network:Learning_rate", popt.learning_rate);
+ popt.decay_steps =
+ iniparser_getint(ini, "Network:Decay_steps", popt.decay_steps);
+ popt.decay_rate =
+ iniparser_getdouble(ini, "Network:Decay_rate", popt.decay_rate);
popt.beta1 = iniparser_getdouble(ini, "Network:beta1", popt.beta1);
popt.beta2 = iniparser_getdouble(ini, "Network:beta2", popt.beta2);
popt.epsilon = iniparser_getdouble(ini, "Network:epsilon", popt.epsilon);
std::make_shared<Conv2DLayer>();
std::string input_shape_str = iniparser_getstring(
- ini, (layer_name + ":Input_Shape").c_str(), unknown);
+ ini, (layer_name + ":Input_Shape").c_str(), unknown);
if (input_shape_str.compare("Unknown") != 0) {
TensorDim d;
NN_INI_RETURN_STATUS();
}
- status =
- getValues(CONV2D_DIM,
- iniparser_getstring(
- ini, (layer_name + ":kernel_size").c_str(), unknown),
- (int *)size);
+ status = getValues(CONV2D_DIM,
+ iniparser_getstring(
+ ini, (layer_name + ":kernel_size").c_str(), unknown),
+ (int *)size);
NN_INI_RETURN_STATUS();
status = conv2d_layer->setSize(size, Layer::PropertyType::kernel_size);
NN_INI_RETURN_STATUS();
- status = getValues(
- CONV2D_DIM,
- iniparser_getstring(ini, (layer_name + ":stride").c_str(), getValues({1,1})),
- (int *)size);
+ status =
+ getValues(CONV2D_DIM,
+ iniparser_getstring(ini, (layer_name + ":stride").c_str(),
+ getValues({1, 1})),
+ (int *)size);
NN_INI_RETURN_STATUS();
status = conv2d_layer->setSize(size, Layer::PropertyType::stride);
NN_INI_RETURN_STATUS();
- status = getValues(CONV2D_DIM,
- iniparser_getstring(
- ini, (layer_name + ":padding").c_str(), getValues({0,0})),
- (int *)size);
-
+ status =
+ getValues(CONV2D_DIM,
+ iniparser_getstring(ini, (layer_name + ":padding").c_str(),
+ getValues({0, 0})),
+ (int *)size);
+
NN_INI_RETURN_STATUS();
status = conv2d_layer->setSize(size, Layer::PropertyType::padding);
NN_INI_RETURN_STATUS();
status = conv2d_layer->setFilter(
- iniparser_getint(ini, (layer_name + ":filter").c_str(), 0));
+ iniparser_getint(ini, (layer_name + ":filter").c_str(), 0));
NN_INI_RETURN_STATUS();
conv2d_layer->setBiasZero(b_zero);
conv2d_layer->setWeightInit((WeightIniType)parseType(
- iniparser_getstring(ini, (layer_name + ":WeightIni").c_str(),
- "xavier_uniform"),
- TOKEN_WEIGHTINI));
+ iniparser_getstring(ini, (layer_name + ":WeightIni").c_str(),
+ "xavier_uniform"),
+ TOKEN_WEIGHTINI));
status = parseWeightDecay(ini, layer_name, weight_decay);
NN_INI_RETURN_STATUS();
int idx = 0;
std::vector<std::reference_wrapper<Tensor>>::iterator w_iter, g_iter;
for (w_iter = weights.begin(), g_iter = gradients.begin();
- w_iter != weights.end(); ++w_iter, ++g_iter) {
+ w_iter != weights.end(); ++w_iter, ++g_iter) {
Tensor &x = *w_iter;
Tensor x_grad = *g_iter;
}
}
ml_logw("Input activation %s cannot be identified. "
- "Moved to NO activation layer by default.", ll.c_str());
+ "Moved to NO activation layer by default.",
+ ll.c_str());
ret = (unsigned int)ActiType::ACT_NONE;
break;
case TOKEN_LAYER:
* BatchNormalizationLayer has 0, 1, 5, 6, 7 properties.
*/
std::array<std::string, 18> property_string = {
- "input_shape", "bias_init_zero", "normalization", "standardization",
- "activation", "epsilon", "weight_decay", "weight_decay_lambda",
- "unit", "weight_ini", "filter", "kernel_size",
- "stride", "padding", "pooling_size", "pooling",
+ "input_shape", "bias_init_zero", "normalization", "standardization",
+ "activation", "epsilon", "weight_decay", "weight_decay_lambda",
+ "unit", "weight_ini", "filter", "kernel_size",
+ "stride", "padding", "pooling_size", "pooling",
"flatten", "unknown"};
for (i = 0; i < property_string.size(); i++) {
return status;
}
-const char* getValues(std::vector<int> values, const char* delimiter) {
+const char *getValues(std::vector<int> values, const char *delimiter) {
std::stringstream vec_str;
if (values.empty())
return "unknown";
std::copy(values.begin(), values.end() - 1,
- std::ostream_iterator<int>(vec_str, delimiter));
+ std::ostream_iterator<int>(vec_str, delimiter));
vec_str << values.back();
return std::move(vec_str.str().c_str());
Tensor &Tensor::operator=(const Tensor &rhs) {
using std::swap;
-
+
Tensor tmp(rhs);
swap(*this, tmp);
return *this;
throw std::runtime_error("cannot set value of non-contiguous tensor");
}
- getData()[batch * dim.getFeatureLen() +
- c * dim.height() * dim.width() + h * dim.width() + w] = value;
+ getData()[batch * dim.getFeatureLen() + c * dim.height() * dim.width() +
+ h * dim.width() + w] = value;
}
template <typename T> void Tensor::setDist(T dist) {
dim.height(d[0][0].size());
dim.width(d[0][0][0].size());
data = std::shared_ptr<float>(new float[dim.getDataLen()],
- std::default_delete<float[]>());
+ std::default_delete<float[]>());
is_contiguous = true;
for (unsigned int i = 0; i < dim.batch(); ++i)
#ifdef __cplusplus
#include "nntrainer_log.h"
-#include <tensor.h>
#include <fstream>
#include <gtest/gtest.h>
+#include <tensor.h>
#define tolerance 10e-5
 * @brief return a tensor of the given dimension filled with a constant value
*/
nntrainer::Tensor constant(float value, unsigned int batch, unsigned channel,
- unsigned height, unsigned width) ;
+ unsigned height, unsigned width);
/**
* @brief replace string and save in file
// chain and add_i(float) add_i(Tensor)
TEST_F(nntrainer_LazyTensorOpsTest, LazyTensorOps_05_p) {
expected = original.add(6.1);
- EXPECT_TRUE(target.chain().add_i(2.1).add_i(constant_(2.0), 2).run() == expected);
+ EXPECT_TRUE(target.chain().add_i(2.1).add_i(constant_(2.0), 2).run() ==
+ expected);
}
// chain and add_i(float) subtract(float)
TEST_F(nntrainer_LazyTensorOpsTest, ApplyIf_01_p) {
- EXPECT_TRUE(target.chain().applyIf(true, _LIFT(add_i), constant_(4.0), 0.5).run() ==
- original.add(2.0));
+ EXPECT_TRUE(
+ target.chain().applyIf(true, _LIFT(add_i), constant_(4.0), 0.5).run() ==
+ original.add(2.0));
EXPECT_TRUE(target.chain().applyIf(true, _LIFT(add_i), 2.0f).run() ==
- original.add(2.0));
+ original.add(2.0));
EXPECT_TRUE(target.chain().applyIf(true, _LIFT(add_i), 2.0).run() ==
- original.add(2.0));
-
+ original.add(2.0));
}
TEST_F(nntrainer_LazyTensorOpsTest, ApplyIf_01_n) {