}
nntrainer->num_push_data++;
- ml_logd("Received data (%" PRId64 "/%" PRId64 "(total))",
- nntrainer->num_push_data, nntrainer->total_num_samples);
+ ml_logd("Received data (%u/%u(total))", nntrainer->num_push_data,
+ nntrainer->total_num_samples);
if (nntrainer->total_num_samples < nntrainer->num_push_data) {
ml_logd("Already received all data required for the train, "
"but push_data is called");
return -1;
}
- ml_logd("number of inputs(%" PRId64 ") and labels(%" PRId64 ")",
- nntrainer->num_inputs, nntrainer->num_labels);
+ ml_logd("number of inputs(%u) and labels(%u)", nntrainer->num_inputs,
+ nntrainer->num_labels);
unsigned int idx = 0, i = 0;
i = data->queue_rear;
data->is_data_full_locked = FALSE;
}
- ml_logd("(pop/push: %" PRId64 "/%" PRId64 ")", data->pop_count,
- data->push_count);
- ml_logd("T-pushed: %" PRId64 "/%" PRId64 ", V-pushed:%" PRId64 "/%" PRId64
- "\n",
+ ml_logd("(pop/push: %u/%u)", data->pop_count, data->push_count);
+ ml_logd("T-pushed: %u/%u, V-pushed:%u/%u\n",
nntrainer->train_data->push_count, nntrainer->num_training_samples,
nntrainer->valid_data->push_count, nntrainer->num_validation_samples);
auto data = reinterpret_cast<NNTrainer::InputTensorsInfo *>(user_data);
ml_logd("<called>");
- ml_logd("(pop/push: %" PRId64 "/%" PRId64 ")", data->pop_count,
- data->push_count);
+ ml_logd("(pop/push: %u/%u)", data->pop_count, data->push_count);
pid_t pid = getpid();
pid_t tid = syscall(SYS_gettid);
ml_logd("<called>");
ml_logd("pid[%d], tid[%d]", pid, tid);
ml_logd("front:%d, rear:%d", data->queue_front, data->queue_rear);
- ml_logd("num_inputs: %" PRId64 ", num_labels: %" PRId64 "", data->num_inputs,
- data->num_labels);
+ ml_logd("num_inputs: %u, num_labels: %u", data->num_inputs, data->num_labels);
unsigned int i = 0;
unsigned int idx = data->queue_front;
ml_logd("Delete, queue_front: %d", idx);
for (i = 0; i < data->num_inputs; i++) {
- ml_logd("memcpy Addr %p, %p, size=%" PRId64 "\n", *(input + i),
+ ml_logd("memcpy Addr %p, %p, size=%u\n", *(input + i),
data->tensor_data[idx].inputs[i], data->input_size[i]);
std::memcpy(*(input + i), data->tensor_data[idx].inputs[i],
data->input_size[i]);
}
for (i = 0; i < data->num_labels; i++) {
- ml_logd("memcpy Addr %p, %p, size=%" PRId64 "", *(label + i),
+ ml_logd("memcpy Addr %p, %p, size=%u", *(label + i),
data->tensor_data[idx].labels[i], data->label_size[i]);
std::memcpy(*(label + i), data->tensor_data[idx].labels[i],
data->label_size[i]);
data->queue_count--;
data->queue_front = (data->queue_front + 1) % data->queue_size;
- ml_logd("(pop/push: %" PRId64 "/%" PRId64 ")", data->pop_count,
- data->push_count);
+ ml_logd("(pop/push: %u/%u)", data->pop_count, data->push_count);
if (data->pop_count < data->total_num_samples) { // train or valid num samples
*last = false;
ml_logd("<leave>");
}
-NNTrainer::InputTensorsInfo::InputTensorsInfo(int64_t _total_num_samples,
- int64_t _num_inputs,
- int64_t _num_labels,
- int64_t _tensors_size[]) :
+NNTrainer::InputTensorsInfo::InputTensorsInfo(unsigned int _total_num_samples,
+ unsigned int _num_inputs,
+ unsigned int _num_labels,
+ unsigned int _tensors_size[]) :
is_data_wait_locked(0),
is_data_full_locked(0),
queue_front(0),
num_labels(_num_labels) {
ml_logd("<called>");
- const int min_queue_size = 30;
+ const unsigned int min_queue_size = 30;
queue_size =
(_total_num_samples > min_queue_size) ? min_queue_size : _total_num_samples;
ml_logd("queue_size:%d", queue_size);
tensor_data.reserve(queue_size);
- int64_t idx = 0, i = 0;
+ unsigned int idx = 0, i = 0;
for (i = 0; i < num_inputs; i++) {
input_size[i] = _tensors_size[idx++];
- ml_logd("input_size[%" PRId64 "]=%" PRId64 "", i, input_size[i]);
+ ml_logd("input_size[%u]=%u", i, input_size[i]);
}
for (i = 0; i < num_labels; i++) {
label_size[i] = _tensors_size[idx++];
- ml_logd("label_size[%" PRId64 "]=%" PRId64 "", i, label_size[i]);
+ ml_logd("label_size[%u]=%u", i, label_size[i]);
}
unsigned int cur_queue_size = 0;
/* make queue */
while (cur_queue_size < queue_size) {
NNTrainer::TensorData t_data;
- int i = 0;
+ unsigned int i = 0;
char *p_data = nullptr;
for (i = 0; i < num_inputs; i++) {
p_data = new char[input_size[i]];
void NNTrainer::NNTrainerTrain::getNNStreamerProperties(
const GstTensorTrainerProperties *prop) {
- int64_t i;
+ unsigned int i;
ml_logd("<called>");
num_tensors = prop->input_meta.num_tensors;
- ml_logd("num_tensors: %" PRId64 "", num_tensors);
+ ml_logd("num_tensors: %u", num_tensors);
for (i = 0; i < num_tensors; i++) {
tensors_inputsize[i] = gst_tensor_info_get_size(&prop->input_meta.info[i]);
- ml_logd("tensors_inputsize[%" PRId64 "]:%" PRId64 "", i,
- tensors_inputsize[i]);
+ ml_logd("tensors_inputsize[%u]:%u", i, tensors_inputsize[i]);
}
- // for mnist test
-#if 0
- tensors_inputsize[1] = 40;
- // tensors_inputsize[0] = 3686400; // 3:640:480:1 float32, 3x640x480x1x4
- // tensors_inputsize[1] = 40; // 1:1:10:1 uint8, 1x1x10x1x4
- ml_logd("for Test: tensors_inputsize[1]:%d", tensors_inputsize[1]);
-#endif
+
num_inputs = prop->num_inputs;
num_labels = prop->num_labels;
num_training_samples = prop->num_training_samples;
total_num_samples =
(num_training_samples + num_validation_samples) * num_epochs;
- ml_logd("num_inputs: %" PRId64 "", num_inputs);
- ml_logd("num_labels: %" PRId64 "", num_labels);
- ml_logd("num_training_samples: %" PRId64 "", num_training_samples);
- ml_logd("num_validation_samples: %" PRId64 "", num_validation_samples);
- ml_logd("num_epochs: %" PRId64 "", num_epochs);
- ml_logd("Total number of data to be received: %" PRId64 "",
- total_num_samples);
+ ml_logd("num_inputs: %u", num_inputs);
+ ml_logd("num_labels: %u", num_labels);
+ ml_logd("num_training_samples: %u", num_training_samples);
+ ml_logd("num_validation_samples: %u", num_validation_samples);
+ ml_logd("num_epochs: %u", num_epochs);
+ ml_logd("Total number of data to be received: %u", total_num_samples);
ml_logd("model_config: %s", model_config.c_str());
ml_logd("model_save_path: %s", model_save_path.c_str());
ml_logd("<leave>");
ml_logd("<called>");
if (nntrainer)
nntrainer_model_destructor(fw, prop, private_data);
-
try {
nntrainer = new NNTrainer::NNTrainerTrain(prop, prop->model_config);
} catch (const std::exception &e) {
*private_data = nntrainer;
ml_logd("<leave>");
+
return 0;
}
const GstTensorTrainerProperties *prop,
void **private_data) {
ml_logd("<called>");
+
int status = nntrainer_model_construct_with_conf(fw, prop, private_data);
ml_logd("<leave>");
float training_loss, validation_loss;
bool is_training_complete;
- int64_t tensors_inputsize[NNS_TENSOR_SIZE_LIMIT];
- int64_t num_tensors; /**< The number of tensors in the received a sample */
- int64_t num_inputs; /**< The number of tensors used as input in the received a
- sample */
- int64_t num_labels; /**< The number of tensors used as label in the received a
- sample */
- int64_t num_training_samples; /**< The number of training samples to be taken
- for training model */
- int64_t num_validation_samples; /**< The number of validation samples to be
- taken for validation model */
- int64_t total_num_samples; /**< Total number of samples received for creating
- model */
- int64_t num_epochs; /**< The number of epoch */
- int64_t num_push_data; /**< The number of samples pushed by
- NNStreamer(tensor_trainer)*/
+ unsigned int tensors_inputsize[NNS_TENSOR_SIZE_LIMIT];
+ unsigned int
+ num_tensors; /**< The number of tensors in the received a sample */
+ unsigned int num_inputs; /**< The number of tensors used as input in the
+ received a sample */
+ unsigned int num_labels; /**< The number of tensors used as label in the
+ received a sample */
+ unsigned int num_training_samples; /**< The number of training samples to be
+ taken for training model */
+ unsigned int num_validation_samples; /**< The number of validation samples to
+ be taken for validation model */
+ unsigned int total_num_samples; /**< Total number of samples received for
+ creating model */
+ unsigned int num_epochs; /**< The number of epoch */
+ unsigned int num_push_data; /**< The number of samples pushed by
+ NNStreamer(tensor_trainer) */
std::string model_config;
std::string model_save_path; /**< Model is finally stored */
* sample
* @param _tensors_size[] size of each tensor in a sample
*/
- InputTensorsInfo(int64_t _total_num_samples, int64_t _num_inputs,
- int64_t _num_labels, int64_t _tensors_size[]);
+ InputTensorsInfo(unsigned int _total_num_samples, unsigned int _num_inputs,
+ unsigned int _num_labels, unsigned int _tensors_size[]);
/**
* @brief Destroy the InputTensorsInfo object
unsigned int queue_front;
unsigned int queue_rear;
unsigned int queue_count;
- int64_t push_count; /**< The number of samples pushed to queue by
- NNStreamer(tensor_trainer) */
- int64_t pop_count; /**< The number of pop from the queue for pushing samples
- to nntrainer */
- int64_t input_size[NNS_TENSOR_SIZE_LIMIT]; // feature size * data type
- int64_t label_size[NNS_TENSOR_SIZE_LIMIT];
- int64_t total_num_samples; /**< Total number of samples received for creating
- model */
- int64_t num_inputs; /**< The number of tensors in the received a sample */
- int64_t num_labels; /**< The number of tensors used as label in the received a
- sample */
+ unsigned int push_count; /**< The number of samples pushed to queue by
+ NNStreamer(tensor_trainer) */
+ unsigned int pop_count; /**< The number of pop from the queue for pushing
+ samples to nntrainer */
+ unsigned int
+ input_size[NNS_TENSOR_SIZE_LIMIT]; /**< feature size * data type */
+ unsigned int label_size[NNS_TENSOR_SIZE_LIMIT];
+ unsigned int total_num_samples; /**< Total number of samples received for
+ creating model */
+ unsigned int
+ num_inputs; /**< The number of tensors in the received a sample */
+ unsigned int num_labels; /**< The number of tensors used as label in the
+ received a sample */
std::vector<TensorData>
tensor_data; /**< Manage multiple inputs and labels data */