- Add a lock around setting the data-ready flag.
- Change the order in which the condition is set.
- Fix non-deterministic behavior of DataBuffer.
**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped
Signed-off-by: jijoong.moon <jijoong.moon@samsung.com>
int DataBuffer::clear(BufferType type) {
int status = ML_ERROR_NONE;
+ NN_EXCEPTION_NOTI(DATA_NOT_READY);
switch (type) {
case BUF_TRAIN: {
train_running = false;
this->train_data_label.clear();
this->cur_train_bufsize = 0;
this->rest_train = max_train;
- trainReadyFlag = DATA_NOT_READY;
} break;
case BUF_VAL: {
val_running = false;
this->val_data_label.clear();
this->cur_val_bufsize = 0;
this->rest_val = max_val;
- valReadyFlag = DATA_NOT_READY;
} break;
case BUF_TEST: {
test_running = false;
this->test_data_label.clear();
this->cur_test_bufsize = 0;
this->rest_test = max_test;
- testReadyFlag = DATA_NOT_READY;
} break;
default:
ml_loge("Error: Not Supported Data Type");
switch (type) {
case BUF_TRAIN: {
std::vector<int> list;
- std::unique_lock<std::mutex> ultrain(readyTrainData);
- cv_train.wait(ultrain, [this]() -> int { return trainReadyFlag; });
-
- if (train_data.size() < batch_size || trainReadyFlag == DATA_ERROR ||
- trainReadyFlag == DATA_END) {
- return false;
+ while (true) {
+ std::unique_lock<std::mutex> ultrain(readyTrainData);
+ cv_train.wait(ultrain, [this]() -> bool { return trainReadyFlag; });
+ if (trainReadyFlag == DATA_ERROR || trainReadyFlag == DATA_END) {
+ return false;
+ }
+ if (trainReadyFlag == DATA_READY && train_data.size() != 0) {
+ break;
+ }
}
for (k = 0; k < batch_size; ++k) {
} break;
case BUF_VAL: {
std::vector<int> list;
- std::unique_lock<std::mutex> ulval(readyValData);
- cv_val.wait(ulval, [this]() -> bool { return valReadyFlag; });
- if (val_data.size() < batch_size || valReadyFlag == DATA_ERROR ||
- valReadyFlag == DATA_END) {
- return false;
+ while (true) {
+ std::unique_lock<std::mutex> ulval(readyValData);
+ cv_val.wait(ulval, [this]() -> bool { return valReadyFlag; });
+ if (valReadyFlag == DATA_ERROR || valReadyFlag == DATA_END) {
+ return false;
+ }
+ if (valReadyFlag == DATA_READY && val_data.size() != 0) {
+ break;
+ }
}
for (k = 0; k < batch_size; ++k) {
} break;
case BUF_TEST: {
std::vector<int> list;
- std::unique_lock<std::mutex> ultest(readyTestData);
- cv_test.wait(ultest, [this]() -> bool { return testReadyFlag; });
+ while (true) {
+ std::unique_lock<std::mutex> ultest(readyTestData);
+ cv_test.wait(ultest, [this]() -> bool { return testReadyFlag; });
- if (test_data.size() < batch_size || testReadyFlag == DATA_ERROR ||
- testReadyFlag == DATA_END) {
- return false;
+ if (testReadyFlag == DATA_ERROR || testReadyFlag == DATA_END) {
+ NN_EXCEPTION_NOTI(DATA_END);
+ return false;
+ }
+ if (testReadyFlag == DATA_READY && test_data.size() != 0) {
+ break;
+ }
}
for (k = 0; k < batch_size; ++k) {
return false;
break;
}
-
data_lock.unlock();
return true;
this->cur_val_bufsize = 0;
this->cur_test_bufsize = 0;
+ readyTrainData.lock();
trainReadyFlag = DATA_NOT_READY;
+ readyTrainData.unlock();
+ readyValData.lock();
valReadyFlag = DATA_NOT_READY;
+ readyValData.unlock();
+ readyTestData.lock();
testReadyFlag = DATA_NOT_READY;
-
+ readyTestData.unlock();
return ML_ERROR_NONE;
}
}
while ((*running)) {
+
+ readyTrainData.lock();
+ trainReadyFlag = DATA_NOT_READY;
+ readyTrainData.unlock();
+ readyValData.lock();
+ valReadyFlag = DATA_NOT_READY;
+ readyValData.unlock();
+ readyTestData.lock();
+ testReadyFlag = DATA_NOT_READY;
+ readyTestData.unlock();
+
if (mark.size() == 0) {
NN_EXCEPTION_NOTI(DATA_END);
break;
}
- trainReadyFlag = DATA_NOT_READY;
- valReadyFlag = DATA_NOT_READY;
- testReadyFlag = DATA_NOT_READY;
+
if (buf_size - (*cur_size) > 0 && (*rest_size) > 0) {
std::vector<float> vec;
std::vector<float> veclabel;
veclabel_arr[0] = veclabel;
while ((*running)) {
- trainReadyFlag = DATA_NOT_READY;
- valReadyFlag = DATA_NOT_READY;
- testReadyFlag = DATA_NOT_READY;
+ endflag = false;
+ NN_EXCEPTION_NOTI(DATA_NOT_READY);
if (buf_size - (*cur_size) > 0) {
/** @todo Update to support multiple inputs later */
status = callback(vec_arr, veclabel_arr, &endflag, NULL);
-
- if (status == ML_ERROR_NONE && !endflag) {
- for (unsigned int i = 0; i < input_dim.batch(); ++i) {
- std::vector<float> v;
- std::vector<float> vl;
- unsigned int I =
- i * input_dim.channel() * input_dim.height() * input_dim.width();
- for (unsigned int j = 0; j < input_dim.channel(); ++j) {
- unsigned int J = j * input_dim.height() * input_dim.width();
- for (unsigned int k = 0; k < input_dim.height() * input_dim.width();
- ++k) {
- unsigned int K = I + J + k;
- v.push_back(vec[K]);
- }
- }
-
- I = i * class_num;
- for (unsigned int j = 0; j < class_num; ++j) {
- vl.push_back(veclabel[I + j]);
- }
-
- data_lock.lock();
- data->push_back(v);
- datalabel->push_back(vl);
- (*cur_size)++;
- data_lock.unlock();
- }
+ if (endflag) {
+ NN_EXCEPTION_NOTI(DATA_END);
+ free(vec);
+ free(veclabel);
+ free(vec_arr);
+ free(veclabel_arr);
+ return;
+ }
+ if (status != ML_ERROR_NONE) {
+ NN_EXCEPTION_NOTI(DATA_ERROR);
+ free(vec);
+ free(veclabel);
+ free(vec_arr);
+ free(veclabel_arr);
+ return;
}
- }
-
- if (buf_size == (*cur_size) || endflag) {
- switch (type) {
- case BUF_TRAIN: {
- std::lock_guard<std::mutex> lgtrain(readyTrainData);
- if (status != ML_ERROR_NONE) {
- trainReadyFlag = DATA_ERROR;
- cv_train.notify_all();
- free(vec);
- free(veclabel);
- free(vec_arr);
- free(veclabel_arr);
- return;
- } else if (endflag) {
- trainReadyFlag = DATA_END;
- cv_train.notify_all();
- free(vec);
- free(veclabel);
- free(vec_arr);
- free(veclabel_arr);
- return;
- } else {
- trainReadyFlag = DATA_READY;
- cv_train.notify_all();
- }
- } break;
- case BUF_VAL: {
- std::lock_guard<std::mutex> lgval(readyValData);
- if (status != ML_ERROR_NONE) {
- valReadyFlag = DATA_ERROR;
- cv_val.notify_all();
- free(vec);
- free(veclabel);
- free(vec_arr);
- free(veclabel_arr);
- return;
- } else if (endflag) {
- valReadyFlag = DATA_END;
- cv_val.notify_all();
- free(vec);
- free(veclabel);
- free(vec_arr);
- free(veclabel_arr);
- return;
- } else {
- valReadyFlag = DATA_READY;
- cv_val.notify_all();
+ for (unsigned int i = 0; i < input_dim.batch(); ++i) {
+ std::vector<float> v;
+ std::vector<float> vl;
+ unsigned int I =
+ i * input_dim.channel() * input_dim.height() * input_dim.width();
+ for (unsigned int j = 0; j < input_dim.channel(); ++j) {
+ unsigned int J = j * input_dim.height() * input_dim.width();
+ for (unsigned int k = 0; k < input_dim.height() * input_dim.width();
+ ++k) {
+ unsigned int K = I + J + k;
+ v.push_back(vec[K]);
+ }
}
- } break;
- case BUF_TEST: {
- std::lock_guard<std::mutex> lgtest(readyTestData);
- if (status != ML_ERROR_NONE) {
- testReadyFlag = DATA_ERROR;
- cv_test.notify_all();
- free(vec);
- free(veclabel);
- free(vec_arr);
- free(veclabel_arr);
- return;
- } else if (endflag) {
- testReadyFlag = DATA_END;
- cv_test.notify_all();
- free(vec);
- free(veclabel);
- free(vec_arr);
- free(veclabel_arr);
- return;
- } else {
- testReadyFlag = DATA_READY;
- cv_test.notify_all();
+ I = i * class_num;
+ for (unsigned int j = 0; j < class_num; ++j) {
+ vl.push_back(veclabel[I + j]);
}
- } break;
- default:
- break;
+ data_lock.lock();
+ data->push_back(v);
+ datalabel->push_back(vl);
+ (*cur_size)++;
+ data_lock.unlock();
}
}
+ if (buf_size == (*cur_size)) {
+ NN_EXCEPTION_NOTI(DATA_READY);
+ }
}
+
free(vec);
free(veclabel);
free(vec_arr);