* @param[out] status for error handling
* @retval true/false (false : end of data)
*/
-bool getMiniBatch_train(std::vector<std::vector<std::vector<float>>> &outVec,
- std::vector<std::vector<std::vector<float>>> &outLabel,
- int &status) {
+bool getMiniBatch_train(float *outVec, float *outLabel, int *status) {
std::vector<int> memI;
std::vector<int> memJ;
unsigned int count = 0;
}
for (unsigned int i = 0; i < count; i++) {
- std::vector<std::vector<float>> out;
- std::vector<std::vector<float>> outL;
std::vector<float> o;
std::vector<float> l;
getData(F, o, l, memI[i]);
- out.push_back(o);
- outL.push_back(l);
-
- outVec.push_back(out);
- outLabel.push_back(outL);
+ for (unsigned int j = 0; j < feature_size; ++j)
+ outVec[i * feature_size + j] = o[j];
+ for (unsigned int j = 0; j < total_label_size; ++j)
+ outLabel[i * total_label_size + j] = l[j];
}
F.close();
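/* Illustrative sketch (not part of the patch): with the pointer-based
 * signature, getMiniBatch_train() fills flat, row-major buffers instead of
 * nested vectors.  The concrete sizes below (mini_batch, feature_size,
 * total_label_size) are assumptions standing in for whatever the
 * application actually uses. */
#include <vector>

void example_call_getMiniBatch_train() {
  const unsigned int mini_batch = 32;        /* assumed batch size */
  const unsigned int feature_size = 28 * 28; /* assumed input size */
  const unsigned int total_label_size = 10;  /* assumed number of classes */

  std::vector<float> vec(mini_batch * feature_size);
  std::vector<float> label(mini_batch * total_label_size);
  int status = 0;

  /* sample i, feature j lives at vec[i * feature_size + j];
   * sample i, class c lives at label[i * total_label_size + c] */
  getMiniBatch_train(vec.data(), label.data(), &status);
}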
* @param[out] status for error handling
* @retval true/false (false : end of data)
*/
-bool getMiniBatch_val(std::vector<std::vector<std::vector<float>>> &outVec,
- std::vector<std::vector<std::vector<float>>> &outLabel,
- int &status) {
+bool getMiniBatch_val(float *outVec, float *outLabel, int *status) {
+
std::vector<int> memI;
std::vector<int> memJ;
unsigned int count = 0;
}
for (unsigned int i = 0; i < count; i++) {
- std::vector<std::vector<float>> out;
- std::vector<std::vector<float>> outL;
std::vector<float> o;
std::vector<float> l;
getData(F, o, l, memI[i]);
- out.push_back(o);
- outL.push_back(l);
-
- outVec.push_back(out);
- outLabel.push_back(outL);
+ for (unsigned int j = 0; j < feature_size; ++j)
+ outVec[i * feature_size + j] = o[j];
+ for (unsigned int j = 0; j < total_label_size; ++j)
+ outLabel[i * total_label_size + j] = l[j];
}
F.close();
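/* Illustrative sketch (not part of the patch): the validation generator
 * reports end of data by returning false, so a caller can drain the whole
 * validation set with a simple loop.  Buffer sizes and ownership are
 * assumptions, as in the training sketch above. */
void example_drain_validation_set(float *vec, float *label) {
  int status = 0;
  while (getMiniBatch_val(vec, label, &status)) {
    /* consume one validation batch of mini_batch * feature_size inputs
     * and mini_batch * total_label_size labels here */
  }
}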
* @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
*/
int setFunc(BufferType type,
- std::function<bool(vec_3d &, vec_3d &, int &)> func);
+ std::function<bool(float *, float *, int *)> func);
/**
* @brief Update Data Buffer ( it is for child thread )
* @retval true / false generate all data for this epoch
*
*/
- std::function<bool(vec_3d &, vec_3d &, int &)> callback_train;
- std::function<bool(vec_3d &, vec_3d &, int &)> callback_val;
- std::function<bool(vec_3d &, vec_3d &, int &)> callback_test;
+ std::function<bool(float *, float *, int *)> callback_train;
+ std::function<bool(float *, float *, int *)> callback_val;
+ std::function<bool(float *, float *, int *)> callback_test;
};
} // namespace nntrainer
#endif /* __cplusplus */
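/* Illustrative sketch (not part of the patch): with the new signature a
 * generator is registered as a lambda taking raw float pointers plus an int
 * pointer for the status out-parameter.  DataBufferFromCallback, BufferType
 * and BUF_TRAIN are the types declared above; the lambda body is a
 * placeholder. */
int example_register_train_callback(DataBufferFromCallback &buffer) {
  return buffer.setFunc(
    BUF_TRAIN, [](float *vec, float *label, int *status) -> bool {
      /* fill vec / label with one generated batch here */
      *status = 0; /* report success through the out-parameter */
      return true; /* false signals end of data */
    });
}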
* @retval #ML_ERROR_NONE Successful.
* @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
*/
- int train(std::function<bool(vec_3d &, vec_3d &, int &)> function_train,
- std::function<bool(vec_3d &, vec_3d &, int &)> function_val,
- std::function<bool(vec_3d &, vec_3d &, int &)> function_test);
+ int train(std::function<bool(float *, float *, int *)> function_train,
+ std::function<bool(float *, float *, int *)> function_val,
+ std::function<bool(float *, float *, int *)> function_test);
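/* Illustrative sketch (not part of the patch): NeuralNetwork::train() now
 * takes three generators with the float-pointer signature.  Reusing one
 * placeholder lambda for all three splits is only for brevity here. */
int example_train(NeuralNetwork &net) {
  auto generator = [](float *vec, float *label, int *status) -> bool {
    /* fill vec / label with one batch; return false when data is exhausted */
    *status = 0;
    return true;
  };
  return net.train(generator, generator, generator);
}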
/**
* @brief check neural network whether the hyper-parameters are set.
}
int DataBufferFromCallback::setFunc(
- BufferType type, std::function<bool(vec_3d &, vec_3d &, int &)> func) {
+ BufferType type, std::function<bool(float *, float *, int *)> func) {
int status = ML_ERROR_NONE;
switch (type) {
bool *running = NULL;
std::vector<std::vector<float>> *data = NULL;
std::vector<std::vector<float>> *datalabel = NULL;
- std::function<bool(vec_3d &, vec_3d &, int &)> callback;
+ std::function<bool(float *, float *, int *)> callback;
switch (type) {
case BUF_TRAIN: {
return;
}
+ float *vec =
+ (float *)malloc(sizeof(float) * input_dim.batch() * input_dim.channel() *
+ input_dim.height() * input_dim.width());
+ float *veclabel =
+ (float *)malloc(sizeof(float) * input_dim.batch() * class_num);
+
while ((*running)) {
if (buf_size - (*cur_size) > 0) {
- vec_3d vec;
- vec_3d veclabel;
-
- bool endflag = callback(vec, veclabel, status);
+ bool endflag = callback(vec, veclabel, &status);
if (!endflag)
break;
- if (vec.size() != veclabel.size()) {
- status = ML_ERROR_INVALID_PARAMETER;
- }
-
- for (unsigned int i = 0; i < vec.size(); ++i) {
+ for (unsigned int i = 0; i < input_dim.batch(); ++i) {
std::vector<float> v;
std::vector<float> vl;
- for (unsigned int j = 0; j < vec[i].size(); ++j) {
- for (unsigned int k = 0; k < vec[i][j].size(); ++k) {
- v.push_back(vec[i][j][k]);
+ unsigned int I =
+ i * input_dim.channel() * input_dim.height() * input_dim.width();
+ for (unsigned int j = 0; j < input_dim.channel(); ++j) {
+ unsigned int J = j * input_dim.height() * input_dim.width();
+ for (unsigned int k = 0; k < input_dim.height() * input_dim.width();
+ ++k) {
+ unsigned int K = I + J + k;
+ v.push_back(vec[K]);
}
}
- for (unsigned int j = 0; j < veclabel[i].size(); ++j) {
- for (unsigned int k = 0; k < veclabel[i][j].size(); ++k) {
- vl.push_back(veclabel[i][j][k]);
- }
+
+ I = i * class_num;
+ for (unsigned int j = 0; j < class_num; ++j) {
+ vl.push_back(veclabel[I + j]);
}
data_lock.lock();
}
}
}
+ free(vec);
+ free(veclabel);
}
} /* namespace nntrainer */
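/* Illustrative sketch (not part of the patch): the I / J / K arithmetic in
 * updateData() above flattens a batch laid out as
 * [batch][channel][height * width].  For a single element this is the usual
 * row-major index computation: */
static inline unsigned int flat_index(unsigned int b, unsigned int c,
                                      unsigned int hw, unsigned int channels,
                                      unsigned int height_x_width) {
  /* b * channels * height_x_width == I, c * height_x_width == J, hw == k,
   * so the flat offset K equals I + J + k, exactly as computed above */
  return b * channels * height_x_width + c * height_x_width + hw;
}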
* @brief Run NeuralNetwork train
*/
int NeuralNetwork::train(
- std::function<bool(vec_3d &, vec_3d &, int &)> train_func,
- std::function<bool(vec_3d &, vec_3d &, int &)> val_func,
- std::function<bool(vec_3d &, vec_3d &, int &)> test_func) {
+ std::function<bool(float *, float *, int *)> train_func,
+ std::function<bool(float *, float *, int *)> val_func,
+ std::function<bool(float *, float *, int *)> test_func) {
int status = ML_ERROR_NONE;