From: jijoong.moon
Date: Fri, 24 Jul 2020 12:15:21 +0000 (+0900)
Subject: [ Coverity ] Fix Coverity Issues
X-Git-Tag: accepted/tizen/unified/20200728.135447^0
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4f9fd5dbfe21f3a086f977d0122a5499fbd5cc20;p=platform%2Fcore%2Fml%2Fnntrainer.git

[ Coverity ] Fix Coverity Issues

This PR includes fixes for issues reported by Coverity:
- Wrap NeuralNetwork::loadFromConfig()/init() in try/catch and finalize the
  network on failure (Classification, DeepQ, MNIST applications).
- Use interpreter->inputs()/outputs() directly instead of scanning every
  tensor by name in the Classification application.
- Use uint64_t for data ids and file offsets, and drop an always-false
  overflow check.
- Replace variable-length arrays with heap allocations and free leaked
  buffers (capi_func.c, databuffer_func.cpp, bitmap_helpers.cpp, tests).
- Add move constructors/assignment operators to the Layer classes, Optimizer,
  and TensorDim; implement TensorDim copy/move assignment via swap().

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: jijoong.moon
---

diff --git a/Applications/Classification/jni/main.cpp b/Applications/Classification/jni/main.cpp
index ef8f324..889f20d 100644
--- a/Applications/Classification/jni/main.cpp
+++ b/Applications/Classification/jni/main.cpp
@@ -119,10 +119,6 @@ static int rangeRandom(int min, int max) {
  * @param[out] feature_input save output of tflite
  */
 void getFeature(const string filename, vector<float> &feature_input) {
-  int input_size;
-  int output_size;
-  std::vector<int> output_idx_list;
-  std::vector<int> input_idx_list;
   int input_dim[4];
   int output_dim[4];
   std::string model_path = "../../res/mobilenetv2.tflite";
@@ -134,23 +130,8 @@ void getFeature(const string filename, vector<float> &feature_input) {
   std::unique_ptr<tflite::Interpreter> interpreter;
   tflite::InterpreterBuilder(*model.get(), resolver)(&interpreter);
 
-  input_size = interpreter->inputs().size();
-  output_size = interpreter->outputs().size();
-
-  int t_size = interpreter->tensors_size();
-
-  for (int i = 0; i < t_size; i++) {
-    for (int j = 0; j < input_size; j++) {
-      if (strncmp(interpreter->tensor(i)->name, interpreter->GetInputName(j),
-                  sizeof(interpreter->tensor(i)->name)) == 0)
-        input_idx_list.push_back(i);
-    }
-    for (int j = 0; j < output_size; j++) {
-      if (strncmp(interpreter->tensor(i)->name, interpreter->GetOutputName(j),
-                  sizeof(interpreter->tensor(i)->name)) == 0)
-        output_idx_list.push_back(i);
-    }
-  }
+  const std::vector<int> &input_idx_list = interpreter->inputs();
+  const std::vector<int> &output_idx_list = interpreter->outputs();
 
   for (int i = 0; i < 4; i++) {
     input_dim[i] = 1;
@@ -450,7 +431,15 @@ int main(int argc, char *argv[]) {
       std::vector<float> featureVector, resultVector;
       featureVector.resize(feature_size);
       getFeature(img, featureVector);
-      nntrainer::Tensor X = nntrainer::Tensor({featureVector});
+
+      nntrainer::Tensor X;
+      try {
+        X = nntrainer::Tensor({featureVector});
+      } catch (...) {
+        std::cerr << "Error while constructing tensor" << std::endl;
+        NN.finalize();
+        return 0;
+      }
       cout << NN.forwarding(X, status).apply(stepFunction) << endl;
     }
   /**
diff --git a/Applications/Classification/jni/main_func.cpp b/Applications/Classification/jni/main_func.cpp
index b4b012f..65111ef 100644
--- a/Applications/Classification/jni/main_func.cpp
+++ b/Applications/Classification/jni/main_func.cpp
@@ -118,13 +118,14 @@ static int rangeRandom(int min, int max) {
  * @retval true/false false : end of data
  */
 bool getData(std::ifstream &F, std::vector<float> &outVec,
-             std::vector<float> &outLabel, int id) {
+             std::vector<float> &outLabel, uint64_t id) {
   F.clear();
   F.seekg(0, std::ios_base::end);
   uint64_t file_length = F.tellg();
-  uint64_t position = (feature_size + total_label_size) * id * sizeof(float);
+  uint64_t position =
+    (uint64_t)((feature_size + total_label_size) * id * sizeof(float));
 
-  if (position > file_length || position > ULLONG_MAX) {
+  if (position > file_length) {
     return false;
   }
   F.seekg(position, std::ios::beg);
@@ -296,11 +297,18 @@ int main(int argc, char *argv[]) {
    */
   nntrainer::NeuralNetwork NN;
   NN.setConfig(config);
-  NN.loadFromConfig();
+  try {
+    NN.loadFromConfig();
+  } catch (...) {
+    std::cerr << "Error during loadFromConfig" << std::endl;
+    NN.finalize();
+    return 0;
+  }
   try {
     NN.init();
   } catch (...) {
     std::cerr << "Error during init" << std::endl;
+    NN.finalize();
     return 0;
   }
   NN.readModel();
diff --git a/Applications/ReinforcementLearning/DeepQ/jni/main.cpp b/Applications/ReinforcementLearning/DeepQ/jni/main.cpp
index 63a5be1..808da4e 100644
--- a/Applications/ReinforcementLearning/DeepQ/jni/main.cpp
+++ b/Applications/ReinforcementLearning/DeepQ/jni/main.cpp
@@ -281,10 +281,34 @@ int main(int argc, char **argv) {
   /**
    * @brief initialize mainNet & Target Net
    */
-  mainNet.loadFromConfig();
-  mainNet.init();
-  targetNet.loadFromConfig();
-  targetNet.init();
+  try {
+    mainNet.loadFromConfig();
+  } catch (...) {
+    std::cerr << "Error during loadFromConfig" << std::endl;
+    mainNet.finalize();
+    return 0;
+  }
+  try {
+    mainNet.init();
+  } catch (...) {
+    std::cerr << "Error during init" << std::endl;
+    mainNet.finalize();
+    return 0;
+  }
+  try {
+    targetNet.loadFromConfig();
+  } catch (...) {
+    std::cerr << "Error during loadFromConfig" << std::endl;
+    targetNet.finalize();
+    return 0;
+  }
+  try {
+    targetNet.init();
+  } catch (...) {
+    std::cerr << "Error during init" << std::endl;
+    targetNet.finalize();
+    return 0;
+  }
 
   /**
    * @brief Read Model Data if any
diff --git a/Applications/Tizen_CAPI/capi_file.c b/Applications/Tizen_CAPI/capi_file.c
index 9f6c8a8..cb3185d 100644
--- a/Applications/Tizen_CAPI/capi_file.c
+++ b/Applications/Tizen_CAPI/capi_file.c
@@ -91,7 +91,7 @@ int main(int argc, char *argv[]) {
   NN_RETURN_STATUS();
 
   /* compile model with cross entropy loss function */
-  status = ml_train_model_compile(model, "loss=cross", NULL);
+  status = ml_train_model_compile(model, "loss=cross", "batch_size=32", NULL);
   NN_RETURN_STATUS();
 
   /* create dataset */
@@ -110,8 +110,7 @@ int main(int argc, char *argv[]) {
 
   /* train model with data files : epochs = 10 and store model file named
    * "model.bin" */
-  status = ml_train_model_run(model, "epochs=10", "batch_size=32",
-                              "model_file=model.bin", NULL);
+  status = ml_train_model_run(model, "epochs=10", "model_file=model.bin", NULL);
   NN_RETURN_STATUS();
 
   /* delete model */
diff --git a/Applications/Tizen_CAPI/capi_func.c b/Applications/Tizen_CAPI/capi_func.c
index a643da0..2d8493d 100644
--- a/Applications/Tizen_CAPI/capi_func.c
+++ b/Applications/Tizen_CAPI/capi_func.c
@@ -72,7 +72,7 @@ static int range_random(int min, int max) {
  * @retval true/false false : end of data
  */
 static bool get_data(const char *file_name, float *outVec, float *outLabel,
-                     int id, int file_length) {
+                     uint64_t id, int file_length) {
   uint64_t position;
   FILE *F;
   unsigned int i;
@@ -136,6 +136,7 @@ int gen_data_train(float **outVec, float **outLabel, bool *last) {
   unsigned int data_size = 0;
   unsigned int i, j;
   FILE *file;
+  float *o, *l;
 
   const char *file_name = "trainingSet.dat";
 
@@ -185,10 +186,10 @@ int gen_data_train(float **outVec, float **outLabel, bool *last) {
     }
   }
 
-  for (i = 0; i < count; i++) {
-    float o[feature_size];
-    float l[num_class];
+  o = malloc(sizeof(float) * feature_size);
+  l = malloc(sizeof(float) * num_class);
 
+  for (i = 0; i < count; i++) {
     get_data(file_name, o, l, memI[i], file_size);
 
     for (j = 0; j < feature_size; ++j)
@@ -197,6 +198,8 @@ int gen_data_train(float **outVec, float **outLabel, bool *last) {
       outLabel[0][i * num_class + j] = l[j];
   }
 
+  free(o);
+  free(l);
   *last = false;
   return ML_ERROR_NONE;
 }
@@ -215,6 +218,7 @@ int gen_data_val(float **outVec, float **outLabel, bool *last) {
   unsigned int count = 0;
   unsigned int data_size = 0;
   long file_size;
+  float *o, *l;
 
   const char *file_name = "trainingSet.dat";
 
@@ -255,10 +259,10 @@ int gen_data_val(float **outVec, float **outLabel, bool *last) {
     }
   }
 
-  for (i = 0; i < count; i++) {
-    float o[feature_size];
-    float l[num_class];
+  o = malloc(feature_size * sizeof(float));
+  l = malloc(num_class * sizeof(float));
 
+  for (i = 0; i < count; i++) {
     get_data(file_name, o, l, memI[i], file_size);
 
     for (j = 0; j < feature_size; ++j)
@@ -268,6 +272,10 @@ int gen_data_val(float **outVec, float **outLabel, bool *last) {
   }
 
   *last = false;
+
+  free(o);
+  free(l);
+
   return ML_ERROR_NONE;
 }
 
@@ -328,7 +336,7 @@ int main(int argc, char *argv[]) {
   NN_RETURN_STATUS();
 
   /* compile model with cross entropy loss function */
-  status = ml_train_model_compile(model, "loss=cross", NULL);
+  status = ml_train_model_compile(model, "loss=cross", "batch_size=32", NULL);
   NN_RETURN_STATUS();
 
   /* create dataset */
@@ -346,12 +354,12 @@ int main(int argc, char *argv[]) {
 
   /* train model with data files : epochs = 10 and store model file named
    * "model.bin" */
-  status = ml_train_model_run(model, "epochs=10", "batch_size=32",
-                              "model_file=model.bin", NULL);
+  status = ml_train_model_run(model, "epochs=10", "model_file=model.bin", NULL);
   NN_RETURN_STATUS();
 
   /* delete model */
   status = ml_train_model_destroy(model);
   NN_RETURN_STATUS();
+  return 0;
 }
diff --git a/Applications/Training/jni/bitmap_helpers.cpp b/Applications/Training/jni/bitmap_helpers.cpp
index 86659f5..ca7dc09 100644
--- a/Applications/Training/jni/bitmap_helpers.cpp
+++ b/Applications/Training/jni/bitmap_helpers.cpp
@@ -106,9 +106,15 @@ uint8_t *read_bmp(const std::string &input_bmp_name, int *width, int *height,
 
   // Decode image, allocating tensor once the image size is known
   uint8_t *output = new uint8_t[abs(*height) * *width * *channels];
+
   const uint8_t *bmp_pixels = &img_bytes[header_size];
-  return decode_bmp(bmp_pixels, row_size, output, *width, abs(*height),
-                    *channels, top_down);
+
+  decode_bmp(bmp_pixels, row_size, output, *width, abs(*height), *channels,
+             top_down);
+
+  delete[] img_bytes;
+
+  return output;
 }
 
 } // namespace label_image
diff --git a/Applications/mnist/jni/main.cpp b/Applications/mnist/jni/main.cpp
index 0d63a0d..68f6129 100644
--- a/Applications/mnist/jni/main.cpp
+++ b/Applications/mnist/jni/main.cpp
@@ -291,11 +291,19 @@ int main(int argc, char *argv[]) {
    */
   nntrainer::NeuralNetwork NN;
   NN.setConfig(config);
-  NN.loadFromConfig();
+  try {
+    NN.loadFromConfig();
+  } catch (...) {
+    std::cerr << "Error during loadFromConfig" << std::endl;
+    NN.finalize();
+    return 0;
+  }
+
   try {
     NN.init();
   } catch (...) {
     std::cerr << "Error during init" << std::endl;
+    NN.finalize();
     return 0;
   }
diff --git a/api/capi/src/nntrainer.cpp b/api/capi/src/nntrainer.cpp
index 318065f..bae5156 100644
--- a/api/capi/src/nntrainer.cpp
+++ b/api/capi/src/nntrainer.cpp
@@ -496,12 +496,13 @@ int ml_train_layer_create(ml_train_layer_h *layer, ml_train_layer_type_e type) {
       delete nnlayer;
       ml_loge("Error: Unknown layer type");
       status = ML_ERROR_INVALID_PARAMETER;
-      break;
+      return status;
     }
   } catch (std::bad_alloc &e) {
     ml_loge("Error: heap exception: %s", e.what());
     status = ML_ERROR_OUT_OF_MEMORY;
     delete nnlayer;
+    return status;
   }
 
   nnlayer->in_use = false;
diff --git a/nntrainer/include/bn_layer.h b/nntrainer/include/bn_layer.h
index 5ddf158..6388918 100644
--- a/nntrainer/include/bn_layer.h
+++ b/nntrainer/include/bn_layer.h
@@ -50,6 +50,18 @@ public:
   ~BatchNormalizationLayer(){};
 
   /**
+   * @brief Move constructor of BatchNormalizationLayer.
+   * @param[in] BatchNormalizationLayer &&
+   */
+  BatchNormalizationLayer(BatchNormalizationLayer &&rhs) = default;
+
+  /**
+   * @brief Move assignment operator.
+   * @param[in] rhs BatchNormalizationLayer to be moved.
+   */
+  BatchNormalizationLayer &operator=(BatchNormalizationLayer &&rhs) = default;
+
+  /**
    * @brief forward propagation with input
    * @param[in] in Input Tensor from upper layer
    * @retval normalized input tensor using scaling factor
diff --git a/nntrainer/include/conv2d_layer.h b/nntrainer/include/conv2d_layer.h
index 0a4f13d..6b2e841 100644
--- a/nntrainer/include/conv2d_layer.h
+++ b/nntrainer/include/conv2d_layer.h
@@ -52,6 +52,18 @@ public:
   ~Conv2DLayer(){};
 
   /**
+   * @brief Move constructor of Conv 2D Layer.
+   * @param[in] Conv2DLayer &&
+   */
+  Conv2DLayer(Conv2DLayer &&rhs) = default;
+
+  /**
+   * @brief Move assignment operator.
+   * @param[in] rhs Conv2DLayer to be moved.
+   */
+  Conv2DLayer &operator=(Conv2DLayer &&rhs) = default;
+
+  /**
    * @brief initialize layer
    * @param[in] last last layer
    * @retval #ML_ERROR_NONE Successful.
diff --git a/nntrainer/include/fc_layer.h b/nntrainer/include/fc_layer.h
index 7c10110..b438b11 100644
--- a/nntrainer/include/fc_layer.h
+++ b/nntrainer/include/fc_layer.h
@@ -42,6 +42,18 @@ public:
   ~FullyConnectedLayer(){};
 
   /**
+   * @brief Move constructor of FullyConnectedLayer.
+   * @param[in] FullyConnectedLayer &&
+   */
+  FullyConnectedLayer(FullyConnectedLayer &&rhs) = default;
+
+  /**
+   * @brief Move assignment operator.
+   * @param[in] rhs FullyConnectedLayer to be moved.
+   */
+  FullyConnectedLayer &operator=(FullyConnectedLayer &&rhs) = default;
+
+  /**
    * @brief Read Weight & Bias Data from file
    * @param[in] file input stream file
    */
diff --git a/nntrainer/include/flatten_layer.h b/nntrainer/include/flatten_layer.h
index 9b16eda..75f0ec1 100644
--- a/nntrainer/include/flatten_layer.h
+++ b/nntrainer/include/flatten_layer.h
@@ -41,6 +41,18 @@ public:
   ~FlattenLayer(){};
 
   /**
+   * @brief Move constructor of FlattenLayer.
+   * @param[in] FlattenLayer &&
+   */
+  FlattenLayer(FlattenLayer &&rhs) = default;
+
+  /**
+   * @brief Move assignment operator.
+   * @param[in] rhs FlattenLayer to be moved.
+   */
+  FlattenLayer &operator=(FlattenLayer &&rhs) = default;
+
+  /**
    * @brief initialize layer
    * @param[in] last last layer
    * @retval #ML_ERROR_NONE Successful.
diff --git a/nntrainer/include/input_layer.h b/nntrainer/include/input_layer.h
index 59191e4..d34f516 100644
--- a/nntrainer/include/input_layer.h
+++ b/nntrainer/include/input_layer.h
@@ -52,6 +52,18 @@ public:
   ~InputLayer(){};
 
   /**
+   * @brief Move constructor of InputLayer.
+   * @param[in] InputLayer &&
+   */
+  InputLayer(InputLayer &&rhs) = default;
+
+  /**
+   * @brief Move assignment operator.
+   * @param[in] rhs InputLayer to be moved.
+   */
+  InputLayer &operator=(InputLayer &&rhs) = default;
+
+  /**
    * @brief No Weight data for this Input Layer
    */
   void read(std::ifstream &file){};
diff --git a/nntrainer/include/layer.h b/nntrainer/include/layer.h
index 50186b7..ac8264e 100644
--- a/nntrainer/include/layer.h
+++ b/nntrainer/include/layer.h
@@ -155,6 +155,18 @@ public:
   virtual ~Layer(){};
 
   /**
+   * @brief Move constructor of Layer.
+   * @param[in] Layer &&
+   */
+  Layer(Layer &&rhs) noexcept = default;
+
+  /**
+   * @brief Move assignment operator.
+   * @param[in] rhs Layer to be moved.
+   */
+  virtual Layer &operator=(Layer &&rhs) = default;
+
+  /**
    * @brief Forward Propagation of neural Network
    * @param[in] in Input Tensor taken by upper layer
    * @retval Output Tensor
diff --git a/nntrainer/include/optimizer.h b/nntrainer/include/optimizer.h
index 1858fd4..498f467 100644
--- a/nntrainer/include/optimizer.h
+++ b/nntrainer/include/optimizer.h
@@ -97,12 +97,32 @@ public:
    */
   Optimizer() : type(OptType::unknown), popt() {}
 
+  Optimizer(const OptType type, OptParam popt);
+
   /**
    * @brief Destructor of Optimizer Class
    */
   ~Optimizer() {}
 
   /**
+   * @brief copy assignment operator
+   * @param[in] rhs Optimizer to be copied
+   */
+  Optimizer &operator=(const Optimizer &rhs) = default;
+
+  /**
+   * @brief Move constructor of Optimizer.
+   * @param[in] Optimizer &&
+   */
+  Optimizer(Optimizer &&rhs) = default;
+
+  /**
+   * @brief Move assignment operator.
+   * @param[in] rhs Optimizer to be moved.
+   */
+  Optimizer &operator=(Optimizer &&rhs) = default;
+
+  /**
    * @brief set Optimizer Type
    * @param[in] t Optimizer type
    * @retval #ML_ERROR_NONE Successful.
diff --git a/nntrainer/include/pooling2d_layer.h b/nntrainer/include/pooling2d_layer.h
index fd9369e..3ae0113 100644
--- a/nntrainer/include/pooling2d_layer.h
+++ b/nntrainer/include/pooling2d_layer.h
@@ -57,6 +57,18 @@ public:
   ~Pooling2DLayer(){};
 
   /**
+   * @brief Move constructor of Pooling 2D Layer.
+   * @param[in] Pooling2DLayer &&
+   */
+  Pooling2DLayer(Pooling2DLayer &&rhs) = default;
+
+  /**
+   * @brief Move assignment operator.
+   * @param[in] rhs Pooling2DLayer to be moved.
+   */
+  Pooling2DLayer &operator=(Pooling2DLayer &&rhs) = default;
+
+  /**
    * @brief initialize layer
    * @param[in] last last layer
    * @retval #ML_ERROR_NONE Successful.
diff --git a/nntrainer/include/tensor.h b/nntrainer/include/tensor.h
index 20fee1c..9501824 100644
--- a/nntrainer/include/tensor.h
+++ b/nntrainer/include/tensor.h
@@ -523,13 +523,6 @@ public:
   int setDim(TensorDim d);
 
   /**
-   * @brief return if current tensor is contiguous, if not, you can't write
-   * on this tensor
-   * @retval bool is contiguous
-   */
-  const bool isContiguous() const noexcept { return is_contiguous; }
-
-  /**
    * @brief return current stride of tensor.
    * @retval int[MAXDIM] strides
    */
diff --git a/nntrainer/include/tensor_dim.h b/nntrainer/include/tensor_dim.h
index ebea783..439fb0b 100644
--- a/nntrainer/include/tensor_dim.h
+++ b/nntrainer/include/tensor_dim.h
@@ -44,7 +44,30 @@ public:
     len = b * feature_len;
   }
 
+  TensorDim(const TensorDim &rhs) :
+    TensorDim(rhs.batch(), rhs.channel(), rhs.height(), rhs.width()){};
+
   ~TensorDim(){};
+
+  /**
+   * @brief Move constructor of TensorDim.
+   * @param[in] TensorDim &&
+   */
+  TensorDim(TensorDim &&rhs) noexcept = default;
+
+  /**
+   * @brief Move assignment operator.
+   * @param[in] rhs TensorDim to be moved.
+   */
+  TensorDim &operator=(TensorDim &&rhs) noexcept;
+
+  /**
+   * @brief swap the contents of two TensorDims
+   * @param[out] lhs TensorDim to be swapped
+   * @param[in] rhs TensorDim to be swapped
+   */
+  void swap(TensorDim &lhs, TensorDim &rhs) noexcept;
+
   unsigned int batch() const { return dim[0]; };
   unsigned int channel() const { return dim[1]; };
   unsigned int height() const { return dim[2]; };
@@ -59,12 +82,12 @@ public:
   void width(unsigned int w) { setTensorDim(3, w); }
 
   const unsigned int *getDim() const { return dim; }
-  const unsigned int getNumDim() const { return MAXDIM; }
+  unsigned int getNumDim() const { return MAXDIM; }
 
   void setTensorDim(unsigned int idx, unsigned int value);
   int setTensorDim(std::string input_shape);
 
-  void operator=(const TensorDim &from);
+  TensorDim &operator=(const TensorDim &rhs);
   bool operator==(const TensorDim &rhs) const;
   bool operator!=(const TensorDim &rhs) const { return !(*this == rhs); }
diff --git a/nntrainer/src/databuffer_file.cpp b/nntrainer/src/databuffer_file.cpp
index ab483c1..84a726e 100644
--- a/nntrainer/src/databuffer_file.cpp
+++ b/nntrainer/src/databuffer_file.cpp
@@ -191,7 +191,7 @@ void DataBufferFromDataFile::updateData(BufferType type) {
     break;
   }
 
-  unsigned int I;
+  uint64_t I;
   std::vector<unsigned int> mark;
   mark.resize(max_size);
   file.clear();
diff --git a/nntrainer/src/databuffer_func.cpp b/nntrainer/src/databuffer_func.cpp
index d3ab490..01aeb6c 100644
--- a/nntrainer/src/databuffer_func.cpp
+++ b/nntrainer/src/databuffer_func.cpp
@@ -316,6 +316,8 @@ void DataBufferFromCallback::updateData(BufferType type) {
   }
   free(vec);
   free(veclabel);
+  free(vec_arr);
+  free(veclabel_arr);
 }
 
 int DataBufferFromCallback::setProperty(const PropertyType type,
diff --git a/nntrainer/src/neuralnet.cpp b/nntrainer/src/neuralnet.cpp
index e4cdf3a..0cfc198 100644
--- a/nntrainer/src/neuralnet.cpp
+++ b/nntrainer/src/neuralnet.cpp
@@ -233,7 +233,8 @@ int NeuralNetwork::loadFromConfig() {
 
   if (!sec_name) {
     ml_loge("Error: Unable to retrieve section names from ini.");
-    return ML_ERROR_INVALID_PARAMETER;
+    status = ML_ERROR_INVALID_PARAMETER;
+    NN_RETURN_STATUS();
   }
 
   if (strncasecmp(network_str, sec_name, network_len) == 0) {
@@ -979,7 +980,7 @@ static unsigned int getLayerFlag(ml_train_summary_type_e verbosity,
 
   /// no break intended
   case ML_TRAIN_SUMMARY_MODEL:
-    flag =
+    flag |=
       LayerPrintOption::PRINT_INST_INFO | LayerPrintOption::PRINT_SHAPE_INFO;
     break;
diff --git a/nntrainer/src/optimizer.cpp b/nntrainer/src/optimizer.cpp
index b953c8e..83dc703 100644
--- a/nntrainer/src/optimizer.cpp
+++ b/nntrainer/src/optimizer.cpp
@@ -31,6 +31,11 @@
 
 namespace nntrainer {
 
+Optimizer::Optimizer(const OptType t, const OptParam p) {
+  type = t;
+  popt = p;
+}
+
 int Optimizer::setType(OptType t) {
   int status = ML_ERROR_NONE;
   if (t == OptType::unknown) {
diff --git a/nntrainer/src/tensor_dim.cpp b/nntrainer/src/tensor_dim.cpp
index 376965a..e38ae90 100644
--- a/nntrainer/src/tensor_dim.cpp
+++ b/nntrainer/src/tensor_dim.cpp
@@ -22,6 +22,23 @@
 
 namespace nntrainer {
 
+TensorDim &TensorDim::operator=(const TensorDim &rhs) {
+  TensorDim tmp(rhs.batch(), rhs.channel(), rhs.height(), rhs.width());
+  this->swap(*this, tmp);
+  return *this;
+}
+
+TensorDim &TensorDim::operator=(TensorDim &&rhs) noexcept {
+  this->swap(*this, rhs);
+  return *this;
+}
+
+void TensorDim::swap(TensorDim &lhs, TensorDim &rhs) noexcept {
+  std::swap(lhs.dim, rhs.dim);
+  std::swap(lhs.len, rhs.len);
+  std::swap(lhs.feature_len, rhs.feature_len);
+}
+
 void TensorDim::resetLen() {
   feature_len = dim[1] * dim[2] * dim[3];
   len = dim[0] * feature_len;
@@ -66,14 +83,6 @@ int TensorDim::setTensorDim(std::string input_shape) {
   return status;
 }
 
-void TensorDim::operator=(const TensorDim &from) {
-  for (int i = 0; i < MAXDIM; ++i) {
-    this->dim[i] = from.dim[i];
-  }
-  len = from.len;
-  feature_len = from.feature_len;
-}
-
 bool TensorDim::operator==(const TensorDim &rhs) const {
   for (int i = 0; i < MAXDIM; ++i) {
     if (this->dim[i] != rhs.dim[i]) {
diff --git a/test/include/nntrainer_test_util.h b/test/include/nntrainer_test_util.h
index 8c63eb3..713d736 100644
--- a/test/include/nntrainer_test_util.h
+++ b/test/include/nntrainer_test_util.h
@@ -157,7 +157,7 @@ protected:
   nntrainer::NeuralNetwork NN;
 
 private:
-  void erase_ini() { std::remove(getIniName().c_str()); }
+  void erase_ini() { std::remove((char *)(getIniName().c_str())); }
   int failAt;
   std::string name;
   std::vector<IniSection> sections;
diff --git a/test/nntrainer_test_util.cpp b/test/nntrainer_test_util.cpp
index 167ad4b..d3a4494 100644
--- a/test/nntrainer_test_util.cpp
+++ b/test/nntrainer_test_util.cpp
@@ -87,7 +87,7 @@ static int rangeRandom(int min, int max) {
  * @retval true/false false : end of data
  */
 static bool getData(std::ifstream &F, std::vector<float> &outVec,
-                    std::vector<float> &outLabel, int id) {
+                    std::vector<float> &outLabel, uint64_t id) {
   F.clear();
   F.seekg(0, std::ios_base::end);
   uint64_t file_length = F.tellg();
diff --git a/test/tizen_capi/unittest_tizen_capi.cpp b/test/tizen_capi/unittest_tizen_capi.cpp
index f83212b..b31b49f 100644
--- a/test/tizen_capi/unittest_tizen_capi.cpp
+++ b/test/tizen_capi/unittest_tizen_capi.cpp
@@ -759,6 +759,8 @@ TEST(nntrainer_capi_summary, summary_01_p) {
 
   status = ml_train_model_destroy(handle);
   EXPECT_EQ(status, ML_ERROR_NONE);
+
+  free(sum);
 }
 
 /**
diff --git a/test/tizen_capi/unittest_tizen_capi_dataset.cpp b/test/tizen_capi/unittest_tizen_capi_dataset.cpp
index fd66cae..0570610 100644
--- a/test/tizen_capi/unittest_tizen_capi_dataset.cpp
+++ b/test/tizen_capi/unittest_tizen_capi_dataset.cpp
@@ -35,6 +35,9 @@ TEST(nntrainer_capi_dataset, create_destroy_02_n) {
 
   status = ml_train_dataset_create_with_file(&dataset, "nofile.txt", NULL, NULL);
   EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
+
+  status = ml_train_dataset_destroy(dataset);
+  EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
 }
 
 /**
@@ -78,6 +81,7 @@ TEST(nntrainer_capi_dataset, create_destroy_05_p) {
 
   status = ml_train_dataset_create_with_file(&dataset, "trainingSet.dat", NULL, NULL);
   EXPECT_EQ(status, ML_ERROR_NONE);
+
   status = ml_train_dataset_destroy(dataset);
   EXPECT_EQ(status, ML_ERROR_NONE);