This PR includes fixes for Coverity issues.
**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped
Signed-off-by: jijoong.moon <jijoong.moon@samsung.com>
 * @param[out] feature_input saves the output of tflite
*/
void getFeature(const string filename, vector<float> &feature_input) {
- int input_size;
- int output_size;
- std::vector<int> output_idx_list;
- std::vector<int> input_idx_list;
int input_dim[4];
int output_dim[4];
std::string model_path = "../../res/mobilenetv2.tflite";
std::unique_ptr<tflite::Interpreter> interpreter;
tflite::InterpreterBuilder(*model.get(), resolver)(&interpreter);
- input_size = interpreter->inputs().size();
- output_size = interpreter->outputs().size();
-
- int t_size = interpreter->tensors_size();
-
- for (int i = 0; i < t_size; i++) {
- for (int j = 0; j < input_size; j++) {
- if (strncmp(interpreter->tensor(i)->name, interpreter->GetInputName(j),
- sizeof(interpreter->tensor(i)->name)) == 0)
- input_idx_list.push_back(i);
- }
- for (int j = 0; j < output_size; j++) {
- if (strncmp(interpreter->tensor(i)->name, interpreter->GetOutputName(j),
- sizeof(interpreter->tensor(i)->name)) == 0)
- output_idx_list.push_back(i);
- }
- }
+ const std::vector<int> &input_idx_list = interpreter->inputs();
+ const std::vector<int> &output_idx_list = interpreter->outputs();
for (int i = 0; i < 4; i++) {
input_dim[i] = 1;
std::vector<float> featureVector, resultVector;
featureVector.resize(feature_size);
getFeature(img, featureVector);
- nntrainer::Tensor X = nntrainer::Tensor({featureVector});
+
+ nntrainer::Tensor X;
+ try {
+ X = nntrainer::Tensor({featureVector});
+ } catch (...) {
+ std::cerr << "Error while construct tensor" << std::endl;
+ NN.finalize();
+ return 0;
+ }
cout << NN.forwarding(X, status).apply(stepFunction) << endl;
}
/**
* @retval true/false false : end of data
*/
bool getData(std::ifstream &F, std::vector<float> &outVec,
- std::vector<float> &outLabel, int id) {
+ std::vector<float> &outLabel, uint64_t id) {
F.clear();
F.seekg(0, std::ios_base::end);
uint64_t file_length = F.tellg();
- uint64_t position = (feature_size + total_label_size) * id * sizeof(float);
+ uint64_t position =
+ (uint64_t)((feature_size + total_label_size) * id * sizeof(float));
- if (position > file_length || position > ULLONG_MAX) {
+ if (position > file_length) {
return false;
}
F.seekg(position, std::ios::beg);
*/
nntrainer::NeuralNetwork NN;
NN.setConfig(config);
- NN.loadFromConfig();
+ try {
+ NN.loadFromConfig();
+ } catch (...) {
+ std::cerr << "Error during loadFromConfig" << std::endl;
+ NN.finalize();
+ return 0;
+ }
try {
NN.init();
} catch (...) {
std::cerr << "Error during init" << std::endl;
+ NN.finalize();
return 0;
}
NN.readModel();
/**
* @brief initialize mainNet & Target Net
*/
- mainNet.loadFromConfig();
- mainNet.init();
- targetNet.loadFromConfig();
- targetNet.init();
+ try {
+ mainNet.loadFromConfig();
+ } catch (...) {
+ std::cerr << "Error during loadFromConfig" << std::endl;
+ mainNet.finalize();
+ return 0;
+ }
+ try {
+ mainNet.init();
+ } catch (...) {
+ std::cerr << "Error during init" << std::endl;
+ mainNet.finalize();
+ return 0;
+ }
+ try {
+ targetNet.loadFromConfig();
+ } catch (...) {
+ std::cerr << "Error during loadFromConfig" << std::endl;
+ targetNet.finalize();
+ return 0;
+ }
+ try {
+ targetNet.init();
+ } catch (...) {
+ std::cerr << "Error during init" << std::endl;
+ targetNet.finalize();
+ return 0;
+ }
/**
* @brief Read Model Data if any
NN_RETURN_STATUS();
/* compile model with cross entropy loss function */
- status = ml_train_model_compile(model, "loss=cross", NULL);
+ status = ml_train_model_compile(model, "loss=cross", "batch_size=32", NULL);
NN_RETURN_STATUS();
/* create dataset */
/* train model with data files : epochs = 10 and store model file named
* "model.bin" */
- status = ml_train_model_run(model, "epochs=10", "batch_size=32",
- "model_file=model.bin", NULL);
+ status = ml_train_model_run(model, "epochs=10", "model_file=model.bin", NULL);
NN_RETURN_STATUS();
/* delete model */
* @retval true/false false : end of data
*/
static bool get_data(const char *file_name, float *outVec, float *outLabel,
- int id, int file_length) {
+ uint64_t id, int file_length) {
uint64_t position;
FILE *F;
unsigned int i;
unsigned int data_size = 0;
unsigned int i, j;
FILE *file;
+ float *o, *l;
const char *file_name = "trainingSet.dat";
}
}
- for (i = 0; i < count; i++) {
- float o[feature_size];
- float l[num_class];
+ o = malloc(sizeof(float) * feature_size);
+ l = malloc(sizeof(float) * num_class);
+ for (i = 0; i < count; i++) {
get_data(file_name, o, l, memI[i], file_size);
for (j = 0; j < feature_size; ++j)
outLabel[0][i * num_class + j] = l[j];
}
+ free(o);
+ free(l);
*last = false;
return ML_ERROR_NONE;
}
unsigned int count = 0;
unsigned int data_size = 0;
long file_size;
+ float *o, *l;
const char *file_name = "trainingSet.dat";
}
}
- for (i = 0; i < count; i++) {
- float o[feature_size];
- float l[num_class];
+ o = malloc(feature_size * sizeof(float));
+ l = malloc(num_class * sizeof(float));
+ for (i = 0; i < count; i++) {
get_data(file_name, o, l, memI[i], file_size);
for (j = 0; j < feature_size; ++j)
}
*last = false;
+
+ free(o);
+ free(l);
+
return ML_ERROR_NONE;
}
NN_RETURN_STATUS();
/* compile model with cross entropy loss function */
- status = ml_train_model_compile(model, "loss=cross", NULL);
+ status = ml_train_model_compile(model, "loss=cross", "batch_size=32", NULL);
NN_RETURN_STATUS();
/* create dataset */
/* train model with data files : epochs = 10 and store model file named
* "model.bin" */
- status = ml_train_model_run(model, "epochs=10", "batch_size=32",
- "model_file=model.bin", NULL);
+ status = ml_train_model_run(model, "epochs=10", "model_file=model.bin", NULL);
NN_RETURN_STATUS();
/* delete model */
status = ml_train_model_destroy(model);
NN_RETURN_STATUS();
+
return 0;
}
// Decode image, allocating tensor once the image size is known
uint8_t *output = new uint8_t[abs(*height) * *width * *channels];
+
const uint8_t *bmp_pixels = &img_bytes[header_size];
- return decode_bmp(bmp_pixels, row_size, output, *width, abs(*height),
- *channels, top_down);
+
+ decode_bmp(bmp_pixels, row_size, output, *width, abs(*height), *channels,
+ top_down);
+
+ delete (img_bytes);
+
+ return output;
}
} // namespace label_image
*/
nntrainer::NeuralNetwork NN;
NN.setConfig(config);
- NN.loadFromConfig();
+ try {
+ NN.loadFromConfig();
+ } catch (...) {
+ std::cerr << "Error during loadFromConfig" << std::endl;
+ NN.finalize();
+ return 0;
+ }
+
try {
NN.init();
} catch (...) {
std::cerr << "Error during init" << std::endl;
+ NN.finalize();
return 0;
}
delete nnlayer;
ml_loge("Error: Unknown layer type");
status = ML_ERROR_INVALID_PARAMETER;
- break;
+ return status;
}
} catch (std::bad_alloc &e) {
ml_loge("Error: heap exception: %s", e.what());
status = ML_ERROR_OUT_OF_MEMORY;
delete nnlayer;
+ return status;
}
nnlayer->in_use = false;
~BatchNormalizationLayer(){};
/**
+ * @brief Move constructor of BatchNormalizationLayer.
+ * @param[in] rhs BatchNormalizationLayer &&
+ */
+ BatchNormalizationLayer(BatchNormalizationLayer &&rhs) = default;
+
+ /**
+ * @brief Move assignment operator.
+ * @param[in] rhs BatchNormalizationLayer to be moved.
+ */
+ BatchNormalizationLayer &operator=(BatchNormalizationLayer &&rhs) = default;
+
+ /**
* @brief forward propagation with input
* @param[in] in Input Tensor from upper layer
* @retval normalized input tensor using scaling factor
~Conv2DLayer(){};
/**
+ * @brief Move constructor of Conv 2D Layer.
+ * @param[in] rhs Conv2DLayer &&
+ */
+ Conv2DLayer(Conv2DLayer &&rhs) = default;
+
+ /**
+ * @brief Move assignment operator.
+ * @param[in] rhs Conv2DLayer to be moved.
+ */
+ Conv2DLayer &operator=(Conv2DLayer &&rhs) = default;
+
+ /**
* @brief initialize layer
* @param[in] last last layer
* @retval #ML_ERROR_NONE Successful.
~FullyConnectedLayer(){};
/**
+ * @brief Move constructor of FullyConnectedLayer.
+ * @param[in] rhs FullyConnectedLayer &&
+ */
+ FullyConnectedLayer(FullyConnectedLayer &&rhs) = default;
+
+ /**
+ * @brief Move assignment operator.
+ * @param[in] rhs FullyConnectedLayer to be moved.
+ */
+ FullyConnectedLayer &operator=(FullyConnectedLayer &&rhs) = default;
+
+ /**
* @brief Read Weight & Bias Data from file
* @param[in] file input stream file
*/
~FlattenLayer(){};
/**
+ * @brief Move constructor of FlattenLayer.
+ * @param[in] FlattenLayer &&
+ */
+ FlattenLayer(FlattenLayer &&rhs) = default;
+
+ /**
+ * @brief Move assignment operator.
+ * @param[in] rhs FlattenLayer to be moved.
+ */
+ FlattenLayer &operator=(FlattenLayer &&rhs) = default;
+
+ /**
* @brief initialize layer
* @param[in] last last layer
* @retval #ML_ERROR_NONE Successful.
~InputLayer(){};
/**
+ * @brief Move constructor of InputLayer.
+ * @param[in] rhs InputLayer &&
+ */
+ InputLayer(InputLayer &&rhs) = default;
+
+ /**
+ * @brief Move assignment operator.
+ * @param[in] rhs InputLayer to be moved.
+ */
+ InputLayer &operator=(InputLayer &&rhs) = default;
+
+ /**
* @brief No Weight data for this Input Layer
*/
void read(std::ifstream &file){};
virtual ~Layer(){};
/**
+ * @brief Move constructor of Layer.
+ * @param[in] Layer &&
+ */
+ Layer(Layer &&rhs) noexcept = default;
+
+ /**
+ * @brief Move assignment operator.
+ * @param[in] rhs Layer to be moved.
+ */
+ virtual Layer &operator=(Layer &&rhs) = default;
+
+ /**
* @brief Forward Propation of neural Network
* @param[in] in Input Tensor taken by upper layer
* @retval Output Tensor
*/
Optimizer() : type(OptType::unknown), popt() {}
+ Optimizer(const OptType type, OptParam popt);
+
/**
* @brief Destructor of Optimizer Class
*/
~Optimizer() {}
/**
+ * @brief copy assignment operator
+ * @param[in] rhs Optimizer to be copied
+ */
+ Optimizer &operator=(const Optimizer &rhs) = default;
+
+ /**
+ * @brief Move constructor of Optimizer.
+ * @param[in] rhs Optimizer &&
+ */
+ Optimizer(Optimizer &&rhs) = default;
+
+ /**
+ * @brief Move assignment operator.
+ * @param[in] rhs Optimizer to be moved.
+ */
+ Optimizer &operator=(Optimizer &&rhs) = default;
+
+ /**
* @brief set Optimizer Type
* @param[in] t Optimizer type
* @retval #ML_ERROR_NONE Successful.
~Pooling2DLayer(){};
/**
+ * @brief Move constructor of Pooling 2D Layer.
+ * @param[in] rhs Pooling2DLayer &&
+ */
+ Pooling2DLayer(Pooling2DLayer &&rhs) = default;
+
+ /**
+ * @brief Move assignment operator.
+ * @param[in] rhs Pooling2DLayer to be moved.
+ */
+ Pooling2DLayer &operator=(Pooling2DLayer &&rhs) = default;
+
+ /**
* @brief initialize layer
* @param[in] last last layer
* @retval #ML_ERROR_NONE Successful.
int setDim(TensorDim d);
/**
- * @brief return if current tensor is contiguous, if not, you can't write
- * on this tensor
- * @retval bool is contigous
- */
- const bool isContiguous() const noexcept { return is_contiguous; }
-
- /**
* @brief return current stride of tensor.
* @retval int[MAXDIM] strides
*/
len = b * feature_len;
}
+ TensorDim(const TensorDim &rhs) :
+ TensorDim(rhs.batch(), rhs.channel(), rhs.height(), rhs.width()){};
+
~TensorDim(){};
+
+ /**
+ * @brief Move constructor of TensorDim.
+ * @param[in] rhs TensorDim &&
+ */
+ TensorDim(TensorDim &&rhs) noexcept = default;
+
+ /**
+ * @brief Move assignment operator.
+ * @param[in] rhs TensorDim to be moved.
+ */
+ TensorDim &operator=(TensorDim &&rhs) noexcept;
+
+ /**
+ * @brief swap the contents of two TensorDim objects
+ * @param[out] lhs TensorDim
+ * @param[in] rhs TensorDim
+ */
+ void swap(TensorDim &lhs, TensorDim &rhs) noexcept;
+
unsigned int batch() const { return dim[0]; };
unsigned int channel() const { return dim[1]; };
unsigned int height() const { return dim[2]; };
void width(unsigned int w) { setTensorDim(3, w); }
const unsigned int *getDim() const { return dim; }
- const unsigned int getNumDim() const { return MAXDIM; }
+ unsigned int getNumDim() const { return MAXDIM; }
void setTensorDim(unsigned int idx, unsigned int value);
int setTensorDim(std::string input_shape);
- void operator=(const TensorDim &from);
+ TensorDim &operator=(const TensorDim &rhs);
bool operator==(const TensorDim &rhs) const;
bool operator!=(const TensorDim &rhs) const { return !(*this == rhs); }
break;
}
- unsigned int I;
+ uint64_t I;
std::vector<unsigned int> mark;
mark.resize(max_size);
file.clear();
}
free(vec);
free(veclabel);
+ free(vec_arr);
+ free(veclabel_arr);
}
int DataBufferFromCallback::setProperty(const PropertyType type,
if (!sec_name) {
ml_loge("Error: Unable to retrieve section names from ini.");
- return ML_ERROR_INVALID_PARAMETER;
+ status = ML_ERROR_INVALID_PARAMETER;
+ NN_RETURN_STATUS();
}
if (strncasecmp(network_str, sec_name, network_len) == 0) {
/// no break intended
case ML_TRAIN_SUMMARY_MODEL:
- flag =
+ flag |=
LayerPrintOption::PRINT_INST_INFO | LayerPrintOption::PRINT_SHAPE_INFO;
break;
namespace nntrainer {
+Optimizer::Optimizer(const OptType t, const OptParam p) {
+ type = t;
+ popt = p;
+}
+
int Optimizer::setType(OptType t) {
int status = ML_ERROR_NONE;
if (t == OptType::unknown) {
namespace nntrainer {
+TensorDim &TensorDim::operator=(const TensorDim &rhs) {
+ TensorDim tmp(rhs.batch(), rhs.channel(), rhs.height(), rhs.width());
+ this->swap(*this, tmp);
+ return *this;
+}
+
+TensorDim &TensorDim::operator=(TensorDim &&rhs) noexcept {
+ this->swap(*this, rhs);
+ return *this;
+}
+
+void TensorDim::swap(TensorDim &lhs, TensorDim &rhs) noexcept {
+ std::swap(lhs.dim, rhs.dim);
+ std::swap(lhs.len, rhs.len);
+ std::swap(lhs.feature_len, rhs.feature_len);
+}
+
void TensorDim::resetLen() {
feature_len = dim[1] * dim[2] * dim[3];
len = dim[0] * feature_len;
return status;
}
-void TensorDim::operator=(const TensorDim &from) {
- for (int i = 0; i < MAXDIM; ++i) {
- this->dim[i] = from.dim[i];
- }
- len = from.len;
- feature_len = from.feature_len;
-}
-
bool TensorDim::operator==(const TensorDim &rhs) const {
for (int i = 0; i < MAXDIM; ++i) {
if (this->dim[i] != rhs.dim[i]) {
nntrainer::NeuralNetwork NN;
private:
- void erase_ini() { std::remove(getIniName().c_str()); }
+ void erase_ini() { std::remove((char *)(getIniName().c_str())); }
int failAt;
std::string name;
std::vector<IniSection> sections;
* @retval true/false false : end of data
*/
static bool getData(std::ifstream &F, std::vector<float> &outVec,
- std::vector<float> &outLabel, int id) {
+ std::vector<float> &outLabel, uint64_t id) {
F.clear();
F.seekg(0, std::ios_base::end);
uint64_t file_length = F.tellg();
status = ml_train_model_destroy(handle);
EXPECT_EQ(status, ML_ERROR_NONE);
+
+ free(sum);
}
/**
status =
ml_train_dataset_create_with_file(&dataset, "nofile.txt", NULL, NULL);
EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
+
+ status = ml_train_dataset_destroy(dataset);
+ EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
}
/**
status =
ml_train_dataset_create_with_file(&dataset, "trainingSet.dat", NULL, NULL);
EXPECT_EQ(status, ML_ERROR_NONE);
+
status = ml_train_dataset_destroy(dataset);
EXPECT_EQ(status, ML_ERROR_NONE);