From 26d5def1f7c800ce5d3f0c5e9741b7cce72c8eec Mon Sep 17 00:00:00 2001
From: Seungbaek Hong
Date: Wed, 27 Mar 2024 18:44:06 +0900
Subject: [PATCH] [svace] fix svace issues

fixed all svace issues on main branch

**Self-evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Change-Id: I507379b2ee5f4d15c306408efb56347afaba23ba
Signed-off-by: Seungbaek Hong
---
 Applications/utils/jni/bitmap_helpers.cpp    | 4 +++-
 nntrainer/dataset/raw_file_data_producer.cpp | 8 +++++---
 nntrainer/layers/gru.cpp                     | 2 ++
 nntrainer/layers/lstm.cpp                    | 2 ++
 nntrainer/layers/rnn.cpp                     | 2 ++
 nntrainer/utils/util_func.cpp                | 3 ++-
 6 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/Applications/utils/jni/bitmap_helpers.cpp b/Applications/utils/jni/bitmap_helpers.cpp
index ba01344..0fc64ac 100644
--- a/Applications/utils/jni/bitmap_helpers.cpp
+++ b/Applications/utils/jni/bitmap_helpers.cpp
@@ -13,6 +13,8 @@
 See the License for the specific language governing permissions and
 limitations under the License.
 @file bitmat_helpers.cpp
 @brief bitmap_helpers from tensorflow
+@author TensorFlow Authors
+@bug there are no known bugs
 ==============================================================================*/
@@ -89,7 +91,7 @@ uint8_t *read_bmp(const std::string &input_bmp_name, int *width, int *height,
   const uint8_t *img_bytes = new uint8_t[len];
   file.seekg(0, std::ios::beg);
-  file.read((char *)img_bytes, len);
+  file.read((char *)img_bytes, static_cast<std::streamsize>(len));
   const int32_t header_size =
     *(reinterpret_cast<const int32_t *>(img_bytes + 10));
   *width = *(reinterpret_cast<const int32_t *>(img_bytes + 18));
diff --git a/nntrainer/dataset/raw_file_data_producer.cpp b/nntrainer/dataset/raw_file_data_producer.cpp
index e8ce12e..5a51139 100644
--- a/nntrainer/dataset/raw_file_data_producer.cpp
+++ b/nntrainer/dataset/raw_file_data_producer.cpp
@@ -67,8 +67,9 @@ RawFileDataProducer::finalize(const std::vector<TensorDim> &input_dims,
                 std::vector<Tensor> &labels) {
     NNTR_THROW_IF(idx >= sz, std::range_error)
       << "given index is out of bound, index: " << idx << " size: " << sz;
-    file.seekg(idx * sample_size * RawFileDataProducer::pixel_size,
-               std::ios_base::beg);
+    std::streamoff offset = static_cast<std::streamoff>(
+      idx * sample_size * RawFileDataProducer::pixel_size);
+    file.seekg(offset, std::ios_base::beg);
     for (auto &input : inputs) {
       input.read(file);
     }
@@ -107,7 +108,8 @@ RawFileDataProducer::size(const std::vector<TensorDim> &input_dims,
   // << " Given file does not align with the given sample size, sample size: "
   // << sample_size << " file_size: " << file_size;

-  return file_size / (sample_size * RawFileDataProducer::pixel_size);
+  return static_cast<unsigned int>(file_size) /
+         (sample_size * RawFileDataProducer::pixel_size);
 }

 void RawFileDataProducer::exportTo(
diff --git a/nntrainer/layers/gru.cpp b/nntrainer/layers/gru.cpp
index 8f68cb5..1b90247 100644
--- a/nntrainer/layers/gru.cpp
+++ b/nntrainer/layers/gru.cpp
@@ -94,6 +94,8 @@ void GRULayer::finalize(InitLayerContext &context) {
   const TensorDim &input_dim = context.getInputDimensions()[0];
   const unsigned int batch_size = input_dim.batch();
   const unsigned int max_timestep = input_dim.height();
+  NNTR_THROW_IF(max_timestep < 1, std::runtime_error)
+    << "max timestep must be greater than 0 in gru layer.";
   const unsigned int feature_size = input_dim.width();

   // if return_sequences == False :
diff --git a/nntrainer/layers/lstm.cpp b/nntrainer/layers/lstm.cpp
index bc3d750..79a3a28 100644
--- a/nntrainer/layers/lstm.cpp
+++ b/nntrainer/layers/lstm.cpp
@@ -424,6 +424,8 @@ void LSTMLayer::finalize(InitLayerContext &context) {
   if (!std::get<props::MaxTimestep>(lstm_props).empty())
     max_timestep =
       std::max(max_timestep, std::get<props::MaxTimestep>(lstm_props).get());
+  NNTR_THROW_IF(max_timestep < 1, std::runtime_error)
+    << "max timestep must be greater than 0 in lstm layer.";
   std::get<props::MaxTimestep>(lstm_props).set(max_timestep);

   const unsigned int feature_size = input_dim.width();
diff --git a/nntrainer/layers/rnn.cpp b/nntrainer/layers/rnn.cpp
index 8ac74bd..e5fb70a 100644
--- a/nntrainer/layers/rnn.cpp
+++ b/nntrainer/layers/rnn.cpp
@@ -77,6 +77,8 @@ void RNNLayer::finalize(InitLayerContext &context) {
   const TensorDim &input_dim = context.getInputDimensions()[SINGLE_INOUT_IDX];
   const unsigned int batch_size = input_dim.batch();
   const unsigned int max_timestep = input_dim.height();
+  NNTR_THROW_IF(max_timestep < 1, std::runtime_error)
+    << "max timestep must be greater than 0 in rnn layer.";
   const unsigned int feature_size = input_dim.width();

   // output_dim = [ batch, 1, (return_sequences ? time_iteration : 1), unit ]
diff --git a/nntrainer/utils/util_func.cpp b/nntrainer/utils/util_func.cpp
index fe212a4..207a810 100644
--- a/nntrainer/utils/util_func.cpp
+++ b/nntrainer/utils/util_func.cpp
@@ -214,7 +214,8 @@ char *getRealpath(const char *name, char *resolved) {
 #ifdef _WIN32
   return _fullpath(resolved, name, MAX_PATH_LENGTH);
 #else
-  return realpath(name, resolved);
+  resolved = realpath(name, nullptr);
+  return resolved;
 #endif
 }
--
2.7.4
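Note on the getRealpath hunk: with realpath(name, nullptr), POSIX realpath() allocates the result buffer itself, so whoever calls getRealpath() now owns the returned pointer and must free() it. A minimal sketch of that caller-side contract follows; the helper name resolve_path_sketch and the sample path are illustrative only and are not nntrainer API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the non-Windows branch of getRealpath():
 * passing nullptr asks realpath() to allocate a large-enough buffer. */
static char *resolve_path_sketch(const char *name) {
  return realpath(name, nullptr); /* caller must free() the result */
}

int main() {
  char *resolved = resolve_path_sketch("/tmp/../tmp"); /* hypothetical input */
  if (resolved == nullptr) {
    perror("realpath");
    return 1;
  }
  printf("resolved: %s\n", resolved);
  free(resolved); /* buffer was malloc()ed inside realpath() */
  return 0;
}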