From: Donghyeon Jeong Date: Tue, 16 Jul 2024 00:34:06 +0000 (+0900) Subject: [bugfix] Resolves Android build warnings X-Git-Tag: accepted/tizen/7.0/unified/20240830.164841~64 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=9d1cd59268aeefbf8e489bd06bb0b6507ce3cc65;p=platform%2Fcore%2Fml%2Fnntrainer.git [bugfix] Resolves Android build warnings This PR resolves warnings that occur during the Android build. The list is as follows. **Changes proposed in this PR:** - Resolves the "explicitly defaulted function is implicitly deleted" warning. - Fixes functions that override virtual functions but are not marked `override`. - Resolves the clang warning about expression side effects. **Self-evaluation:** 1. Build test: [X]Passed [ ]Failed [ ]Skipped 2. Run test: [X]Passed [ ]Failed [ ]Skipped Signed-off-by: Donghyeon Jeong --- diff --git a/nntrainer/layers/layer_node.h b/nntrainer/layers/layer_node.h index f3733866..55b24f44 100644 --- a/nntrainer/layers/layer_node.h +++ b/nntrainer/layers/layer_node.h @@ -384,7 +384,7 @@ public: * * @return boolean true if trainable, else false */ - bool getTrainable() const; + bool getTrainable() const override; /** * @brief get if the output of this layer must be flatten diff --git a/nntrainer/layers/lstm.h b/nntrainer/layers/lstm.h index f35fdf88..21a7e495 100644 --- a/nntrainer/layers/lstm.h +++ b/nntrainer/layers/lstm.h @@ -41,13 +41,13 @@ public: * @brief Move constructor. * @param[in] LSTMLayer && */ - LSTMLayer(LSTMLayer &&rhs) noexcept = default; + LSTMLayer(LSTMLayer &&rhs) noexcept; /** * @brief Move assignment operator. * @parma[in] rhs LSTMLayer to be moved. 
*/ - LSTMLayer &operator=(LSTMLayer &&rhs) = default; + LSTMLayer &operator=(LSTMLayer &&rhs); /** * @copydoc Layer::finalize(InitLayerContext &context) diff --git a/nntrainer/models/neuralnet.h b/nntrainer/models/neuralnet.h index da1571a3..30d2288f 100644 --- a/nntrainer/models/neuralnet.h +++ b/nntrainer/models/neuralnet.h @@ -194,7 +194,7 @@ public: * @retval #ML_ERROR_NONE Successful. * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter. */ - int allocate(ExecutionMode mode = ExecutionMode::TRAIN); + int allocate(ExecutionMode mode = ExecutionMode::TRAIN) override; /** * @brief Deallocate memory for the model. diff --git a/nntrainer/tensor/cache_pool.h b/nntrainer/tensor/cache_pool.h index a23e6f87..a986f18c 100644 --- a/nntrainer/tensor/cache_pool.h +++ b/nntrainer/tensor/cache_pool.h @@ -76,7 +76,7 @@ public: size_t bytes, unsigned int start_time, unsigned int end_time, std::vector exec_order = std::vector(), TensorLifespan lifespan = TensorLifespan::MAX_LIFESPAN, - bool is_wgrad = false); + bool is_wgrad = false) override; /** * @brief Get the allocated cache * @@ -86,7 +86,7 @@ public: * * @details This function will throw if called before allocation. 
*/ - virtual std::shared_ptr getMemory(unsigned int id); + virtual std::shared_ptr getMemory(unsigned int id) override; /** * @brief Is the cache pool allocated diff --git a/nntrainer/tensor/manager.h b/nntrainer/tensor/manager.h index ab1c0181..8ae5aa89 100644 --- a/nntrainer/tensor/manager.h +++ b/nntrainer/tensor/manager.h @@ -168,14 +168,14 @@ public: * @brief Move Construct a new Manager object * */ - Manager(Manager &&) noexcept = default; + Manager(Manager &&) noexcept; /** * @brief Move assign a new Manager object * * @return Manager& reference to newly assign */ - Manager &operator=(Manager &&) noexcept = default; + Manager &operator=(Manager &&) noexcept; /** * @brief Destructor of Manager diff --git a/test/unittest/unittest_nntrainer_appcontext.cpp b/test/unittest/unittest_nntrainer_appcontext.cpp index c7ef039d..c7249a4d 100644 --- a/test/unittest/unittest_nntrainer_appcontext.cpp +++ b/test/unittest/unittest_nntrainer_appcontext.cpp @@ -176,9 +176,12 @@ TEST_P(AppContextTest, RegisterCreateCustomOptimizer_p) { EXPECT_EQ(num_id, ((int_key == -1) ? (-1) * int_key : int_key)); auto opt = ac.createObject( ((key == "") ? 
"identity_optimizer" : key), {}); - EXPECT_EQ(typeid(*opt).hash_code(), typeid(CustomOptimizer).hash_code()); + auto &optimizer = *opt.get(); + EXPECT_EQ(typeid(optimizer).hash_code(), typeid(CustomOptimizer).hash_code()); opt = ac.createObject(num_id, {}); - EXPECT_EQ(typeid(*opt).hash_code(), typeid(CustomOptimizer).hash_code()); + auto &new_optimizer = *opt.get(); + EXPECT_EQ(typeid(new_optimizer).hash_code(), + typeid(CustomOptimizer).hash_code()); } GTEST_PARAMETER_TEST(RegisterCreateCustomOptimizerTests, AppContextTest, diff --git a/test/unittest/unittest_nntrainer_tensor.cpp b/test/unittest/unittest_nntrainer_tensor.cpp index 94aa0183..12c88730 100644 --- a/test/unittest/unittest_nntrainer_tensor.cpp +++ b/test/unittest/unittest_nntrainer_tensor.cpp @@ -2225,7 +2225,7 @@ TEST(nntrainer_Tensor, multiple_sum_invalid_args_01_n) { TEST(nntrainer_Tensor, multiple_sum_out_of_range_n) { nntrainer::Tensor t = constant(1.0, 1, 1, 1, 1); - EXPECT_THROW(t.sum({7}), std::out_of_range); + EXPECT_THROW(t.sum(7), std::out_of_range); } TEST(nntrainer_Tensor, multiple_sum_p) { diff --git a/test/unittest/unittest_nntrainer_tensor_fp16.cpp b/test/unittest/unittest_nntrainer_tensor_fp16.cpp index 2b0d9c04..c0b06010 100644 --- a/test/unittest/unittest_nntrainer_tensor_fp16.cpp +++ b/test/unittest/unittest_nntrainer_tensor_fp16.cpp @@ -3686,7 +3686,7 @@ TEST(nntrainer_Tensor, multiple_sum_invalid_args_01_n) { TEST(nntrainer_Tensor, multiple_sum_out_of_range_n) { nntrainer::Tensor t = constant(1.0, 1, 1, 1, 1, nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16); - EXPECT_THROW(t.sum({7}), std::out_of_range); + EXPECT_THROW(t.sum(7), std::out_of_range); } TEST(nntrainer_Tensor, multiple_sum_p) { diff --git a/test/unittest/unittest_nntrainer_tensor_nhwc.cpp b/test/unittest/unittest_nntrainer_tensor_nhwc.cpp index 11f91a41..f65e1b4e 100644 --- a/test/unittest/unittest_nntrainer_tensor_nhwc.cpp +++ b/test/unittest/unittest_nntrainer_tensor_nhwc.cpp @@ -2527,7 +2527,7 @@ 
TEST(nntrainer_Tensor, multiple_sum_invalid_args_01_hnwc_n) { TEST(nntrainer_Tensor, multiple_sum_out_of_range_nhwc_n) { nntrainer::Tensor t = constant(1.0, 1, 1, 1, 1, NHWC_, FP32_); - EXPECT_THROW(t.sum({7}), std::out_of_range); + EXPECT_THROW(t.sum(7), std::out_of_range); } TEST(nntrainer_Tensor, multiple_sum_nhwc_p) {