From 3a3f0d66936035bcb716417f3f866305fdad61e0 Mon Sep 17 00:00:00 2001
From: Jihoon Lee
Date: Fri, 5 Nov 2021 16:17:58 +0900
Subject: [PATCH] [Trivial] ZERO_LIFESPAN -> UNMANAGED

This patch renames ZERO_LIFESPAN to UNMANAGED. The name ZERO_LIFESPAN
suggests a lifespan that can be extended like the others, while extending it
is precisely what is not allowed, so the name is changed for clarity.

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Jihoon Lee
---
 nntrainer/tensor/tensor_pool.cpp                 | 10 ++--
 nntrainer/tensor/tensor_pool.h                   |  2 +-
 nntrainer/tensor/tensor_wrap_specs.h             |  2 +-
 test/unittest/unittest_nntrainer_tensor_pool.cpp | 60 ++++++++++++------------
 4 files changed, 36 insertions(+), 38 deletions(-)

diff --git a/nntrainer/tensor/tensor_pool.cpp b/nntrainer/tensor/tensor_pool.cpp
index a518a12..c5e99ce 100644
--- a/nntrainer/tensor/tensor_pool.cpp
+++ b/nntrainer/tensor/tensor_pool.cpp
@@ -61,7 +61,7 @@ Tensor *
 TensorPool::requestExternallyAllocateTensor(const TensorDim &dim,
                                             const std::string &name,
                                             const Tensor::Initializer &init) {
-  return requestTensor(dim, {}, TensorLifespan::ZERO_LIFESPAN, name, init);
+  return requestTensor(dim, {}, TensorLifespan::UNMANAGED, name, init);
 }
 
 /**
@@ -96,7 +96,7 @@ Tensor *TensorPool::requestPrerequestedTensor(
    * cannot expand lifespan of zero lifespan tensor
    * it works for externally allocated tensors as well
    */
-  if (spec.lifespan != TensorLifespan::ZERO_LIFESPAN) {
+  if (spec.lifespan != TensorLifespan::UNMANAGED) {
     spec.exec_order.insert(spec.exec_order.end(), exec_order.begin(),
                            exec_order.end());
     spec.lifespan = enum_class_or(spec.lifespan, lifespan);
@@ -125,7 +125,7 @@ void TensorPool::finalize(const MemoryPlanner &planner,
   for (auto &spec : pool) {
     /** do not include dependent tensors in planning layout */
     if (spec.dependent || spec.exec_order.empty() ||
-        spec.lifespan == TensorLifespan::ZERO_LIFESPAN)
+        spec.lifespan == TensorLifespan::UNMANAGED)
       continue;
 
     spec.token = 0;
@@ -232,7 +232,7 @@ void TensorPool::expand_lifespan(const std::string &name,
 
   auto &spec = pool[parent_spec_idx];
 
-  if (spec.lifespan != TensorLifespan::ZERO_LIFESPAN)
+  if (spec.lifespan != TensorLifespan::UNMANAGED)
     throw std::invalid_argument("Cannot extend tensor lifespan from ZERO");
 
   spec.lifespan = enum_class_or(spec.lifespan, lifespan);
@@ -311,7 +311,7 @@ bool TensorPool::isTensorLongTerm(const TensorLifespan &lifespan) {
     [[fallthrough]];
   case TensorLifespan::ITERATION_LIFESPAN:
     [[fallthrough]];
-  case TensorLifespan::ZERO_LIFESPAN:
+  case TensorLifespan::UNMANAGED:
     [[fallthrough]];
   default:
     return false;
diff --git a/nntrainer/tensor/tensor_pool.h b/nntrainer/tensor/tensor_pool.h
index c31670c..6310199 100644
--- a/nntrainer/tensor/tensor_pool.h
+++ b/nntrainer/tensor/tensor_pool.h
@@ -203,7 +203,7 @@ public:
   void setExternalTensor(const std::string &name, const Tensor &t) {
     auto &spec = getSourceSpec(name);
 
-    if (spec.lifespan != TensorLifespan::ZERO_LIFESPAN)
+    if (spec.lifespan != TensorLifespan::UNMANAGED)
       throw std::invalid_argument(
         "Cannot set external tensor for non-zero lifespan");
 
diff --git a/nntrainer/tensor/tensor_wrap_specs.h b/nntrainer/tensor/tensor_wrap_specs.h
index f2fb197..5ac3166 100644
--- a/nntrainer/tensor/tensor_wrap_specs.h
+++ b/nntrainer/tensor/tensor_wrap_specs.h
@@ -35,7 +35,7 @@ enum class WeightRegularizer {
  *
  */
 enum class TensorLifespan {
-  ZERO_LIFESPAN = 0b0, /**< tensor with no lifespan, will not be allocated */
+  UNMANAGED = 0b0, /**< tensor with no lifespan, will not be allocated */
   FORWARD_FUNC_LIFESPAN = 0b01,
   /**< tensor must not be reset before during the forward function call, eg.
      temporary tensors needed during forward operations */
diff --git a/test/unittest/unittest_nntrainer_tensor_pool.cpp b/test/unittest/unittest_nntrainer_tensor_pool.cpp
index df9dea4..48d54b5 100644
--- a/test/unittest/unittest_nntrainer_tensor_pool.cpp
+++ b/test/unittest/unittest_nntrainer_tensor_pool.cpp
@@ -35,8 +35,7 @@
   nntrainer::TensorPool pool;
 
   EXPECT_THROW(pool.requestTensor(nntrainer::TensorDim(), {},
-                                  nntrainer::TensorLifespan::ZERO_LIFESPAN,
-                                  "abc"),
+                                  nntrainer::TensorLifespan::UNMANAGED, "abc"),
               std::invalid_argument);
 }
 
@@ -47,7 +46,7 @@
   nntrainer::TensorPool pool;
 
  EXPECT_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
-                                  nntrainer::TensorLifespan::ZERO_LIFESPAN, ""),
+                                  nntrainer::TensorLifespan::UNMANAGED, ""),
               std::invalid_argument);
 }
 
@@ -58,9 +57,9 @@
   nntrainer::TensorPool pool;
   nntrainer::Tensor *t;
 
-  EXPECT_NO_THROW(
-    t = pool.requestTensor(nntrainer::TensorDim({1}), {},
-                           nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NO_THROW(t = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                                         nntrainer::TensorLifespan::UNMANAGED,
+                                         "abc"));
   EXPECT_NE(t, nullptr);
   EXPECT_FALSE(t->isAllocated());
 }
@@ -72,12 +71,11 @@
   nntrainer::TensorPool pool;
 
   EXPECT_NO_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
-                                     nntrainer::TensorLifespan::ZERO_LIFESPAN,
+                                     nntrainer::TensorLifespan::UNMANAGED,
                                      "abc"));
 
   EXPECT_THROW(pool.requestTensor(nntrainer::TensorDim({2}), {},
-                                  nntrainer::TensorLifespan::ZERO_LIFESPAN,
-                                  "abc"),
+                                  nntrainer::TensorLifespan::UNMANAGED, "abc"),
               std::invalid_argument);
 }
 
@@ -88,15 +86,15 @@
   nntrainer::TensorPool pool;
   nntrainer::Tensor *t1, *t2;
 
-  EXPECT_NO_THROW(
-    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
-                            nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NO_THROW(t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                                          nntrainer::TensorLifespan::UNMANAGED,
+                                          "abc"));
   EXPECT_NE(t1, nullptr);
   EXPECT_FALSE(t1->isAllocated());
 
   EXPECT_NO_THROW(t2 = pool.requestPrerequestedTensor(
                     nntrainer::TensorDim({1}), {},
-                    nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc1", "abc"));
+                    nntrainer::TensorLifespan::UNMANAGED, "abc1", "abc"));
   EXPECT_NE(t2, nullptr);
   EXPECT_FALSE(t2->isAllocated());
 
@@ -110,12 +108,12 @@
   nntrainer::TensorPool pool;
 
   EXPECT_NO_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
-                                     nntrainer::TensorLifespan::ZERO_LIFESPAN,
+                                     nntrainer::TensorLifespan::UNMANAGED,
                                      "abc"));
 
   EXPECT_THROW(pool.requestPrerequestedTensor(
                  nntrainer::TensorDim({2}), {},
-                 nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc1", "abc"),
+                 nntrainer::TensorLifespan::UNMANAGED, "abc1", "abc"),
               std::invalid_argument);
 }
 
@@ -126,12 +124,12 @@
   nntrainer::TensorPool pool;
 
   EXPECT_NO_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
-                                     nntrainer::TensorLifespan::ZERO_LIFESPAN,
+                                     nntrainer::TensorLifespan::UNMANAGED,
                                      "abc"));
 
   EXPECT_THROW(pool.requestPrerequestedTensor(
                  nntrainer::TensorDim({1}), {},
-                 nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc1", "not_exist"),
+                 nntrainer::TensorLifespan::UNMANAGED, "abc1", "not_exist"),
               std::invalid_argument);
 }
 
@@ -142,9 +140,9 @@
   nntrainer::TensorPool pool;
   nntrainer::Tensor *t1, *t2;
 
-  EXPECT_NO_THROW(
-    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
-                            nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NO_THROW(t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                                          nntrainer::TensorLifespan::UNMANAGED,
+                                          "abc"));
   EXPECT_NE(t1, nullptr);
   EXPECT_FALSE(t1->isAllocated());
 
@@ -172,12 +170,12 @@
   nntrainer::TensorPool pool;
 
   EXPECT_NO_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
-                                     nntrainer::TensorLifespan::ZERO_LIFESPAN,
+                                     nntrainer::TensorLifespan::UNMANAGED,
                                      "abc"));
 
   EXPECT_THROW(pool.requestPrerequestedTensor(
                  nntrainer::TensorDim({1}), {},
-                 nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc", "abc"),
+                 nntrainer::TensorLifespan::UNMANAGED, "abc", "abc"),
               std::invalid_argument);
 }
 
@@ -188,9 +186,9 @@
   nntrainer::TensorPool pool;
   nntrainer::Tensor *t1;
 
-  EXPECT_NO_THROW(
-    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
-                            nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NO_THROW(t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                                          nntrainer::TensorLifespan::UNMANAGED,
+                                          "abc"));
   EXPECT_NE(t1, nullptr);
   EXPECT_FALSE(t1->isAllocated());
 
@@ -206,9 +204,9 @@
   nntrainer::TensorPool pool;
   nntrainer::Tensor *t1;
 
-  EXPECT_NO_THROW(
-    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
-                            nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NO_THROW(t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                                          nntrainer::TensorLifespan::UNMANAGED,
+                                          "abc"));
   EXPECT_NE(t1, nullptr);
   EXPECT_FALSE(t1->isAllocated());
 
@@ -223,9 +221,9 @@
   nntrainer::TensorPool pool;
   nntrainer::Tensor *t1, *t2;
 
-  EXPECT_NO_THROW(
-    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
-                            nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc1"));
+  EXPECT_NO_THROW(t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                                          nntrainer::TensorLifespan::UNMANAGED,
+                                          "abc1"));
   EXPECT_NE(t1, nullptr);
   EXPECT_FALSE(t1->isAllocated());
 
-- 
2.7.4
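
Note (illustrative, not part of the patch): a minimal sketch of what UNMANAGED
means for a caller, using only the TensorPool calls that appear in the hunks
above. The include paths, the exec_order value {0}, and the Tensor(TensorDim)
constructor for the external buffer are assumptions made for the example.

// Illustrative sketch only; mirrors the semantics shown in the hunks above.
#include <stdexcept>

#include <tensor.h>      // assumed include path
#include <tensor_pool.h> // assumed include path

void unmanaged_example() {
  nntrainer::TensorPool pool;

  // An UNMANAGED tensor is registered with the pool but never planned or
  // allocated by it: finalize() skips specs whose lifespan is UNMANAGED,
  // and requestExternallyAllocateTensor() is now just this call.
  nntrainer::Tensor *ext =
    pool.requestTensor(nntrainer::TensorDim({1}), {},
                       nntrainer::TensorLifespan::UNMANAGED, "ext_src");
  // ext->isAllocated() stays false; the pool does not own its memory.

  // The backing memory comes from outside. setExternalTensor() accepts this
  // only because the lifespan is UNMANAGED.
  nntrainer::Tensor external_buf(nntrainer::TensorDim({1})); // assumed ctor
  pool.setExternalTensor("ext_src", external_buf);

  // A pool-managed tensor cannot be backed externally; the same call throws
  // std::invalid_argument ("Cannot set external tensor for non-zero lifespan").
  pool.requestTensor(nntrainer::TensorDim({1}), {0},
                     nntrainer::TensorLifespan::FORWARD_FUNC_LIFESPAN,
                     "managed");
  try {
    pool.setExternalTensor("managed", external_buf);
  } catch (const std::invalid_argument &) {
    // expected: the lifespan of "managed" is owned by the pool
  }
}

In short, the pool never plans, allocates, or extends such a tensor; it only
tracks a slot that external code fills, which is what the new name conveys.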