--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file tensor_pool.cpp
+ * @date 19 Aug 2020
+ * @brief This is TensorPool for all requested tensors
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ *
+ * @todo add checks for request/updates that finalize is not done
+ * @todo check before allocate that finalize is done
+ */
+
+#include <memory_pool.h>
+#include <tensor.h>
+#include <tensor_pool.h>
+#include <tensor_wrap_specs.h>
+#include <util_func.h>
+
+namespace nntrainer {
+
+/**
+ * @brief Request tensor with the given spec
+ *
+ * @note returns empty tensor which will be filled when allocate is called.
+ * @note we assume that the caller checks if the exec_order and lifespan are
+ * compatible.
+ */
+Tensor *TensorPool::requestTensor(const TensorDim dim,
+ const std::vector<unsigned int> &exec_order,
+ TensorLifespan lifespan,
+ const std::string &name) {
+ if (pool.find(name) != pool.end())
+ throw std::invalid_argument("Cannot request tensor with same name");
+
+ if (dim.getDataLen() == 0)
+ throw std::invalid_argument("Cannot request tensor with size 0");
+
+ if (name.empty())
+ throw std::invalid_argument("Cannot request tensor with empty name");
+
+ pool[name] = {
+ std::make_unique<Tensor>(dim, false, Tensor::Initializer::NONE, name),
+ exec_order, lifespan, 0};
+
+ return pool[name].tensor.get();
+}
+
+/**
+ * @brief Request tensor which has been already requested with the given
+ * spec
+ *
+ * @note returns empty tensor which will be filled when allocate is called.
+ * @note we assume that the caller checks if the exec_order and lifespan are
+ * compatible.
+ */
+Tensor *TensorPool::requestPrerequestedTensor(
+ const TensorDim dim, const std::vector<unsigned int> &exec_order,
+
+ TensorLifespan lifespan, const std::string &name) {
+ if (pool.find(name) == pool.end())
+ throw std::invalid_argument("Requested tensor not found");
+
+ auto &spec = pool[name];
+ if (spec.tensor->getDim() != dim)
+ throw std::invalid_argument("Request tensor dimension mismatch");
+
+ spec.exec_order.insert(spec.exec_order.end(), exec_order.begin(),
+ exec_order.end());
+ spec.lifespan = enum_class_or<TensorLifespan>(spec.lifespan, lifespan);
+
+ return pool[name].tensor.get();
+}
+
+/**
+ * @brief finalize the requested tensors
+ *
+ * @details finalize the requested tensors, request memory for them and plan
+ * layout for their allocations.
+ */
+void TensorPool::finalize(const MemoryPlanner &planner,
+ unsigned int start_order, unsigned int end_order) {
+ unsigned int bytes_requested = 0;
+ for (auto &entry : pool) {
+ auto &spec = entry.second;
+
+ if (spec.exec_order.empty())
+ continue;
+
+ /** 1. create the validity ranges for the all the requested tensors */
+ unsigned int validity_start =
+ *std::min_element(spec.exec_order.begin(), spec.exec_order.end());
+ unsigned int validity_end =
+ *std::max_element(spec.exec_order.begin(), spec.exec_order.end());
+
+ /**
+ * use lifespan to update the validity.
+ * if the validity is long term, the tensor must stay valid for the
+ * complete duration.
+ */
+ if (isTensorLongTerm(spec.lifespan)) {
+ validity_start = start_order;
+ validity_end = end_order;
+ }
+
+ /** 2. for each tensor request if it is in the provided range */
+ if (validity_end < start_order || validity_start > end_order)
+ continue;
+ validity_start = std::max(validity_start, start_order);
+ validity_end = std::max(validity_end, end_order);
+
+ /**
+ * 3. requestMemory for all the tensors and set their tokens
+ * @note +1 is to make the validity_end exlusive in the interval range
+ */
+ spec.token = mem_pool.requestMemory(spec.tensor->bytes(), validity_start,
+ validity_end + 1);
+ bytes_requested += spec.tensor->bytes();
+ }
+
+ /** 4. finalizeLayout for the memory pool. */
+ if (bytes_requested > 0)
+ mem_pool.planLayout(planner);
+}
+
+/**
+ * @brief Set the batch size for the inputs/outputs of the layers
+ */
+void TensorPool::setBatchSize(const std::string &name, unsigned int batch) {
+ if (pool.find(name) == pool.end())
+ throw std::invalid_argument("Requested tensor not found");
+
+ pool[name].tensor->updateBatch(batch);
+}
+
+/**
+ * @brief Allocate memory for all the managed tensors
+ */
+void TensorPool::allocate() {
+ mem_pool.allocate();
+
+ /** set the pointers using the token for all the tensors */
+ for (auto &entry : pool) {
+ auto &spec = entry.second;
+ spec.tensor->setData(mem_pool.getMemory(spec.token));
+ }
+}
+
+/**
+ * @brief Deallocate memory for all the managed tensors
+ */
+void TensorPool::deallocate() {
+ mem_pool.deallocate();
+
+ /** nullify the data pointers for the tensors */
+ for (auto &entry : pool) {
+ auto &spec = entry.second;
+ spec.tensor->setData(nullptr);
+ }
+}
+
+/**
+ * @brief Expand the lifespan of the tensor with the given name
+ *
+ */
+void TensorPool::expand_lifespan(const std::string &name,
+ TensorLifespan lifespan) {
+ if (pool.find(name) == pool.end())
+ throw std::invalid_argument("Requested tensor not found");
+
+ auto &spec = pool[name];
+ spec.lifespan = enum_class_or<TensorLifespan>(spec.lifespan, lifespan);
+}
+
+/**
+ * @brief Expand the execution order of the tensor with the given name
+ *
+ */
+void TensorPool::expand_lifespan(const std::string &name,
+ const std::vector<unsigned int> &exec_order) {
+ if (pool.find(name) == pool.end())
+ throw std::invalid_argument("Requested tensor not found");
+
+ auto &spec = pool[name];
+ spec.exec_order.insert(spec.exec_order.end(), exec_order.begin(),
+ exec_order.end());
+}
+
+/**
+ * @brief Check if the lifespan leads to long term valitidy
+ *
+ */
+bool TensorPool::isTensorLongTerm(const TensorLifespan &lifespan) {
+ switch (lifespan) {
+ case TensorLifespan::EPOCH_LIFESPAN:
+ [[fallthrough]];
+ case TensorLifespan::MAX_LIFESPAN:
+ return true;
+ case TensorLifespan::FORWARD_FUNC_LIFESPAN:
+ [[fallthrough]];
+ case TensorLifespan::BACKWARD_FUNC_LIFESPAN:
+ [[fallthrough]];
+ case TensorLifespan::ITERATION_LIFESPAN:
+ [[fallthrough]];
+ case TensorLifespan::ZERO_LIFESPAN:
+ [[fallthrough]];
+ default:
+ return false;
+ }
+}
+
+} // namespace nntrainer
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2021 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file unittest_nntrainer_tensor_pool.cpp
+ * @date 20 August 2021
+ * @brief Tensor Pool Test
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include <cstring>
+#include <random>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include <basic_planner.h>
+#include <tensor_pool.h>
+
// NOTE(review): these constants are not referenced anywhere in the visible
// part of this file — possibly leftovers from a template; confirm before
// removing.
constexpr unsigned int MEM_BYTES = 128;
constexpr unsigned int MEM_QUANT = 100;
constexpr unsigned int INTERVAL_SIZE = 5;
+
+/**
+ * @brief creation and destruction
+ */
+TEST(TensorPool, create_destroy) { EXPECT_NO_THROW(nntrainer::TensorPool()); }
+
+/**
+ * @brief request empty tensor
+ */
+TEST(TensorPool, request_mem_01_n) {
+ nntrainer::TensorPool pool;
+
+ EXPECT_THROW(pool.requestTensor(nntrainer::TensorDim(), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN,
+ "abc"),
+ std::invalid_argument);
+}
+
+/**
+ * @brief request empty name
+ */
+TEST(TensorPool, request_mem_02_n) {
+ nntrainer::TensorPool pool;
+
+ EXPECT_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN, ""),
+ std::invalid_argument);
+}
+
+/**
+ * @brief request tensor
+ */
+TEST(TensorPool, request_mem_03_p) {
+ nntrainer::TensorPool pool;
+ nntrainer::Tensor *t;
+
+ EXPECT_NO_THROW(
+ t = pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+ EXPECT_NE(t, nullptr);
+ EXPECT_FALSE(t->isAllocated());
+}
+
+/**
+ * @brief request already allocated tensor
+ */
+TEST(TensorPool, request_mem_04_n) {
+ nntrainer::TensorPool pool;
+
+ EXPECT_NO_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN,
+ "abc"));
+
+ EXPECT_THROW(pool.requestTensor(nntrainer::TensorDim({2}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN,
+ "abc"),
+ std::invalid_argument);
+}
+
+/**
+ * @brief request already allocated tensor
+ */
+TEST(TensorPool, request_mem_05_p) {
+ nntrainer::TensorPool pool;
+ nntrainer::Tensor *t1, *t2;
+
+ EXPECT_NO_THROW(
+ t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+ EXPECT_NE(t1, nullptr);
+ EXPECT_FALSE(t1->isAllocated());
+
+ EXPECT_NO_THROW(t2 = pool.requestPrerequestedTensor(
+ nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+ EXPECT_NE(t2, nullptr);
+ EXPECT_FALSE(t2->isAllocated());
+
+ EXPECT_EQ(t1, t2);
+}
+
+/**
+ * @brief request already allocated tensor
+ */
+TEST(TensorPool, request_mem_06_n) {
+ nntrainer::TensorPool pool;
+
+ EXPECT_NO_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN,
+ "abc"));
+
+ EXPECT_THROW(pool.requestPrerequestedTensor(
+ nntrainer::TensorDim({2}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"),
+ std::invalid_argument);
+}
+
+/**
+ * @brief request already allocated tensor
+ */
+TEST(TensorPool, request_mem_07_n) {
+ nntrainer::TensorPool pool;
+
+ EXPECT_NO_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN,
+ "abc"));
+
+ EXPECT_THROW(pool.requestPrerequestedTensor(
+ nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN, "not_exist"),
+ std::invalid_argument);
+}
+
+/**
+ * @brief request already allocated tensor
+ */
+TEST(TensorPool, request_mem_08_p) {
+ nntrainer::TensorPool pool;
+ nntrainer::Tensor *t1, *t2;
+
+ EXPECT_NO_THROW(
+ t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+ EXPECT_NE(t1, nullptr);
+ EXPECT_FALSE(t1->isAllocated());
+
+ EXPECT_NO_THROW(t2 = pool.requestPrerequestedTensor(
+ nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::MAX_LIFESPAN, "abc"));
+ EXPECT_NE(t2, nullptr);
+ EXPECT_FALSE(t2->isAllocated());
+
+ EXPECT_EQ(t1, t2);
+
+ EXPECT_NO_THROW(t2 = pool.requestPrerequestedTensor(
+ nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::MAX_LIFESPAN, "abc"));
+ EXPECT_NE(t2, nullptr);
+ EXPECT_FALSE(t2->isAllocated());
+
+ EXPECT_EQ(t1, t2);
+}
+
+/**
+ * @brief set batch
+ */
+TEST(TensorPool, set_batch_01_p) {
+ nntrainer::TensorPool pool;
+ nntrainer::Tensor *t1;
+
+ EXPECT_NO_THROW(
+ t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+ EXPECT_NE(t1, nullptr);
+ EXPECT_FALSE(t1->isAllocated());
+
+ EXPECT_EQ(t1->batch(), 1);
+ EXPECT_NO_THROW(pool.setBatchSize("abc", 10));
+ EXPECT_EQ(t1->batch(), 10);
+}
+
+/**
+ * @brief set batch
+ */
+TEST(TensorPool, set_batch_02_n) {
+ nntrainer::TensorPool pool;
+ nntrainer::Tensor *t1;
+
+ EXPECT_NO_THROW(
+ t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+ EXPECT_NE(t1, nullptr);
+ EXPECT_FALSE(t1->isAllocated());
+
+ EXPECT_THROW(pool.setBatchSize("not_exist", 10), std::invalid_argument);
+ EXPECT_EQ(t1->batch(), 1);
+}
+
+/**
+ * @brief zero size pool as no usage
+ */
+TEST(TensorPool, finalize_01_p) {
+ nntrainer::TensorPool pool;
+ nntrainer::Tensor *t1, *t2;
+
+ EXPECT_NO_THROW(
+ t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc1"));
+ EXPECT_NE(t1, nullptr);
+ EXPECT_FALSE(t1->isAllocated());
+
+ EXPECT_NO_THROW(
+ t2 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+ nntrainer::TensorLifespan::MAX_LIFESPAN, "abc2"));
+ EXPECT_NE(t2, nullptr);
+ EXPECT_FALSE(t2->isAllocated());
+
+ EXPECT_NE(t1, t2);
+
+ EXPECT_NO_THROW(pool.finalize(nntrainer::BasicPlanner(), 0, 2));
+ EXPECT_EQ(pool.minMemoryRequirement(), 0);
+
+ EXPECT_FALSE(t1->isAllocated());
+ EXPECT_FALSE(t2->isAllocated());
+}
+
+/**
+ * @brief max lifespan tensors
+ */
+TEST(TensorPool, finalize_02_p) {
+ nntrainer::TensorPool pool;
+ nntrainer::Tensor *t1, *t2;
+
+ EXPECT_NO_THROW(
+ t1 = pool.requestTensor(nntrainer::TensorDim({1}), {0},
+ nntrainer::TensorLifespan::MAX_LIFESPAN, "abc1"));
+ EXPECT_NE(t1, nullptr);
+
+ EXPECT_FALSE(t1->isAllocated());
+
+ EXPECT_NO_THROW(
+ t2 = pool.requestTensor(nntrainer::TensorDim({1}), {1},
+ nntrainer::TensorLifespan::MAX_LIFESPAN, "abc2"));
+ EXPECT_NE(t2, nullptr);
+
+ EXPECT_FALSE(t2->isAllocated());
+
+ EXPECT_NE(t1, t2);
+
+ EXPECT_NO_THROW(pool.finalize(nntrainer::BasicPlanner(), 0, 2));
+ EXPECT_EQ(pool.minMemoryRequirement(), t1->bytes() + t2->bytes());
+
+ EXPECT_FALSE(t1->isAllocated());
+ EXPECT_FALSE(t2->isAllocated());
+}
+
+/**
+ * @brief max lifespan tensors
+ */
+TEST(TensorPool, finalize_03_p) {
+ nntrainer::TensorPool pool;
+
+ EXPECT_NO_THROW(pool.finalize(nntrainer::BasicPlanner(), 0, 2));
+}
+
+/**
+ * @brief allocate
+ */
+TEST(TensorPool, allocate_deallocate_01_p) {
+ nntrainer::TensorPool pool;
+ nntrainer::Tensor *t1, *t2;
+
+ EXPECT_NO_THROW(
+ t1 = pool.requestTensor(nntrainer::TensorDim({1}), {0},
+ nntrainer::TensorLifespan::MAX_LIFESPAN, "abc1"));
+ EXPECT_NE(t1, nullptr);
+ EXPECT_FALSE(t1->isAllocated());
+
+ EXPECT_NO_THROW(
+ t2 = pool.requestTensor(nntrainer::TensorDim({1}), {1},
+ nntrainer::TensorLifespan::MAX_LIFESPAN, "abc2"));
+ EXPECT_NE(t2, nullptr);
+ EXPECT_FALSE(t2->isAllocated());
+
+ EXPECT_NE(t1, t2);
+
+ EXPECT_NO_THROW(pool.finalize(nntrainer::BasicPlanner(), 0, 2));
+
+ EXPECT_NO_THROW(pool.allocate());
+ EXPECT_TRUE(t1->isAllocated());
+ EXPECT_TRUE(t2->isAllocated());
+
+ EXPECT_NO_THROW(pool.deallocate());
+ EXPECT_FALSE(t1->isAllocated());
+ EXPECT_FALSE(t2->isAllocated());
+}
+
+/**
+ * @brief allocate
+ */
+TEST(TensorPool, allocate_deallocate_02_n) {
+ nntrainer::TensorPool pool;
+
+ EXPECT_THROW(pool.allocate(), std::runtime_error);
+
+ EXPECT_NO_THROW(pool.deallocate());
+}
+
+/**
+ * @brief validate memory full overlap
+ */
+TEST(TensorPool, validate_memory) {
+ nntrainer::TensorPool pool;
+ nntrainer::Tensor *t1, *t2;
+
+ EXPECT_NO_THROW(
+ t1 = pool.requestTensor(nntrainer::TensorDim({100}), {0},
+ nntrainer::TensorLifespan::MAX_LIFESPAN, "abc1"));
+
+ EXPECT_NO_THROW(
+ t2 = pool.requestTensor(nntrainer::TensorDim({100}), {1},
+ nntrainer::TensorLifespan::MAX_LIFESPAN, "abc2"));
+
+ EXPECT_NO_THROW(pool.finalize(nntrainer::BasicPlanner(), 0, 2));
+ EXPECT_NO_THROW(pool.allocate());
+
+ nntrainer::Tensor g1 = nntrainer::Tensor(nntrainer::TensorDim({100}));
+ g1.setRandNormal();
+ nntrainer::Tensor g2 = nntrainer::Tensor(nntrainer::TensorDim({100}));
+ g2.setRandNormal();
+
+ t1->copy(g1);
+ t2->copy(g2);
+
+ EXPECT_EQ(*t1, g1);
+ EXPECT_EQ(*t2, g2);
+
+ EXPECT_NO_THROW(pool.deallocate());
+}
+
+/**
+ * @brief Main gtest
+ */
+int main(int argc, char **argv) {
+ int result = -1;
+
+ try {
+ testing::InitGoogleTest(&argc, argv);
+ } catch (...) {
+ std::cerr << "Failed to init gtest\n";
+ }
+
+ try {
+ result = RUN_ALL_TESTS();
+ } catch (...) {
+ std::cerr << "Failed to run test.\n";
+ }
+
+ return result;
+}