[TensorPool] Added tensor pool implementation
author    Parichay Kapoor <pk.kapoor@samsung.com>
Fri, 20 Aug 2021 11:10:55 +0000 (20:10 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Tue, 28 Sep 2021 04:29:40 +0000 (13:29 +0900)
Added tensor pool implementation with unittests.
TensorPool provides the interface where all the requested tensors are
managed in one place, ensuring the uniqueness of their names while
tracking their lifespans and execution orders.
Corresponding unittests verifying the behavior are added.
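
A minimal usage sketch of the call sequence the pool expects (assuming
tensor_pool.h and basic_planner.h are included; the dimensions, execution
orders and the "feature" name below are illustrative):

    nntrainer::TensorPool pool;

    /** request a placeholder tensor; no memory is allocated yet */
    nntrainer::Tensor *t1 = pool.requestTensor(
      nntrainer::TensorDim({100}), {0},
      nntrainer::TensorLifespan::MAX_LIFESPAN, "feature");

    /** a later request under the same name shares the same tensor */
    nntrainer::Tensor *t2 = pool.requestPrerequestedTensor(
      nntrainer::TensorDim({100}), {1},
      nntrainer::TensorLifespan::MAX_LIFESPAN, "feature");

    /** plan the layout for execution orders [0, 2], then allocate */
    pool.finalize(nntrainer::BasicPlanner(), 0, 2);
    pool.allocate();
    /** ... t1 == t2 now point to allocated memory ... */
    pool.deallocate();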

Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
nntrainer/tensor/memory_pool.h
nntrainer/tensor/meson.build
nntrainer/tensor/tensor_pool.cpp [new file with mode: 0644]
nntrainer/tensor/tensor_pool.h
test/unittest/meson.build
test/unittest/unittest_nntrainer_tensor_pool.cpp [new file with mode: 0644]

diff --git a/nntrainer/tensor/memory_pool.h b/nntrainer/tensor/memory_pool.h
index 46ee8c1..c9fcc23 100644
@@ -126,7 +126,7 @@ private:
    * @brief Calculate the minimum memory requirement for the given memory
    * requests
    *
-   * @returns the minimum memory requirement in bytes
+   * @return the minimum memory requirement in bytes
    *
   * @note This will be the theoretical minimum memory requirement ensuring that the
    * memory usages at the same time do not overlap with their validity. This
diff --git a/nntrainer/tensor/meson.build b/nntrainer/tensor/meson.build
index f88b814..68fb715 100644
@@ -7,7 +7,8 @@ tensor_sources = [
   'var_grad.cpp',
   'weight.cpp',
   'basic_planner.cpp',
-  'memory_pool.cpp'
+  'memory_pool.cpp',
+  'tensor_pool.cpp'
 ]
 
 tensor_headers = [
diff --git a/nntrainer/tensor/tensor_pool.cpp b/nntrainer/tensor/tensor_pool.cpp
new file mode 100644
index 0000000..1d4d479
--- /dev/null
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2021 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file   tensor_pool.cpp
+ * @date   19 Aug 2021
+ * @brief  This is TensorPool for all requested tensors
+ * @see    https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug    No known bugs except for NYI items
+ *
+ * @todo   add checks for request/updates that finalize is not done
+ * @todo   check before allocate that finalize is done
+ */
+
+#include <algorithm>
+#include <memory_pool.h>
+#include <stdexcept>
+#include <tensor.h>
+#include <tensor_pool.h>
+#include <tensor_wrap_specs.h>
+#include <util_func.h>
+
+namespace nntrainer {
+
+/**
+ * @brief     Request tensor with the given spec
+ *
+ * @note returns an empty tensor which will be filled when allocate is called.
+ * @note we assume that the caller checks if the exec_order and lifespan are
+ * compatible.
+ */
+Tensor *TensorPool::requestTensor(const TensorDim dim,
+                                  const std::vector<unsigned int> &exec_order,
+                                  TensorLifespan lifespan,
+                                  const std::string &name) {
+  if (pool.find(name) != pool.end())
+    throw std::invalid_argument("Cannot request tensor with same name");
+
+  if (dim.getDataLen() == 0)
+    throw std::invalid_argument("Cannot request tensor with size 0");
+
+  if (name.empty())
+    throw std::invalid_argument("Cannot request tensor with empty name");
+
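+  /** the tensor is created as a placeholder without data; its token stays 0
+   * until finalize() requests memory for it */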
+  pool[name] = {
+    std::make_unique<Tensor>(dim, false, Tensor::Initializer::NONE, name),
+    exec_order, lifespan, 0};
+
+  return pool[name].tensor.get();
+}
+
+/**
+ * @brief     Request a tensor which has already been requested with the
+ * given spec
+ *
+ * @note returns an empty tensor which will be filled when allocate is called.
+ * @note we assume that the caller checks if the exec_order and lifespan are
+ * compatible.
+ */
+Tensor *TensorPool::requestPrerequestedTensor(
+  const TensorDim dim, const std::vector<unsigned int> &exec_order,
+  TensorLifespan lifespan, const std::string &name) {
+  if (pool.find(name) == pool.end())
+    throw std::invalid_argument("Requested tensor not found");
+
+  auto &spec = pool[name];
+  if (spec.tensor->getDim() != dim)
+    throw std::invalid_argument("Request tensor dimension mismatch");
+
+  spec.exec_order.insert(spec.exec_order.end(), exec_order.begin(),
+                         exec_order.end());
+  spec.lifespan = enum_class_or<TensorLifespan>(spec.lifespan, lifespan);
+
+  return pool[name].tensor.get();
+}
+
+/**
+ * @brief finalize the requested tensors
+ *
+ * @details finalize the requested tensors, request memory for them and plan
+ * layout for their allocations.
+ */
+void TensorPool::finalize(const MemoryPlanner &planner,
+                          unsigned int start_order, unsigned int end_order) {
+  size_t bytes_requested = 0;
+  for (auto &entry : pool) {
+    auto &spec = entry.second;
+
+    if (spec.exec_order.empty())
+      continue;
+
+    /** 1. create the validity ranges for all the requested tensors */
+    unsigned int validity_start =
+      *std::min_element(spec.exec_order.begin(), spec.exec_order.end());
+    unsigned int validity_end =
+      *std::max_element(spec.exec_order.begin(), spec.exec_order.end());
+
+    /**
+     * use lifespan to update the validity.
+     * if the lifespan is long term, the tensor must stay valid for the
+     * complete duration.
+     */
+    if (isTensorLongTerm(spec.lifespan)) {
+      validity_start = start_order;
+      validity_end = end_order;
+    }
+
+    /** 2. skip the tensor request if it lies outside the provided range */
+    if (validity_end < start_order || validity_start > end_order)
+      continue;
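+    /** clamp the validity window to the provided [start_order, end_order] */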
+    validity_start = std::max(validity_start, start_order);
+    validity_end = std::min(validity_end, end_order);
+
+    /**
+     * 3. requestMemory for all the tensors and set their tokens
+     * @note +1 is to make validity_end exclusive in the interval range
+     */
+    spec.token = mem_pool.requestMemory(spec.tensor->bytes(), validity_start,
+                                        validity_end + 1);
+    bytes_requested += spec.tensor->bytes();
+  }
+
+  /** 4. planLayout for the memory pool if any memory was requested */
+  if (bytes_requested > 0)
+    mem_pool.planLayout(planner);
+}
+
+/**
+ * @brief Set the batch size of the tensor with the given name
+ */
+void TensorPool::setBatchSize(const std::string &name, unsigned int batch) {
+  if (pool.find(name) == pool.end())
+    throw std::invalid_argument("Requested tensor not found");
+
+  pool[name].tensor->updateBatch(batch);
+}
+
+/**
+ * @brief Allocate memory for all the managed tensors
+ */
+void TensorPool::allocate() {
+  mem_pool.allocate();
+
+  /** set the pointers using the token for all the tensors */
+  for (auto &entry : pool) {
+    auto &spec = entry.second;
+    spec.tensor->setData(mem_pool.getMemory(spec.token));
+  }
+}
+
+/**
+ * @brief Deallocate memory for all the managed tensors
+ */
+void TensorPool::deallocate() {
+  mem_pool.deallocate();
+
+  /** nullify the data pointers for the tensors */
+  for (auto &entry : pool) {
+    auto &spec = entry.second;
+    spec.tensor->setData(nullptr);
+  }
+}
+
+/**
+ * @brief     Expand the lifespan of the tensor with the given name
+ *
+ */
+void TensorPool::expand_lifespan(const std::string &name,
+                                 TensorLifespan lifespan) {
+  if (pool.find(name) == pool.end())
+    throw std::invalid_argument("Requested tensor not found");
+
+  auto &spec = pool[name];
+  spec.lifespan = enum_class_or<TensorLifespan>(spec.lifespan, lifespan);
+}
+
+/**
+ * @brief     Expand the execution order of the tensor with the given name
+ *
+ */
+void TensorPool::expand_lifespan(const std::string &name,
+                                 const std::vector<unsigned int> &exec_order) {
+  if (pool.find(name) == pool.end())
+    throw std::invalid_argument("Requested tensor not found");
+
+  auto &spec = pool[name];
+  spec.exec_order.insert(spec.exec_order.end(), exec_order.begin(),
+                         exec_order.end());
+}
+
+/**
+ * @brief     Check if the lifespan leads to long term validity
+ *
+ */
+bool TensorPool::isTensorLongTerm(const TensorLifespan &lifespan) {
+  switch (lifespan) {
+  case TensorLifespan::EPOCH_LIFESPAN:
+    [[fallthrough]];
+  case TensorLifespan::MAX_LIFESPAN:
+    return true;
+  case TensorLifespan::FORWARD_FUNC_LIFESPAN:
+    [[fallthrough]];
+  case TensorLifespan::BACKWARD_FUNC_LIFESPAN:
+    [[fallthrough]];
+  case TensorLifespan::ITERATION_LIFESPAN:
+    [[fallthrough]];
+  case TensorLifespan::ZERO_LIFESPAN:
+    [[fallthrough]];
+  default:
+    return false;
+  }
+}
+
+} // namespace nntrainer
diff --git a/nntrainer/tensor/tensor_pool.h b/nntrainer/tensor/tensor_pool.h
index cbc2fb1..b29bc64 100644
@@ -76,7 +76,7 @@ public:
    * @note we assume that the caller checks if the exec_order and lifespan are
    * compatible.
    */
-  Tensor *requestPreallocatedTensor(const TensorDim dim,
+  Tensor *requestPrerequestedTensor(const TensorDim dim,
                                     const std::vector<unsigned int> &exec_order,
 
                                     TensorLifespan lifespan,
@@ -126,7 +126,24 @@ public:
   void expand_lifespan(const std::string &name,
                        const std::vector<unsigned int> &exec_order);
 
+  /**
+   * @brief Get the maximum real memory requirement
+   *
+   * @return The real memory requirement with this strategy in bytes
+   */
+  size_t size() { return mem_pool.size(); }
+
+  /**
+   * @brief Get the minimum theoretical memory requirement
+   *
+   * @return The theoretical memory requirement with this strategy in bytes
+   */
+  size_t minMemoryRequirement() { return mem_pool.minMemoryRequirement(); }
+
 private:
+  /**
+   * @brief Spec for storing each request of tensor from tensor pool
+   */
   struct requestSpec {
     std::unique_ptr<Tensor> tensor;       /**< tensor object itself */
     std::vector<unsigned int> exec_order; /**< tensor exec order list */
@@ -137,6 +154,15 @@ private:
   std::unordered_map<std::string, requestSpec>
     pool;              /**< list of requested tensors */
   MemoryPool mem_pool; /**< memory pool for the tensors */
+
+  /**
+   * @brief     Check if the lifespan leads to long term validity
+   *
+   * @param lifespan Lifespan for the tensor
+   *
+   * @return true if the tensor should be valid for long term, else false
+   */
+  bool isTensorLongTerm(const TensorLifespan &lifespan);
 };
 
 } // namespace nntrainer
diff --git a/test/unittest/meson.build b/test/unittest/meson.build
index 66970b6..66bcc2b 100644
@@ -36,7 +36,8 @@ test_target = [
   'unittest_nntrainer_graph',
   'unittest_nntrainer_appcontext',
   'unittest_base_properties',
-  'unittest_common_properties'
+  'unittest_common_properties',
+  'unittest_nntrainer_tensor_pool'
 ]
 
 if get_option('enable-profile')
diff --git a/test/unittest/unittest_nntrainer_tensor_pool.cpp b/test/unittest/unittest_nntrainer_tensor_pool.cpp
new file mode 100644
index 0000000..ba2c226
--- /dev/null
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2021 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file unittest_nntrainer_tensor_pool.cpp
+ * @date 20 August 2021
+ * @brief Tensor Pool Test
+ * @see    https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include <cstring>
+#include <random>
+#include <vector>
+
+#include <gtest/gtest.h>
+
+#include <basic_planner.h>
+#include <tensor_pool.h>
+
+constexpr unsigned int MEM_BYTES = 128;
+constexpr unsigned int MEM_QUANT = 100;
+constexpr unsigned int INTERVAL_SIZE = 5;
+
+/**
+ * @brief creation and destruction
+ */
+TEST(TensorPool, create_destroy) { EXPECT_NO_THROW(nntrainer::TensorPool()); }
+
+/**
+ * @brief request a zero-sized tensor
+ */
+TEST(TensorPool, request_mem_01_n) {
+  nntrainer::TensorPool pool;
+
+  EXPECT_THROW(pool.requestTensor(nntrainer::TensorDim(), {},
+                                  nntrainer::TensorLifespan::ZERO_LIFESPAN,
+                                  "abc"),
+               std::invalid_argument);
+}
+
+/**
+ * @brief request empty name
+ */
+TEST(TensorPool, request_mem_02_n) {
+  nntrainer::TensorPool pool;
+
+  EXPECT_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
+                                  nntrainer::TensorLifespan::ZERO_LIFESPAN, ""),
+               std::invalid_argument);
+}
+
+/**
+ * @brief request tensor
+ */
+TEST(TensorPool, request_mem_03_p) {
+  nntrainer::TensorPool pool;
+  nntrainer::Tensor *t;
+
+  EXPECT_NO_THROW(
+    t = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                           nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NE(t, nullptr);
+  EXPECT_FALSE(t->isAllocated());
+}
+
+/**
+ * @brief request a tensor with a name which is already in use
+ */
+TEST(TensorPool, request_mem_04_n) {
+  nntrainer::TensorPool pool;
+
+  EXPECT_NO_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
+                                     nntrainer::TensorLifespan::ZERO_LIFESPAN,
+                                     "abc"));
+
+  EXPECT_THROW(pool.requestTensor(nntrainer::TensorDim({2}), {},
+                                  nntrainer::TensorLifespan::ZERO_LIFESPAN,
+                                  "abc"),
+               std::invalid_argument);
+}
+
+/**
+ * @brief request an already requested tensor and receive the same tensor
+ */
+TEST(TensorPool, request_mem_05_p) {
+  nntrainer::TensorPool pool;
+  nntrainer::Tensor *t1, *t2;
+
+  EXPECT_NO_THROW(
+    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                            nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NE(t1, nullptr);
+  EXPECT_FALSE(t1->isAllocated());
+
+  EXPECT_NO_THROW(t2 = pool.requestPrerequestedTensor(
+                    nntrainer::TensorDim({1}), {},
+                    nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NE(t2, nullptr);
+  EXPECT_FALSE(t2->isAllocated());
+
+  EXPECT_EQ(t1, t2);
+}
+
+/**
+ * @brief request an already requested tensor with a mismatched dimension
+ */
+TEST(TensorPool, request_mem_06_n) {
+  nntrainer::TensorPool pool;
+
+  EXPECT_NO_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
+                                     nntrainer::TensorLifespan::ZERO_LIFESPAN,
+                                     "abc"));
+
+  EXPECT_THROW(pool.requestPrerequestedTensor(
+                 nntrainer::TensorDim({2}), {},
+                 nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"),
+               std::invalid_argument);
+}
+
+/**
+ * @brief request a prerequested tensor with a name that was never requested
+ */
+TEST(TensorPool, request_mem_07_n) {
+  nntrainer::TensorPool pool;
+
+  EXPECT_NO_THROW(pool.requestTensor(nntrainer::TensorDim({1}), {},
+                                     nntrainer::TensorLifespan::ZERO_LIFESPAN,
+                                     "abc"));
+
+  EXPECT_THROW(pool.requestPrerequestedTensor(
+                 nntrainer::TensorDim({1}), {},
+                 nntrainer::TensorLifespan::ZERO_LIFESPAN, "not_exist"),
+               std::invalid_argument);
+}
+
+/**
+ * @brief request an already requested tensor repeatedly, expanding its lifespan
+ */
+TEST(TensorPool, request_mem_08_p) {
+  nntrainer::TensorPool pool;
+  nntrainer::Tensor *t1, *t2;
+
+  EXPECT_NO_THROW(
+    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                            nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NE(t1, nullptr);
+  EXPECT_FALSE(t1->isAllocated());
+
+  EXPECT_NO_THROW(t2 = pool.requestPrerequestedTensor(
+                    nntrainer::TensorDim({1}), {},
+                    nntrainer::TensorLifespan::MAX_LIFESPAN, "abc"));
+  EXPECT_NE(t2, nullptr);
+  EXPECT_FALSE(t2->isAllocated());
+
+  EXPECT_EQ(t1, t2);
+
+  EXPECT_NO_THROW(t2 = pool.requestPrerequestedTensor(
+                    nntrainer::TensorDim({1}), {},
+                    nntrainer::TensorLifespan::MAX_LIFESPAN, "abc"));
+  EXPECT_NE(t2, nullptr);
+  EXPECT_FALSE(t2->isAllocated());
+
+  EXPECT_EQ(t1, t2);
+}
+
+/**
+ * @brief set batch
+ */
+TEST(TensorPool, set_batch_01_p) {
+  nntrainer::TensorPool pool;
+  nntrainer::Tensor *t1;
+
+  EXPECT_NO_THROW(
+    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                            nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NE(t1, nullptr);
+  EXPECT_FALSE(t1->isAllocated());
+
+  EXPECT_EQ(t1->batch(), 1);
+  EXPECT_NO_THROW(pool.setBatchSize("abc", 10));
+  EXPECT_EQ(t1->batch(), 10);
+}
+
+/**
+ * @brief set batch
+ */
+TEST(TensorPool, set_batch_02_n) {
+  nntrainer::TensorPool pool;
+  nntrainer::Tensor *t1;
+
+  EXPECT_NO_THROW(
+    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                            nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc"));
+  EXPECT_NE(t1, nullptr);
+  EXPECT_FALSE(t1->isAllocated());
+
+  EXPECT_THROW(pool.setBatchSize("not_exist", 10), std::invalid_argument);
+  EXPECT_EQ(t1->batch(), 1);
+}
+
+/**
+ * @brief zero-sized pool as none of the requests have any usage
+ */
+TEST(TensorPool, finalize_01_p) {
+  nntrainer::TensorPool pool;
+  nntrainer::Tensor *t1, *t2;
+
+  EXPECT_NO_THROW(
+    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                            nntrainer::TensorLifespan::ZERO_LIFESPAN, "abc1"));
+  EXPECT_NE(t1, nullptr);
+  EXPECT_FALSE(t1->isAllocated());
+
+  EXPECT_NO_THROW(
+    t2 = pool.requestTensor(nntrainer::TensorDim({1}), {},
+                            nntrainer::TensorLifespan::MAX_LIFESPAN, "abc2"));
+  EXPECT_NE(t2, nullptr);
+  EXPECT_FALSE(t2->isAllocated());
+
+  EXPECT_NE(t1, t2);
+
+  EXPECT_NO_THROW(pool.finalize(nntrainer::BasicPlanner(), 0, 2));
+  EXPECT_EQ(pool.minMemoryRequirement(), 0);
+
+  EXPECT_FALSE(t1->isAllocated());
+  EXPECT_FALSE(t2->isAllocated());
+}
+
+/**
+ * @brief max lifespan tensors
+ */
+TEST(TensorPool, finalize_02_p) {
+  nntrainer::TensorPool pool;
+  nntrainer::Tensor *t1, *t2;
+
+  EXPECT_NO_THROW(
+    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {0},
+                            nntrainer::TensorLifespan::MAX_LIFESPAN, "abc1"));
+  EXPECT_NE(t1, nullptr);
+
+  EXPECT_FALSE(t1->isAllocated());
+
+  EXPECT_NO_THROW(
+    t2 = pool.requestTensor(nntrainer::TensorDim({1}), {1},
+                            nntrainer::TensorLifespan::MAX_LIFESPAN, "abc2"));
+  EXPECT_NE(t2, nullptr);
+
+  EXPECT_FALSE(t2->isAllocated());
+
+  EXPECT_NE(t1, t2);
+
+  EXPECT_NO_THROW(pool.finalize(nntrainer::BasicPlanner(), 0, 2));
+  EXPECT_EQ(pool.minMemoryRequirement(), t1->bytes() + t2->bytes());
+
+  EXPECT_FALSE(t1->isAllocated());
+  EXPECT_FALSE(t2->isAllocated());
+}
+
+/**
+ * @brief finalize an empty pool
+ */
+TEST(TensorPool, finalize_03_p) {
+  nntrainer::TensorPool pool;
+
+  EXPECT_NO_THROW(pool.finalize(nntrainer::BasicPlanner(), 0, 2));
+}
+
+/**
+ * @brief allocate and deallocate
+ */
+TEST(TensorPool, allocate_deallocate_01_p) {
+  nntrainer::TensorPool pool;
+  nntrainer::Tensor *t1, *t2;
+
+  EXPECT_NO_THROW(
+    t1 = pool.requestTensor(nntrainer::TensorDim({1}), {0},
+                            nntrainer::TensorLifespan::MAX_LIFESPAN, "abc1"));
+  EXPECT_NE(t1, nullptr);
+  EXPECT_FALSE(t1->isAllocated());
+
+  EXPECT_NO_THROW(
+    t2 = pool.requestTensor(nntrainer::TensorDim({1}), {1},
+                            nntrainer::TensorLifespan::MAX_LIFESPAN, "abc2"));
+  EXPECT_NE(t2, nullptr);
+  EXPECT_FALSE(t2->isAllocated());
+
+  EXPECT_NE(t1, t2);
+
+  EXPECT_NO_THROW(pool.finalize(nntrainer::BasicPlanner(), 0, 2));
+
+  EXPECT_NO_THROW(pool.allocate());
+  EXPECT_TRUE(t1->isAllocated());
+  EXPECT_TRUE(t2->isAllocated());
+
+  EXPECT_NO_THROW(pool.deallocate());
+  EXPECT_FALSE(t1->isAllocated());
+  EXPECT_FALSE(t2->isAllocated());
+}
+
+/**
+ * @brief allocate without finalize
+ */
+TEST(TensorPool, allocate_deallocate_02_n) {
+  nntrainer::TensorPool pool;
+
+  EXPECT_THROW(pool.allocate(), std::runtime_error);
+
+  EXPECT_NO_THROW(pool.deallocate());
+}
+
+/**
+ * @brief validate memory full overlap
+ */
+TEST(TensorPool, validate_memory) {
+  nntrainer::TensorPool pool;
+  nntrainer::Tensor *t1, *t2;
+
+  EXPECT_NO_THROW(
+    t1 = pool.requestTensor(nntrainer::TensorDim({100}), {0},
+                            nntrainer::TensorLifespan::MAX_LIFESPAN, "abc1"));
+
+  EXPECT_NO_THROW(
+    t2 = pool.requestTensor(nntrainer::TensorDim({100}), {1},
+                            nntrainer::TensorLifespan::MAX_LIFESPAN, "abc2"));
+
+  EXPECT_NO_THROW(pool.finalize(nntrainer::BasicPlanner(), 0, 2));
+  EXPECT_NO_THROW(pool.allocate());
+
+  nntrainer::Tensor g1 = nntrainer::Tensor(nntrainer::TensorDim({100}));
+  g1.setRandNormal();
+  nntrainer::Tensor g2 = nntrainer::Tensor(nntrainer::TensorDim({100}));
+  g2.setRandNormal();
+
+  t1->copy(g1);
+  t2->copy(g2);
+
+  EXPECT_EQ(*t1, g1);
+  EXPECT_EQ(*t2, g2);
+
+  EXPECT_NO_THROW(pool.deallocate());
+}
+
+/**
+ * @brief Main gtest
+ */
+int main(int argc, char **argv) {
+  int result = -1;
+
+  try {
+    testing::InitGoogleTest(&argc, argv);
+  } catch (...) {
+    std::cerr << "Failed to init gtest\n";
+  }
+
+  try {
+    result = RUN_ALL_TESTS();
+  } catch (...) {
+    std::cerr << "Failed to run test.\n";
+  }
+
+  return result;
+}