Diff: initialize output tensor (#2627)
author오형석/동작제어Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Thu, 6 Sep 2018 10:23:45 +0000 (19:23 +0900)
committer박세희/동작제어Lab(SR)/Principal Engineer/삼성전자 <saehie.park@samsung.com>
Thu, 6 Sep 2018 10:23:45 +0000 (19:23 +0900)
Initialize output tensor as zero
Needed to initialize the RNN hidden state

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
libs/support/tflite/src/Diff.cpp

index ff607e6..e875571 100644 (file)
@@ -286,6 +286,7 @@ int RandomTestRunner::run(const nnfw::support::tflite::interp::Builder &builder)
   using Initializer = std::function<void(int id, Interpreter *, Interpreter *)>;
 
   std::map<TfLiteType, Initializer> initializers;
+  std::map<TfLiteType, Initializer> reseters;
 
   // Generate signed 32-bit integer (s32) input
   initializers[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
@@ -308,6 +309,26 @@ int RandomTestRunner::run(const nnfw::support::tflite::interp::Builder &builder)
            };
   };
 
+  // Reset signed 32-bit integer (s32) output tensor to zero
+  reseters[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
+    assert(nnapi->tensor(id)->type == kTfLiteInt32);
+
+    auto tfl_interp_view = nnfw::support::tflite::TensorView<int32_t>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::support::tflite::TensorView<int32_t>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    int32_t value = 0;
+
+    nnfw::util::tensor::iterate(tfl_interp_view.shape())
+        << [&](const nnfw::util::tensor::Index &ind) {
+             // TODO Generate random values
+             tfl_interp_view.at(ind) = value;
+             nnapi_view.at(ind) = value;
+           };
+  };
+
   initializers[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
     assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
     assert(nnapi->tensor(id)->type == kTfLiteUInt8);
@@ -333,6 +354,31 @@ int RandomTestRunner::run(const nnfw::support::tflite::interp::Builder &builder)
            };
   };
 
+  reseters[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
+    assert(nnapi->tensor(id)->type == kTfLiteUInt8);
+
+    auto tfl_interp_view = nnfw::support::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::support::tflite::TensorView<uint8_t>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    auto fp = static_cast<uint8_t (RandomGenerator::*)(const ::nnfw::util::tensor::Shape &,
+                                                       const ::nnfw::util::tensor::Index &)>(
+        &RandomGenerator::generate<uint8_t>);
+    const nnfw::util::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
+                                                   std::bind(fp, _randgen, _1, _2));
+    assert(tfl_interp_view.shape() == data.shape());
+
+    uint8_t value = 0;
+
+    nnfw::util::tensor::iterate(tfl_interp_view.shape())
+        << [&](const nnfw::util::tensor::Index &ind) {
+             tfl_interp_view.at(ind) = value;
+             nnapi_view.at(ind) = value;
+           };
+  };
+
   initializers[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
     assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
     assert(nnapi->tensor(id)->type == kTfLiteFloat32);
@@ -359,6 +405,32 @@ int RandomTestRunner::run(const nnfw::support::tflite::interp::Builder &builder)
            };
   };
 
+  reseters[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
+    assert(nnapi->tensor(id)->type == kTfLiteFloat32);
+
+    auto tfl_interp_view = nnfw::support::tflite::TensorView<float>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::support::tflite::TensorView<float>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    auto fp = static_cast<float (RandomGenerator::*)(const ::nnfw::util::tensor::Shape &,
+                                                     const ::nnfw::util::tensor::Index &)>(
+        &RandomGenerator::generate<float>);
+    const nnfw::util::tensor::Object<float> data(tfl_interp_view.shape(),
+                                                 std::bind(fp, _randgen, _1, _2));
+
+    assert(tfl_interp_view.shape() == data.shape());
+
+    float value = 0;
+
+    nnfw::util::tensor::iterate(tfl_interp_view.shape())
+        << [&](const nnfw::util::tensor::Index &ind) {
+             tfl_interp_view.at(ind) = value;
+             nnapi_view.at(ind) = value;
+           };
+  };
+
   // Fill IFM with random numbers
   for (const auto id : tfl_interp->inputs())
   {
@@ -374,6 +446,21 @@ int RandomTestRunner::run(const nnfw::support::tflite::interp::Builder &builder)
     it->second(id, tfl_interp.get(), nnapi.get());
   }
 
+  // Fill OFM with 0
+  for (const auto id : tfl_interp->outputs())
+  {
+    assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);
+
+    auto it = reseters.find(tfl_interp->tensor(id)->type);
+
+    if (it == reseters.end())
+    {
+      throw std::runtime_error{"Not supported input type"};
+    }
+
+    it->second(id, tfl_interp.get(), nnapi.get());
+  }
+
   std::cout << "[NNAPI TEST] Run T/F Lite Interpreter without NNAPI" << std::endl;
   tfl_interp->Invoke();