[Test] feature for tf-lite (tag: accepted/tizen/unified/20230609.163741)
author    Jaeyun Jung <jy1210.jung@samsung.com>
          Thu, 1 Jun 2023 05:53:49 +0000 (14:53 +0900)
committer jaeyun-jung <39614140+jaeyun-jung@users.noreply.github.com>
          Thu, 1 Jun 2023 07:07:23 +0000 (16:07 +0900)
Enable test cases if tensorflow-lite 2.x is available.

Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
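
As background (not part of the patch): a minimal, hypothetical sketch of the guard pattern this commit extends. A gtest case wrapped this way is compiled only when at least one of the tf-lite build flags is defined, so builds without any tensorflow-lite version drop the test at compile time. The test name and body below are illustrative, not taken from the patched files.

#include <gtest/gtest.h>

#if defined(ENABLE_TENSORFLOW_LITE) || defined(ENABLE_TENSORFLOW2_LITE)
/**
 * @brief Hypothetical test requiring a tensorflow-lite backend.
 */
TEST (tflite_guard_example, guarded_testcase_p)
{
  /* Real tests open a .tflite model through the single-shot C-API;
   * this placeholder only shows that the guard admits the test body. */
  EXPECT_TRUE (true);
}
#endif /* ENABLE_TENSORFLOW_LITE || ENABLE_TENSORFLOW2_LITE */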
tests/capi/unittest_capi_inference_latency.cc
tests/capi/unittest_capi_inference_nnfw_runtime.cc

diff --git a/tests/capi/unittest_capi_inference_latency.cc b/tests/capi/unittest_capi_inference_latency.cc
index 1beaa3b..5dc21c1 100644
@@ -248,7 +248,7 @@ class nnstreamer_capi_singleshot_latency : public ::testing::Test
   float single_invoke_duration_f, direct_invoke_duration_f;
 };
 
-#if defined(ENABLE_TENSORFLOW_LITE)
+#if defined(ENABLE_TENSORFLOW_LITE) || defined(ENABLE_TENSORFLOW2_LITE)
 /**
  * @brief Measure latency for NNStreamer single shot (tensorflow-lite)
  * @note Measure the invoke latency added by NNStreamer single shot
diff --git a/tests/capi/unittest_capi_inference_nnfw_runtime.cc b/tests/capi/unittest_capi_inference_nnfw_runtime.cc
index ada1f30..34cbd67 100644
@@ -707,7 +707,7 @@ TEST_F (MLAPIInferenceNNFW, multimodel_01_p)
   MLAPIInferenceNNFW::wait_for_sink (&call_cnt2, 1);
 }
 
-#ifdef ENABLE_TENSORFLOW_LITE
+#if defined(ENABLE_TENSORFLOW_LITE) || defined(ENABLE_TENSORFLOW2_LITE)
 /**
  * @brief Test nnfw subplugin multi-model (pipeline, ML-API)
  * @detail Invoke two models which have different framework via Pipeline API, sharing a single input stream
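
A quick way to see which guard a given build enables, assuming (as the flag names suggest) that the build system passes the macros to the compiler as -D defines: the snippet below reports the active flag at run time; if both happened to be defined, the 2.x branch wins. This is an illustrative check, not part of the test suite.

#include <cstdio>

int main ()
{
#if defined(ENABLE_TENSORFLOW2_LITE)
  std::printf ("tensorflow-lite 2.x tests enabled\n");
#elif defined(ENABLE_TENSORFLOW_LITE)
  std::printf ("tensorflow-lite 1.x tests enabled\n");
#else
  std::printf ("tensorflow-lite tests disabled\n");
#endif
  return 0;
}

For example, g++ -DENABLE_TENSORFLOW2_LITE check.cc -o check && ./check prints the 2.x line.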