#define DBG FALSE
#endif
+/**
+ * @brief Accelerator names accepted by the tensorflow subplugin.
+ * NULL-terminated so it can be searched with g_strv_contains().
+ */
+static const gchar *tf_accl_support[] = {
+ ACCL_AUTO_STR,
+ ACCL_DEFAULT_STR,
+ NULL
+};
+
/**
* @brief Internal data structure for tensorflow
*/
}
}
+/**
+ * @brief Check support of the backend
+ * @param[in] hw backend to check support of
+ * @return 0 if @a hw is listed in tf_accl_support, -ENOENT otherwise
+ * @note get_accl_hw_str() is assumed to return a non-NULL string for any
+ *       accl_hw value — TODO confirm; g_strv_contains() requires non-NULL.
+ */
+static int
+tf_checkAvailability (accl_hw hw)
+{
+ if (g_strv_contains (tf_accl_support, get_accl_hw_str (hw)))
+ return 0;
+
+ return -ENOENT;
+}
+
static gchar filter_subplugin_tensorflow[] = "tensorflow";
static GstTensorFilterFramework NNS_support_tensorflow = {
NNS_support_tensorflow.getInputDimension = tf_getInputDim;
NNS_support_tensorflow.getOutputDimension = tf_getOutputDim;
NNS_support_tensorflow.destroyNotify = tf_destroyNotify;
+ NNS_support_tensorflow.checkAvailability = tf_checkAvailability;
nnstreamer_filter_probe (&NNS_support_tensorflow);
}
/**
* @brief Test NNStreamer Utility for checking availability of Tensorflow-lite backend
*/
-TEST (nnstreamer_capi_util, availability_fail_n)
+TEST (nnstreamer_capi_util, availability_fail_01_n)
{
bool result;
int status;
}
#endif /* ENABLE_TENSORFLOW_LITE */
+#ifdef ENABLE_TENSORFLOW
+/**
+ * @brief Test NNStreamer Utility for checking availability of Tensorflow backend
+ * @details ML_NNFW_HW_ANY and ML_NNFW_HW_AUTO are expected to report the
+ *          Tensorflow backend as available (status OK, result true).
+ */
+TEST (nnstreamer_capi_util, availability_02)
+{
+ bool result;
+ int status;
+
+ status = ml_check_nnfw_availability (ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_ANY, &result);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (result, true);
+
+ status = ml_check_nnfw_availability (ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_AUTO, &result);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (result, true);
+}
+
+/**
+ * @brief Negative test for availability of the Tensorflow backend
+ * @details CPU/GPU are not in the backend's supported-accelerator list, so the
+ *          call succeeds (status OK) but reports the backend as unavailable.
+ */
+TEST (nnstreamer_capi_util, availability_fail_02_n)
+{
+ bool result;
+ int status;
+
+ status = ml_check_nnfw_availability (ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_CPU, &result);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (result, false);
+
+ status = ml_check_nnfw_availability (ML_NNFW_TYPE_TENSORFLOW, ML_NNFW_HW_GPU, &result);
+ EXPECT_EQ (status, ML_ERROR_NONE);
+ EXPECT_EQ (result, false);
+}
+#endif /** ENABLE_TENSORFLOW */
+
/**
* @brief Test NNStreamer Utility for checking tensors info handle
*/