Add dummy package and dummy backend
author     Yongjoo Ahn <yongjoo1.ahn@samsung.com>
Wed, 12 Mar 2025 04:02:15 +0000 (13:02 +0900)
committer  jaeyun-jung <39614140+jaeyun-jung@users.noreply.github.com>
Wed, 12 Mar 2025 08:12:25 +0000 (17:12 +0900)
- Add a dummy package that is published for any build configuration.
- The dummy package provides the `dummy-passthrough` backend; a configure
  sketch follows below.
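
A minimal configure/build sketch (mirroring the spec's %cmake invocation; the
prefix paths are illustrative and assume the hal-rootstrap pkg-config files
are available on the host):

  $ cmake -DCMAKE_HAL_LIBDIR_PREFIX=/hal/lib \
          -DCMAKE_HAL_LICENSEDIR_PREFIX=/hal/licenses \
          -DENABLE_DUMMY=ON .
  $ make && make install
  # installs libhal-backend-ml-dummy-passthrough.so under the configured HAL libdir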

Signed-off-by: Yongjoo Ahn <yongjoo1.ahn@samsung.com>
CMakeLists.txt
packaging/hal-backend-ml-accelerator.spec
src/hal-backend-ml-dummy-passthrough.cc [new file with mode: 0644]

diff --git a/CMakeLists.txt b/CMakeLists.txt
index d530ded89834674728d02b4c218dc0ff2487860f..529c2e1581370924daf55b2e45d7accf1532d330 100644 (file)
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,6 +2,7 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
 PROJECT(hal-backend-ml CXX)
 INCLUDE(GNUInstallDirs)
 
+option(ENABLE_DUMMY "Enable dummy-passthrough backend" OFF)
 option(ENABLE_VIVANTE "Enable vivante backend" OFF)
 option(ENABLE_SNPE "Enable snpe backend" OFF)
 
@@ -23,6 +24,27 @@ SET(UTIL_SRCS
   ${PROJECT_SOURCE_DIR}/src/hal-backend-ml-util.cc
 )
 
+pkg_check_modules(pkgs REQUIRED
+  hal-rootstrap
+)
+
+FOREACH(flag ${pkgs_CFLAGS})
+  SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
+  SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
+ENDFOREACH(flag)
+
+# dummy-passthrough
+IF(ENABLE_DUMMY)
+SET(DUMMY_PASSTHROUGH_SRCS
+  ${PROJECT_SOURCE_DIR}/src/hal-backend-ml-dummy-passthrough.cc
+)
+
+SET(DUMMY_PASSTHROUGH_LIBRARY_NAME "hal-backend-ml-dummy-passthrough")
+ADD_LIBRARY(${DUMMY_PASSTHROUGH_LIBRARY_NAME} SHARED ${DUMMY_PASSTHROUGH_SRCS} ${UTIL_SRCS})
+TARGET_LINK_LIBRARIES(${DUMMY_PASSTHROUGH_LIBRARY_NAME} ${pkgs_LDFLAGS})
+INSTALL(TARGETS ${DUMMY_PASSTHROUGH_LIBRARY_NAME} DESTINATION ${HAL_LIBDIR} COMPONENT RuntimeLibraries)
+ENDIF()
+
 # vivante
 IF(ENABLE_VIVANTE)
 SET(VIVANTE_SRCS
@@ -64,6 +86,3 @@ ADD_LIBRARY(${SNPE_LIBRARY_NAME} SHARED ${SNPE_SRCS} ${UTIL_SRCS})
 TARGET_LINK_LIBRARIES(${SNPE_LIBRARY_NAME} ${snpe_build_dep_pkgs_LDFLAGS})
 INSTALL(TARGETS ${SNPE_LIBRARY_NAME} DESTINATION ${HAL_LIBDIR} COMPONENT RuntimeLibraries)
 ENDIF()
-
-# Dummy install command when there are no backends deps.
-INSTALL(CODE "message(STATUS \"Performing dummy install actions\")")
diff --git a/packaging/hal-backend-ml-accelerator.spec b/packaging/hal-backend-ml-accelerator.spec
index b61ed250da668f15515dec9a1aa3a0787b93a0f8..8debdc7a404f70529016ae941525d90b4f87fa5a 100644 (file)
--- a/packaging/hal-backend-ml-accelerator.spec
+++ b/packaging/hal-backend-ml-accelerator.spec
@@ -28,6 +28,15 @@ BuildRequires:  pkgconfig(hal-rootstrap)
 %description
 ML HAL backend drivers for various targets
 
+# Config dummy backend (dummy-passthrough)
+%define         dummy_support 1
+
+%if 0%{?dummy_support}
+%package dummy
+Summary:  dummy backend for hal-backend-ml-accelerator
+%description dummy
+%define enable_dummy -DENABLE_DUMMY=ON
+%endif
 
 # Config vivante
 %if 0%{?vivante_support}
@@ -53,6 +62,7 @@ Summary:  hal-backend-ml-accelerator for snpe
 %cmake \
   -DCMAKE_HAL_LIBDIR_PREFIX=%{_hal_libdir} \
   -DCMAKE_HAL_LICENSEDIR_PREFIX=%{_hal_licensedir} \
+  %{?enable_dummy} \
   %{?enable_vivante} \
   %{?enable_snpe} \
   .
@@ -68,6 +78,13 @@ make %{?_smp_mflags}
 %postun
 /sbin/ldconfig
 
+%if 0%{?dummy_support}
+%files dummy
+%manifest packaging/hal-backend-ml-accelerator.manifest
+%license LICENSE
+%{_hal_libdir}/libhal-backend-ml-dummy-passthrough.so
+%endif
+
 %if 0%{?vivante_support}
 %files vivante
 %manifest packaging/hal-backend-ml-accelerator.manifest
diff --git a/src/hal-backend-ml-dummy-passthrough.cc b/src/hal-backend-ml-dummy-passthrough.cc
new file mode 100644 (file)
index 0000000..142ec13
--- /dev/null
+++ b/src/hal-backend-ml-dummy-passthrough.cc
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+
+#include <glib.h>
+#include <string.h>
+
+#include <hal-common-interface.h>
+#include <hal-ml-interface.h>
+
+#include "hal-backend-ml-util.h"
+
+
+typedef struct _pass_handle_s {
+  GstTensorsInfo inputInfo;
+  GstTensorsInfo outputInfo;
+} pass_handle_s;
+
+static int
+ml_dummy_passthrough_init (void **backend_private)
+{
+  pass_handle_s *pass = g_new0 (pass_handle_s, 1);
+  gst_tensors_info_init (&pass->inputInfo);
+  gst_tensors_info_init (&pass->outputInfo);
+  *backend_private = pass;
+
+  return HAL_ML_ERROR_NONE;
+}
+
+static int
+ml_dummy_passthrough_deinit (void *backend_private)
+{
+  pass_handle_s *pass = (pass_handle_s *) backend_private;
+  if (!pass) {
+    g_critical ("[dummy backend] ml_dummy_passthrough_deinit called with invalid backend_private");
+    return HAL_ML_ERROR_INVALID_PARAMETER;
+  }
+
+  gst_tensors_info_free (&pass->inputInfo);
+  gst_tensors_info_free (&pass->outputInfo);
+
+  g_free (pass);
+
+  return HAL_ML_ERROR_NONE;
+}
+
+static int
+ml_dummy_passthrough_configure_instance (void *backend_private, const void *prop_)
+{
+  const GstTensorFilterProperties *prop = (const GstTensorFilterProperties *) prop_;
+  pass_handle_s *pass = (pass_handle_s *) backend_private;
+  if (!pass) {
+    g_critical ("[dummy backend] ml_dummy_passthrough_configure_instance called with invalid backend_private");
+    return HAL_ML_ERROR_INVALID_PARAMETER;
+  }
+
+  gst_tensors_info_copy (&pass->inputInfo, &prop->input_meta);
+  gst_tensors_info_copy (&pass->outputInfo, &prop->output_meta);
+
+  return HAL_ML_ERROR_NONE;
+}
+
+static int
+ml_dummy_passthrough_get_framework_info (void *backend_private, void *fw_info)
+{
+  GstTensorFilterFrameworkInfo *info = (GstTensorFilterFrameworkInfo *) fw_info;
+  info->name = "dummy-passthrough";
+  info->allow_in_place = FALSE;
+  info->allocate_in_invoke = FALSE;
+  info->run_without_model = FALSE;
+  info->verify_model_path = FALSE;
+
+  return HAL_ML_ERROR_NONE;
+}
+
+static int
+ml_dummy_passthrough_invoke (void *backend_private, const void *input_, void *output_)
+{
+  const GstTensorMemory *input = (const GstTensorMemory *) input_;
+  GstTensorMemory *output = (GstTensorMemory *) output_;
+  pass_handle_s *pass = (pass_handle_s *) backend_private;
+  if (!pass) {
+    g_critical ("[dummy backend] ml_dummy_passthrough_invoke called with invalid backend_private");
+    return HAL_ML_ERROR_INVALID_PARAMETER;
+  }
+
+  for (unsigned int i = 0; i < pass->inputInfo.num_tensors; i++) {
+    GstTensorInfo *info = gst_tensors_info_get_nth_info (&pass->inputInfo, i);
+    memcpy (output[i].data, input[i].data, gst_tensor_info_get_size (info));
+  }
+
+  return HAL_ML_ERROR_NONE;
+}
+
+static int
+ml_dummy_passthrough_get_model_info (
+    void *backend_private, int ops_, void *in_info_, void *out_info_)
+{
+  model_info_ops ops = (model_info_ops) ops_;
+  GstTensorsInfo *in_info = (GstTensorsInfo *) in_info_;
+  GstTensorsInfo *out_info = (GstTensorsInfo *) out_info_;
+  pass_handle_s *pass = (pass_handle_s *) backend_private;
+  if (!pass) {
+    g_critical ("[dummy backend] ml_dummy_passthrough_get_model_info called with invalid backend_private");
+    return HAL_ML_ERROR_INVALID_PARAMETER;
+  }
+
+  if (ops == GET_IN_OUT_INFO) {
+    gst_tensors_info_copy (in_info, &pass->inputInfo);
+    gst_tensors_info_copy (out_info, &pass->outputInfo);
+
+    return HAL_ML_ERROR_NONE;
+  }
+
+  return HAL_ML_ERROR_NOT_SUPPORTED;
+}
+
+static int
+ml_dummy_passthrough_event_handler (void *backend_private, int ops_, void *data_)
+{
+  return HAL_ML_ERROR_NOT_SUPPORTED;
+}
+
+static int
+ml_dummy_passthrough_hal_backend_init (void **data)
+{
+  hal_backend_ml_funcs *funcs = NULL;
+
+  if (*data) {
+    funcs = (hal_backend_ml_funcs *) *data;
+  } else {
+    funcs = g_new0 (hal_backend_ml_funcs, 1);
+  }
+  *data = (void *) funcs;
+
+  funcs->init = ml_dummy_passthrough_init;
+  funcs->deinit = ml_dummy_passthrough_deinit;
+  funcs->configure_instance = ml_dummy_passthrough_configure_instance;
+  funcs->invoke = ml_dummy_passthrough_invoke;
+  funcs->get_framework_info = ml_dummy_passthrough_get_framework_info;
+  funcs->get_model_info = ml_dummy_passthrough_get_model_info;
+  funcs->event_handler = ml_dummy_passthrough_event_handler;
+
+  return 0;
+}
+
+static int
+ml_dummy_passthrough_hal_backend_exit (void *data)
+{
+  memset (data, 0x0, sizeof (hal_backend_ml_funcs));
+  return 0;
+}
+
+hal_backend hal_backend_ml_data = {
+  .name = "ml-dummy-passthrough",
+  .vendor = "NNStreamer",
+  .init = ml_dummy_passthrough_hal_backend_init,
+  .exit = ml_dummy_passthrough_hal_backend_exit,
+  .major_version = 1,
+  .minor_version = 0,
+};
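
For reference, a hypothetical caller-side sketch (not part of this commit) of
the call sequence the backend implements. In practice the library is loaded by
hal-api-ml rather than by hand, and the nnstreamer metadata fields used below
(GstTensorInfo .type/.dimension, GstTensorMemory's data/size layout) are
assumptions not shown in this diff:

  /* Hypothetical caller-side sketch: drives the function table that
   * hal_backend_ml_data.init() fills, following the expected sequence
   * init -> configure_instance -> invoke -> deinit. Error handling omitted. */
  #include <dlfcn.h>
  #include <string.h>
  #include <glib.h>
  #include <hal-common-interface.h>
  #include <hal-ml-interface.h>
  #include "hal-backend-ml-util.h"

  int main (void)
  {
    void *so = dlopen ("libhal-backend-ml-dummy-passthrough.so", RTLD_NOW);
    hal_backend *backend = (hal_backend *) dlsym (so, "hal_backend_ml_data");

    hal_backend_ml_funcs funcs;
    memset (&funcs, 0, sizeof (funcs));
    void *table = &funcs;
    backend->init (&table); /* fills the callback table defined above */

    void *priv = NULL;
    funcs.init (&priv);

    /* Describe a single 16-byte UINT8 tensor for both input and output.
     * The GstTensorInfo field names here are assumptions from nnstreamer. */
    GstTensorFilterProperties prop;
    memset (&prop, 0, sizeof (prop));
    gst_tensors_info_init (&prop.input_meta);
    prop.input_meta.num_tensors = 1;
    GstTensorInfo *meta = gst_tensors_info_get_nth_info (&prop.input_meta, 0);
    meta->type = _NNS_UINT8;
    meta->dimension[0] = 16;
    meta->dimension[1] = meta->dimension[2] = meta->dimension[3] = 1;
    gst_tensors_info_copy (&prop.output_meta, &prop.input_meta);
    funcs.configure_instance (priv, &prop);

    guint8 in_buf[16] = { 1, 2, 3 }, out_buf[16] = { 0 };
    GstTensorMemory in = { in_buf, sizeof (in_buf) };
    GstTensorMemory out = { out_buf, sizeof (out_buf) };
    funcs.invoke (priv, &in, &out); /* passthrough: out_buf now equals in_buf */

    gst_tensors_info_free (&prop.input_meta);
    gst_tensors_info_free (&prop.output_meta);
    funcs.deinit (priv);
    backend->exit (&funcs);
    dlclose (so);
    return 0;
  }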