- This implements HAL backends (pass-through, Vivante, and SNPE) for hal-api-ml.
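- The pass-through test backend is always built; the Vivante and SNPE backends
  are optional (-DENABLE_VIVANTE=ON / -DENABLE_SNPE=ON) and are packaged as
  subpackages for the corresponding DA boards.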
Signed-off-by: Yongjoo Ahn <yongjoo1.ahn@samsung.com>
--- /dev/null
+CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
+PROJECT(hal-backend-ml CXX)
+INCLUDE(GNUInstallDirs)
+
+OPTION(ENABLE_VIVANTE "Enable vivante backend" OFF)
+OPTION(ENABLE_SNPE "Enable snpe backend" OFF)
+
+SET(HAL_LIBDIR ${CMAKE_HAL_LIBDIR_PREFIX})
+SET(HAL_LICENSEDIR ${CMAKE_HAL_LICENSEDIR_PREFIX})
+
+INCLUDE(FindPkgConfig)
+pkg_check_modules(hal_rootstrap_pkgs REQUIRED
+ hal-rootstrap
+)
+
+FOREACH(flag ${hal_rootstrap_pkgs_CFLAGS})
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
+ENDFOREACH(flag)
+
+# Common Options
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -O2 -fomit-frame-pointer -std=gnu++0x")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdata-sections -ffunction-sections")
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
+
+SET(UTIL_SRCS
+ ${PROJECT_SOURCE_DIR}/src/hal-backend-ml-util.cc
+)
+
+# pass-through
+SET(PASS_SRCS
+ ${PROJECT_SOURCE_DIR}/src/hal-backend-ml-test-passthrough.cc
+)
+
+SET(PASS_LIBRARY_NAME "hal-backend-ml-pass")
+ADD_LIBRARY(${PASS_LIBRARY_NAME} SHARED ${PASS_SRCS} ${UTIL_SRCS})
+TARGET_LINK_LIBRARIES(${PASS_LIBRARY_NAME} ${hal_rootstrap_pkgs_LDFLAGS})
+INSTALL(TARGETS ${PASS_LIBRARY_NAME} DESTINATION ${HAL_LIBDIR} COMPONENT RuntimeLibraries)
+
+# vivante
+IF(ENABLE_VIVANTE)
+SET(VIVANTE_SRCS
+  ${PROJECT_SOURCE_DIR}/src/hal-backend-ml-vivante.cc
+)
+
+SET(VIVANTE_LIBRARY_NAME "hal-backend-ml-vivante")
+ADD_LIBRARY(${VIVANTE_LIBRARY_NAME} SHARED ${VIVANTE_SRCS} ${UTIL_SRCS})
+TARGET_LINK_LIBRARIES(${VIVANTE_LIBRARY_NAME} ${hal_rootstrap_pkgs_LDFLAGS})
+INSTALL(TARGETS ${VIVANTE_LIBRARY_NAME} DESTINATION ${HAL_LIBDIR} COMPONENT RuntimeLibraries)
+ENDIF()
+
+# snpe
+IF(ENABLE_SNPE)
+SET(SNPE_SRCS
+  ${PROJECT_SOURCE_DIR}/src/hal-backend-ml-snpe.cc
+)
+
+SET(SNPE_LIBRARY_NAME "hal-backend-ml-snpe")
+ADD_LIBRARY(${SNPE_LIBRARY_NAME} SHARED ${SNPE_SRCS} ${UTIL_SRCS})
+TARGET_LINK_LIBRARIES(${SNPE_LIBRARY_NAME} ${hal_rootstrap_pkgs_LDFLAGS})
+INSTALL(TARGETS ${SNPE_LIBRARY_NAME} DESTINATION ${HAL_LIBDIR} COMPONENT RuntimeLibraries)
+ENDIF()
--- /dev/null
+Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. All rights reserved.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
--- /dev/null
+<manifest>
+ <request>
+ <domain name="_"/>
+ </request>
+</manifest>
--- /dev/null
+Name: hal-backend-ml
+Summary: ML HAL backend drivers for various targets
+Version: 0.0.1
+Release: 0
+Group: Machine Learning/ML Framework
+License: Apache-2.0
+Source0: %{name}-%{version}.tar.gz
+
+BuildRequires: cmake
+BuildRequires: pkgconfig(hal-rootstrap)
+
+
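+# Target-specific backends are enabled per board profile via the macros below
+# (e.g., _with_da_profile together with _with_meson64 or _with_qrb4210).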
+# For DA
+%if 0%{?_with_da_profile}
+
+## For meson board
+%if 0%{?_with_meson64}
+%define vivante_support 1
+BuildRequires: pkgconfig(ovxlib)
+BuildRequires: pkgconfig(amlogic-vsi-npu-sdk)
+%endif
+
+## For qrb board
+%if 0%{?_with_qrb4210}
+%define snpe_support 1
+BuildRequires: snpe-devel
+%endif
+
+## End of DA profile
+%endif
+
+
+%description
+ML HAL backend drivers for various targets
+
+
+# Config vivante
+%if 0%{?vivante_support}
+%package vivante
+Summary: hal-backend-ml for vivante
+%description vivante
+ML HAL backend implementation for Vivante (VeriSilicon/Amlogic) NPUs.
+%define enable_vivante -DENABLE_VIVANTE=ON
+%endif
+
+# Config snpe
+%if 0%{?snpe_support}
+%package snpe
+Summary: hal-backend-ml for snpe
+Requires: snpe
+%description snpe
+ML HAL backend implementation for Qualcomm SNPE (Snapdragon Neural Processing Engine).
+%define enable_snpe -DENABLE_SNPE=ON
+%endif
+
+
+%prep
+%setup -q
+
+%build
+%cmake \
+ -DCMAKE_HAL_LIBDIR_PREFIX=%{_hal_libdir} \
+ -DCMAKE_HAL_LICENSEDIR_PREFIX=%{_hal_licensedir} \
+ %{?enable_vivante} \
+ %{?enable_snpe} \
+ .
+
+make %{?_smp_mflags}
+
+%install
+%make_install
+
+%post
+/sbin/ldconfig
+
+%postun
+/sbin/ldconfig
+
+%files
+%manifest packaging/hal-backend-ml.manifest
+%license LICENSE
+%{_hal_libdir}/libhal-backend-ml-pass.so*
+
+%if 0%{?vivante_support}
+%files vivante
+%manifest packaging/hal-backend-ml.manifest
+%license LICENSE
+%{_hal_libdir}/libhal-backend-ml-vivante.so
+%endif
+
+%if 0%{?snpe_support}
+%files snpe
+%manifest packaging/hal-backend-ml.manifest
+%license LICENSE
+%{_hal_libdir}/libhal-backend-ml-snpe.so
+%endif
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
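+
+/**
+ * @file hal-backend-ml-snpe.cc
+ * @brief ML HAL backend implementation for Qualcomm SNPE (Snapdragon Neural
+ *        Processing Engine), built on top of the SNPE C API.
+ */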
+
+#include <cstring>
+#include <stdexcept>
+#include <string>
+#include <vector>
+#include <glib.h>
+
+#include <hal-common-interface.h>
+#include <hal-ml-interface.h>
+
+#include <DlContainer/DlContainer.h>
+#include <DlSystem/DlEnums.h>
+#include <DlSystem/DlError.h>
+#include <DlSystem/DlVersion.h>
+#include <DlSystem/IUserBuffer.h>
+#include <DlSystem/RuntimeList.h>
+#include <DlSystem/UserBufferMap.h>
+#include <SNPE/SNPE.h>
+#include <SNPE/SNPEBuilder.h>
+#include <SNPE/SNPEUtil.h>
+
+#include "hal-backend-ml-util.h"
+
+
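+/** Private data for an SNPE backend instance. */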
+typedef struct _snpe_handle_s
+{
+ char *model_path;
+ GstTensorsInfo inputInfo; /**< Input tensors metadata */
+ GstTensorsInfo outputInfo; /**< Output tensors metadata */
+
+ Snpe_SNPE_Handle_t snpe_h;
+ Snpe_UserBufferMap_Handle_t inputMap_h;
+ Snpe_UserBufferMap_Handle_t outputMap_h;
+ std::vector<Snpe_IUserBuffer_Handle_t> user_buffers;
+} snpe_handle_s;
+
+static int ml_snpe_init(void **backend_private)
+{
+ snpe_handle_s *snpe = g_new0 (snpe_handle_s, 1);
+
+ gst_tensors_info_init (&snpe->inputInfo);
+ gst_tensors_info_init (&snpe->outputInfo);
+
+ *backend_private = snpe;
+ return 0;
+}
+
+static int ml_snpe_deinit(void *backend_private)
+{
+ snpe_handle_s *snpe = (snpe_handle_s *) backend_private;
+ if (!snpe) {
+ g_critical ("[snpe backend] ml_snpe_deinit called with invalid backend_private");
+ return HAL_ML_ERROR_INVALID_PARAMETER;
+ }
+
+ if (snpe->inputMap_h)
+ Snpe_UserBufferMap_Delete (snpe->inputMap_h);
+
+ if (snpe->outputMap_h)
+ Snpe_UserBufferMap_Delete (snpe->outputMap_h);
+
+ for (auto &ub : snpe->user_buffers)
+ if (ub)
+ Snpe_IUserBuffer_Delete (ub);
+
+ snpe->user_buffers.clear ();
+
+ if (snpe->snpe_h)
+ Snpe_SNPE_Delete (snpe->snpe_h);
+
+ if (snpe->model_path)
+ g_free (snpe->model_path);
+
+ gst_tensors_info_free (&snpe->inputInfo);
+ gst_tensors_info_free (&snpe->outputInfo);
+
+ g_free (snpe);
+ return 0;
+}
+
+static int ml_snpe_configure_instance(void *backend_private, const GstTensorFilterProperties *prop)
+{
+ snpe_handle_s *snpe = (snpe_handle_s *) backend_private;
+ if (!snpe) {
+ g_critical ("[snpe backend] ml_snpe_configure_instance called with invalid backend_private");
+ return HAL_ML_ERROR_INVALID_PARAMETER;
+ }
+
+ Snpe_DlVersion_Handle_t lib_version_h = NULL;
+ Snpe_RuntimeList_Handle_t runtime_list_h = NULL;
+ Snpe_DlContainer_Handle_t container_h = NULL;
+ Snpe_SNPEBuilder_Handle_t snpebuilder_h = NULL;
+ Snpe_StringList_Handle_t inputstrListHandle = NULL;
+ Snpe_StringList_Handle_t outputstrListHandle = NULL;
+ std::vector<Snpe_UserBufferEncoding_ElementType_t> inputTypeVec;
+ std::vector<Snpe_UserBufferEncoding_ElementType_t> outputTypeVec;
+
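+  /* Release SNPE handles created during configuration; used on both the
+   * success and error paths below. */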
+ auto _clean_handles = [&] () {
+ if (lib_version_h)
+ Snpe_DlVersion_Delete (lib_version_h);
+ if (runtime_list_h)
+ Snpe_RuntimeList_Delete (runtime_list_h);
+ if (container_h)
+ Snpe_DlContainer_Delete (container_h);
+ if (snpebuilder_h)
+ Snpe_SNPEBuilder_Delete (snpebuilder_h);
+ if (inputstrListHandle)
+ Snpe_StringList_Delete (inputstrListHandle);
+ if (outputstrListHandle)
+ Snpe_StringList_Delete (outputstrListHandle);
+ };
+
+ /* default runtime is CPU */
+ Snpe_Runtime_t runtime = SNPE_RUNTIME_CPU;
+
+ /* lambda function to handle tensor */
+ auto handleTensor = [&] (const char *tensorName, GstTensorInfo *info,
+ Snpe_UserBufferMap_Handle_t bufferMapHandle,
+ Snpe_UserBufferEncoding_ElementType_t type) {
+ Snpe_IBufferAttributes_Handle_t bufferAttributesOpt
+ = Snpe_SNPE_GetInputOutputBufferAttributes (snpe->snpe_h, tensorName);
+ if (!bufferAttributesOpt)
+ throw std::runtime_error ("Error obtaining buffer attributes");
+
+ auto default_type = Snpe_IBufferAttributes_GetEncodingType (bufferAttributesOpt);
+
+ /* parse tensor data type with user given element type */
+ switch (type) {
+ case SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN:
+ /* If the type is not provided by user, use default type */
+ type = default_type;
+ if (default_type == SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT) {
+ info->type = _NNS_FLOAT32;
+ } else if (default_type == SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8) {
+ info->type = _NNS_UINT8;
+ } else {
+ throw std::invalid_argument ("Unsupported data type");
+ }
+ break;
+ case SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT:
+ info->type = _NNS_FLOAT32;
+ break;
+ case SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8:
+ info->type = _NNS_UINT8;
+ if (default_type == SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT) {
+ throw std::invalid_argument (
+ "ERROR: Quantization parameters are not present in model. Use TF8 type.");
+ }
+ break;
+ default:
+ throw std::invalid_argument ("Unsupported data type");
+ }
+
+ /* parse tensor dimension */
+ auto shapeHandle = Snpe_IBufferAttributes_GetDims (bufferAttributesOpt);
+ auto rank = Snpe_TensorShape_Rank (shapeHandle);
+ const size_t *sdims = Snpe_TensorShape_GetDimensions (shapeHandle);
+ for (size_t j = 0; j < rank; j++) {
+ info->dimension[rank - 1 - j] = sdims[j];
+ }
+
+ /* calculate strides */
+ std::vector<size_t> strides (rank);
+ strides[rank - 1] = gst_tensor_get_element_size (info->type);
+ for (size_t j = rank - 1; j > 0; j--) {
+ strides[j - 1] = strides[j] * sdims[j];
+ }
+
+ auto stride_h = Snpe_TensorShape_CreateDimsSize (strides.data (), strides.size ());
+ Snpe_TensorShape_Delete (shapeHandle);
+ Snpe_IBufferAttributes_Delete (bufferAttributesOpt);
+
+ /* assign user_buffermap */
+ size_t bufsize = gst_tensor_info_get_size (info);
+ Snpe_UserBufferEncoding_Handle_t ube_h = NULL;
+ if (type == SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8) {
+ Snpe_IBufferAttributes_Handle_t bufferAttributesOpt
+ = Snpe_SNPE_GetInputOutputBufferAttributes (snpe->snpe_h, tensorName);
+ Snpe_UserBufferEncoding_Handle_t ubeTfNHandle
+ = Snpe_IBufferAttributes_GetEncoding_Ref (bufferAttributesOpt);
+ uint64_t stepEquivalentTo0 = Snpe_UserBufferEncodingTfN_GetStepExactly0 (ubeTfNHandle);
+ float quantizedStepSize
+ = Snpe_UserBufferEncodingTfN_GetQuantizedStepSize (ubeTfNHandle);
+ ube_h = Snpe_UserBufferEncodingTfN_Create (stepEquivalentTo0, quantizedStepSize, 8);
+ Snpe_IBufferAttributes_Delete (bufferAttributesOpt);
+ Snpe_UserBufferEncodingTfN_Delete (ubeTfNHandle);
+ } else if (type == SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT) {
+ ube_h = Snpe_UserBufferEncodingFloat_Create ();
+ }
+ auto iub = Snpe_Util_CreateUserBuffer (NULL, bufsize, stride_h, ube_h);
+ snpe->user_buffers.push_back (iub);
+
+ if (type == SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8)
+ Snpe_UserBufferEncodingTfN_Delete (ube_h);
+ else if (type == SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT)
+ Snpe_UserBufferEncodingFloat_Delete (ube_h);
+ Snpe_TensorShape_Delete (stride_h);
+
+ Snpe_UserBufferMap_Add (bufferMapHandle, tensorName, iub);
+ };
+
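+  /* lambda function to parse custom properties; accepted keys (comma-separated "key:value" pairs):
+   * Runtime (CPU/GPU/DSP/NPU/AIP), OutputTensor (";"-separated tensor names),
+   * InputType and OutputType (";"-separated, FLOAT32 or TF8). */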
+ auto parse_custom_prop = [&runtime, &outputstrListHandle, &inputTypeVec,
+ &outputTypeVec] (const char *custom_prop) {
+ if (!custom_prop)
+ return;
+
+ gchar **options = g_strsplit (custom_prop, ",", -1);
+
+ for (guint op = 0; op < g_strv_length (options); ++op) {
+ gchar **option = g_strsplit (options[op], ":", -1);
+
+ if (g_strv_length (option) > 1) {
+ g_strstrip (option[0]);
+ g_strstrip (option[1]);
+
+ if (g_ascii_strcasecmp (option[0], "Runtime") == 0) {
+ if (g_ascii_strcasecmp (option[1], "CPU") == 0) {
+ runtime = SNPE_RUNTIME_CPU;
+ } else if (g_ascii_strcasecmp (option[1], "GPU") == 0) {
+ runtime = SNPE_RUNTIME_GPU;
+ } else if (g_ascii_strcasecmp (option[1], "DSP") == 0) {
+ runtime = SNPE_RUNTIME_DSP;
+ } else if (g_ascii_strcasecmp (option[1], "NPU") == 0
+ || g_ascii_strcasecmp (option[1], "AIP") == 0) {
+ runtime = SNPE_RUNTIME_AIP_FIXED8_TF;
+ } else {
+ g_warning ("Unknown runtime (%s), set CPU as default.", options[op]);
+ }
+ } else if (g_ascii_strcasecmp (option[0], "OutputTensor") == 0) {
+ /* the tensor name may contain ':' */
+ gchar *_ot_str = g_strjoinv (":", &option[1]);
+ gchar **names = g_strsplit (_ot_str, ";", -1);
+ guint num_names = g_strv_length (names);
+ outputstrListHandle = Snpe_StringList_Create ();
+ for (guint i = 0; i < num_names; ++i) {
+ if (g_strcmp0 (names[i], "") == 0) {
+ throw std::invalid_argument ("Given tensor name is invalid.");
+ }
+
+ g_info ("Add output tensor name of %s", names[i]);
+ if (Snpe_StringList_Append (outputstrListHandle, names[i]) != SNPE_SUCCESS) {
+ const std::string err_msg = "Failed to append output tensor name: "
+ + (const std::string) names[i];
+ throw std::runtime_error (err_msg);
+ }
+ }
+ g_free (_ot_str);
+ g_strfreev (names);
+ } else if (g_ascii_strcasecmp (option[0], "OutputType") == 0) {
+ gchar **types = g_strsplit (option[1], ";", -1);
+ guint num_types = g_strv_length (types);
+ for (guint i = 0; i < num_types; ++i) {
+ if (g_ascii_strcasecmp (types[i], "FLOAT32") == 0) {
+ outputTypeVec.push_back (SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT);
+ } else if (g_ascii_strcasecmp (types[i], "TF8") == 0) {
+ outputTypeVec.push_back (SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8);
+ } else {
+ g_warning ("Ignore unknown output type (%s)", types[i]);
+ }
+ }
+ g_strfreev (types);
+ } else if (g_ascii_strcasecmp (option[0], "InputType") == 0) {
+ gchar **types = g_strsplit (option[1], ";", -1);
+ guint num_types = g_strv_length (types);
+ for (guint i = 0; i < num_types; ++i) {
+ if (g_ascii_strcasecmp (types[i], "FLOAT32") == 0) {
+ inputTypeVec.push_back (SNPE_USERBUFFERENCODING_ELEMENTTYPE_FLOAT);
+ } else if (g_ascii_strcasecmp (types[i], "TF8") == 0) {
+ inputTypeVec.push_back (SNPE_USERBUFFERENCODING_ELEMENTTYPE_TF8);
+ } else {
+ g_warning ("Ignore unknown input type (%s)", types[i]);
+ }
+ }
+ g_strfreev (types);
+ } else {
+ g_warning ("Unknown option (%s).", options[op]);
+ }
+ }
+
+ g_strfreev (option);
+ }
+
+ g_strfreev (options);
+ };
+
+ try {
+ /* Log SNPE version */
+ lib_version_h = Snpe_Util_GetLibraryVersion ();
+ if (!lib_version_h)
+ throw std::runtime_error ("Failed to get SNPE library version");
+
+ g_info ("SNPE Version: %s", Snpe_DlVersion_ToString (lib_version_h));
+
+ int32_t ver_major = Snpe_DlVersion_GetMajor (lib_version_h);
+ if (ver_major < 2) {
+      const std::string err_msg = "Unsupported SNPE version: 2.x is required, but found "
+          + std::to_string (ver_major) + ".x.";
+ g_critical ("%s", err_msg.c_str ());
+ throw std::runtime_error (err_msg);
+ }
+
+ /* parse custom properties */
+ parse_custom_prop (prop->custom_properties);
+
+ /* Check the given Runtime is available */
+ std::string runtime_str = std::string (Snpe_RuntimeList_RuntimeToString (runtime));
+ if (Snpe_Util_IsRuntimeAvailable (runtime) == 0)
+ throw std::runtime_error ("Given runtime " + runtime_str + " is not available");
+
+ g_info ("Given runtime %s is available", runtime_str.c_str ());
+
+ /* set runtimelist config */
+ runtime_list_h = Snpe_RuntimeList_Create ();
+ if (Snpe_RuntimeList_Add (runtime_list_h, runtime) != SNPE_SUCCESS)
+ throw std::runtime_error ("Failed to add given runtime to Snpe_RuntimeList");
+
+ /* Load network (dlc file) */
+ if (!g_file_test (prop->model_files[0], G_FILE_TEST_IS_REGULAR)) {
+ const std::string err_msg
+ = "Given file " + (std::string) prop->model_files[0] + " is not valid";
+ throw std::invalid_argument (err_msg);
+ }
+
+ snpe->model_path = g_strdup (prop->model_files[0]);
+ container_h = Snpe_DlContainer_Open (snpe->model_path);
+ if (!container_h)
+ throw std::runtime_error ("Failed to open the model file " + std::string (snpe->model_path));
+
+ /* Build SNPE handle */
+ snpebuilder_h = Snpe_SNPEBuilder_Create (container_h);
+ if (!snpebuilder_h)
+ throw std::runtime_error ("Failed to create SNPE builder");
+
+ if (Snpe_SNPEBuilder_SetRuntimeProcessorOrder (snpebuilder_h, runtime_list_h) != SNPE_SUCCESS)
+ throw std::runtime_error ("Failed to set runtime processor order");
+
+ /* set UserBuffer mode */
+ if (Snpe_SNPEBuilder_SetUseUserSuppliedBuffers (snpebuilder_h, true) != SNPE_SUCCESS)
+ throw std::runtime_error ("Failed to set use user supplied buffers");
+
+ /* Set Output Tensors (if given by custom prop) */
+ if (outputstrListHandle) {
+ if (Snpe_SNPEBuilder_SetOutputTensors (snpebuilder_h, outputstrListHandle) != SNPE_SUCCESS) {
+ throw std::runtime_error ("Failed to set output tensors");
+ }
+ }
+
+ snpe->snpe_h = Snpe_SNPEBuilder_Build (snpebuilder_h);
+ if (!snpe->snpe_h)
+ throw std::runtime_error ("Failed to build SNPE handle");
+
+ /* set inputTensorsInfo and inputMap */
+ snpe->inputMap_h = Snpe_UserBufferMap_Create ();
+ inputstrListHandle = Snpe_SNPE_GetInputTensorNames (snpe->snpe_h);
+ if (!snpe->inputMap_h || !inputstrListHandle)
+ throw std::runtime_error ("Error while setting Input tensors");
+
+ snpe->inputInfo.num_tensors = Snpe_StringList_Size (inputstrListHandle);
+ for (size_t i = 0; i < snpe->inputInfo.num_tensors; i++) {
+ GstTensorInfo *info
+ = gst_tensors_info_get_nth_info (std::addressof (snpe->inputInfo), i);
+ const char *inputName = Snpe_StringList_At (inputstrListHandle, i);
+ info->name = g_strdup (inputName);
+
+ auto inputType = SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN;
+
+ /* set input type from custom prop if it is provided */
+ if (inputTypeVec.size () > i)
+ inputType = inputTypeVec[i];
+ handleTensor (inputName, info, snpe->inputMap_h, inputType);
+ }
+
+ /* set outputTensorsInfo and outputMap */
+ snpe->outputMap_h = Snpe_UserBufferMap_Create ();
+
+ /* Get default output tensor names (if not provided by custom prop) */
+ if (outputstrListHandle == NULL)
+ outputstrListHandle = Snpe_SNPE_GetOutputTensorNames (snpe->snpe_h);
+
+ if (!snpe->outputMap_h || !outputstrListHandle)
+ throw std::runtime_error ("Error while setting Output tensors");
+
+ snpe->outputInfo.num_tensors = Snpe_StringList_Size (outputstrListHandle);
+ for (size_t i = 0; i < snpe->outputInfo.num_tensors; i++) {
+ GstTensorInfo *info
+ = gst_tensors_info_get_nth_info (std::addressof (snpe->outputInfo), i);
+ const char *outputName = Snpe_StringList_At (outputstrListHandle, i);
+ info->name = g_strdup (outputName);
+
+ /* set output type from custom prop if it is provided */
+ auto outputType = SNPE_USERBUFFERENCODING_ELEMENTTYPE_UNKNOWN;
+ if (outputTypeVec.size () > i) {
+ outputType = outputTypeVec[i];
+ }
+ handleTensor (outputName, info, snpe->outputMap_h, outputType);
+ }
+
+ _clean_handles ();
+ } catch (const std::exception &e) {
+ _clean_handles ();
+ g_critical ("%s", e.what ());
+ return HAL_ML_ERROR_RUNTIME_ERROR;
+ }
+
+
+ // g_info ("configure_instance was successful");
+ // gchar *inputinfostr = gst_tensors_info_to_string (&snpe->inputInfo);
+ // gchar *outputinfostr = gst_tensors_info_to_string (&snpe->outputInfo);
+ // g_info ("inputinfo: %s", inputinfostr);
+ // g_info ("outputinfo: %s", outputinfostr);
+ // g_free (inputinfostr);
+ // g_free (outputinfostr);
+
+
+ return HAL_ML_ERROR_NONE;
+}
+
+static int ml_snpe_invoke(void *backend_private, const GstTensorMemory *input, GstTensorMemory *output)
+{
+  snpe_handle_s *snpe = (snpe_handle_s *) backend_private;
+  if (!snpe) {
+    g_critical ("[snpe backend] ml_snpe_invoke called with invalid backend_private");
+    return HAL_ML_ERROR_INVALID_PARAMETER;
+  }
+
+ for (unsigned int i = 0; i < snpe->inputInfo.num_tensors; i++) {
+ GstTensorInfo *info = gst_tensors_info_get_nth_info (std::addressof (snpe->inputInfo), i);
+ auto iub = Snpe_UserBufferMap_GetUserBuffer_Ref (snpe->inputMap_h, info->name);
+ Snpe_IUserBuffer_SetBufferAddress (iub, input[i].data);
+ }
+
+ for (unsigned int i = 0; i < snpe->outputInfo.num_tensors; i++) {
+ GstTensorInfo *info = gst_tensors_info_get_nth_info (std::addressof (snpe->outputInfo), i);
+ auto iub = Snpe_UserBufferMap_GetUserBuffer_Ref (snpe->outputMap_h, info->name);
+ Snpe_IUserBuffer_SetBufferAddress (iub, output[i].data);
+ }
+
+  if (Snpe_SNPE_ExecuteUserBuffers (snpe->snpe_h, snpe->inputMap_h, snpe->outputMap_h) != SNPE_SUCCESS) {
+    g_critical ("[snpe backend] Failed to execute the network");
+    return HAL_ML_ERROR_RUNTIME_ERROR;
+  }
+
+ return HAL_ML_ERROR_NONE;
+}
+
+static int ml_snpe_get_model_info(void *backend_private, model_info_ops ops, GstTensorsInfo *in_info, GstTensorsInfo *out_info)
+{
+  snpe_handle_s *snpe = (snpe_handle_s *) backend_private;
+  if (!snpe) {
+    g_critical ("[snpe backend] ml_snpe_get_model_info called with invalid backend_private");
+    return HAL_ML_ERROR_INVALID_PARAMETER;
+  }
+
+  if (ops == GET_IN_OUT_INFO) {
+    gst_tensors_info_copy (in_info, &snpe->inputInfo);
+    gst_tensors_info_copy (out_info, &snpe->outputInfo);
+
+    return HAL_ML_ERROR_NONE;
+  }
+
+  return HAL_ML_ERROR_NOT_SUPPORTED;
+}
+
+static int ml_snpe_event_handler(void *backend_private, event_ops ops, GstTensorFilterFrameworkEventData *data)
+{
+ return HAL_ML_ERROR_NOT_SUPPORTED;
+}
+
+static int ml_snpe_hal_backend_init(void **data)
+{
+ hal_backend_ml_funcs *funcs = NULL;
+
+ if (*data) {
+ funcs = (hal_backend_ml_funcs *) *data;
+ } else {
+ funcs = g_new0 (hal_backend_ml_funcs, 1);
+ }
+ *data = (void *) funcs;
+
+ funcs->init = ml_snpe_init;
+ funcs->deinit = ml_snpe_deinit;
+ funcs->configure_instance = ml_snpe_configure_instance;
+ funcs->invoke = ml_snpe_invoke;
+ funcs->get_model_info = ml_snpe_get_model_info;
+ funcs->event_handler = ml_snpe_event_handler;
+
+ return 0;
+}
+
+static int ml_snpe_hal_backend_exit(void *data)
+{
+ memset (data, 0x0, sizeof(hal_backend_ml_funcs));
+ return 0;
+}
+
+hal_backend hal_backend_ml_data = {
+ .name = "ml-snpe",
+ .vendor = "YONGJOO",
+ .init = ml_snpe_hal_backend_init,
+ .exit = ml_snpe_hal_backend_exit,
+ .major_version = 1,
+ .minor_version = 1,
+};
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
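+
+/**
+ * @file hal-backend-ml-test-passthrough.cc
+ * @brief Pass-through test backend for the ML HAL. It only keeps the tensor
+ *        metadata given at configure time and does not execute any model.
+ */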
+
+#include <cstring>
+#include <stdexcept>
+
+#include <glib.h>
+
+#include <hal-common-interface.h>
+#include <hal-ml-interface.h>
+
+#include "hal-backend-ml-util.h"
+
+
+typedef struct _pass_handle_s
+{
+ GstTensorsInfo inputInfo;
+ GstTensorsInfo outputInfo;
+} pass_handle_s;
+
+static int ml_pass_init(void **backend_private)
+{
+ pass_handle_s *pass = g_new0 (pass_handle_s, 1);
+ gst_tensors_info_init (&pass->inputInfo);
+ gst_tensors_info_init (&pass->outputInfo);
+ *backend_private = pass;
+ return 0;
+}
+
+static int ml_pass_deinit(void *backend_private)
+{
+ pass_handle_s *pass = (pass_handle_s *) backend_private;
+ if (!pass) {
+ g_critical ("[pass backend] ml_pass_deinit called with invalid backend_private");
+ return HAL_ML_ERROR_INVALID_PARAMETER;
+ }
+
+ gst_tensors_info_free (&pass->inputInfo);
+ gst_tensors_info_free (&pass->outputInfo);
+
+ g_free (pass);
+
+ return HAL_ML_ERROR_NONE;
+}
+
+static int ml_pass_configure_instance(void *backend_private, const GstTensorFilterProperties *prop)
+{
+ pass_handle_s *pass = (pass_handle_s *) backend_private;
+ if (!pass) {
+ g_critical ("[pass backend] ml_pass_configure_instance called with invalid backend_private");
+ return HAL_ML_ERROR_INVALID_PARAMETER;
+ }
+
+
+ gst_tensors_info_copy (&pass->inputInfo, &prop->input_meta);
+ gst_tensors_info_copy (&pass->outputInfo, &prop->output_meta);
+
+ return 0;
+}
+
+static int ml_pass_invoke(void *backend_private, const GstTensorMemory *input, GstTensorMemory *output)
+{
+ pass_handle_s *pass = (pass_handle_s *) backend_private;
+ if (!pass) {
+ g_critical ("[pass backend] ml_pass_invoke called with invalid backend_private");
+ return HAL_ML_ERROR_INVALID_PARAMETER;
+ }
+ g_warning ("skip rungraph.. 됐다 치고 하하"); g_usleep (1000 * 100);
+ // pass->result_vsi_nn_RunGraph (pass->graph);
+ return 0;
+}
+
+static int ml_pass_get_model_info(void *backend_private, model_info_ops ops, GstTensorsInfo *in_info, GstTensorsInfo *out_info)
+{
+ pass_handle_s *pass = (pass_handle_s *) backend_private;
+ if (!pass) {
+ g_critical ("[pass backend] ml_pass_get_model_info called with invalid backend_private");
+ return HAL_ML_ERROR_INVALID_PARAMETER;
+ }
+
+ gst_tensors_info_copy (in_info, &pass->inputInfo);
+ gst_tensors_info_copy (out_info, &pass->outputInfo);
+
+ return 0;
+}
+
+static int ml_pass_event_handler(void *backend_private, event_ops ops, GstTensorFilterFrameworkEventData *data)
+{
+  return HAL_ML_ERROR_NOT_SUPPORTED;
+}
+
+static int ml_pass_hal_backend_init(void **data)
+{
+ hal_backend_ml_funcs *funcs = NULL;
+
+ if (*data) {
+ funcs = (hal_backend_ml_funcs *) *data;
+ } else {
+ funcs = g_new0 (hal_backend_ml_funcs, 1);
+ }
+ *data = (void *) funcs;
+
+ funcs->init = ml_pass_init;
+ funcs->deinit = ml_pass_deinit;
+ funcs->configure_instance = ml_pass_configure_instance;
+ funcs->invoke = ml_pass_invoke;
+ funcs->get_model_info = ml_pass_get_model_info;
+ funcs->event_handler = ml_pass_event_handler;
+
+ return 0;
+}
+
+static int ml_pass_hal_backend_exit(void *data)
+{
+ memset (data, 0x0, sizeof(hal_backend_ml_funcs));
+ return 0;
+}
+
+hal_backend hal_backend_ml_data = {
+ .name = "ml-pass",
+ .vendor = "YONGJOO",
+ .init = ml_pass_hal_backend_init,
+ .exit = ml_pass_hal_backend_exit,
+ .major_version = 1,
+ .minor_version = 1,
+};
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
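+
+/**
+ * @file hal-backend-ml-util.cc
+ * @brief Tensor-info helper functions shared by the ML HAL backends.
+ *        These mirror the corresponding NNStreamer utility functions.
+ */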
+
+#include <glib.h>
+
+#include "hal-backend-ml-util.h"
+
+void gst_tensor_info_init (GstTensorInfo * info)
+{
+ guint i;
+
+ g_return_if_fail (info != NULL);
+
+ info->name = NULL;
+ info->type = _NNS_END;
+
+ for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
+ info->dimension[i] = 0;
+ }
+}
+
+void gst_tensor_info_free (GstTensorInfo * info)
+{
+ g_return_if_fail (info != NULL);
+
+ g_free (info->name);
+
+ /* Init default */
+ gst_tensor_info_init (info);
+}
+
+void gst_tensors_info_init (GstTensorsInfo * info)
+{
+ guint i;
+
+ g_return_if_fail (info != NULL);
+
+ info->num_tensors = 0;
+ info->extra = NULL;
+
+  /** @note default format is static */
+ info->format = _NNS_TENSOR_FORMAT_STATIC;
+
+ for (i = 0; i < NNS_TENSOR_MEMORY_MAX; i++) {
+ gst_tensor_info_init (&info->info[i]);
+ }
+}
+
+void gst_tensors_info_free (GstTensorsInfo * info)
+{
+ guint i;
+
+ g_return_if_fail (info != NULL);
+
+ for (i = 0; i < NNS_TENSOR_MEMORY_MAX; i++) {
+ gst_tensor_info_free (&info->info[i]);
+ }
+
+ if (info->extra) {
+ for (i = 0; i < NNS_TENSOR_SIZE_EXTRA_LIMIT; ++i)
+ gst_tensor_info_free (&info->extra[i]);
+
+ g_free (info->extra);
+ info->extra = NULL;
+ }
+
+ /* Init default */
+ gst_tensors_info_init (info);
+}
+
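+/** Byte size of each element, indexed by tensor_type. */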
+static const guint tensor_element_size[] = {
+ [_NNS_INT32] = 4,
+ [_NNS_UINT32] = 4,
+ [_NNS_INT16] = 2,
+ [_NNS_UINT16] = 2,
+ [_NNS_INT8] = 1,
+ [_NNS_UINT8] = 1,
+ [_NNS_FLOAT64] = 8,
+ [_NNS_FLOAT32] = 4,
+ [_NNS_INT64] = 8,
+ [_NNS_UINT64] = 8,
+ [_NNS_FLOAT16] = 2,
+ [_NNS_END] = 0,
+};
+
+gsize gst_tensor_get_element_size (tensor_type type)
+{
+ g_return_val_if_fail (type >= 0 && type <= _NNS_END, 0);
+
+ return tensor_element_size[type];
+}
+
+gulong gst_tensor_get_element_count (const tensor_dim dim)
+{
+ gulong count = 1;
+ guint i;
+
+ for (i = 0; i < NNS_TENSOR_RANK_LIMIT; i++) {
+ if (dim[i] == 0)
+ break;
+
+ count *= dim[i];
+ }
+
+ return (i > 0) ? count : 0;
+}
+
+gsize gst_tensor_info_get_size (const GstTensorInfo * info)
+{
+ gsize data_size;
+
+ g_return_val_if_fail (info != NULL, 0);
+
+ data_size = gst_tensor_get_element_count (info->dimension) *
+ gst_tensor_get_element_size (info->type);
+
+ return data_size;
+}
+
+GstTensorInfo * gst_tensors_info_get_nth_info (GstTensorsInfo * info, guint index)
+{
+ guint i;
+
+ g_return_val_if_fail (info != NULL, NULL);
+
+ if (index < NNS_TENSOR_MEMORY_MAX)
+ return &info->info[index];
+
+ if (!info->extra) {
+ info->extra = g_new0 (GstTensorInfo, NNS_TENSOR_SIZE_EXTRA_LIMIT);
+
+ for (i = 0; i < NNS_TENSOR_SIZE_EXTRA_LIMIT; ++i)
+ gst_tensor_info_init (&info->extra[i]);
+ }
+
+ if (index < NNS_TENSOR_SIZE_LIMIT)
+ return &info->extra[index - NNS_TENSOR_MEMORY_MAX];
+
+ g_critical ("Failed to get the information, invalid index %u (max %d).",
+ index, NNS_TENSOR_SIZE_LIMIT);
+ return NULL;
+}
+
+void gst_tensor_info_copy_n (GstTensorInfo * dest, const GstTensorInfo * src,
+ const guint n)
+{
+ guint i;
+
+ g_return_if_fail (dest != NULL);
+ g_return_if_fail (src != NULL);
+
+ dest->name = g_strdup (src->name);
+ dest->type = src->type;
+
+ for (i = 0; i < n; i++) {
+ dest->dimension[i] = src->dimension[i];
+ }
+}
+
+void gst_tensor_info_copy (GstTensorInfo * dest, const GstTensorInfo * src)
+{
+ gst_tensor_info_copy_n (dest, src, NNS_TENSOR_RANK_LIMIT);
+}
+
+void gst_tensors_info_copy (GstTensorsInfo * dest, const GstTensorsInfo * src)
+{
+ guint i, num;
+ GstTensorInfo *_dest, *_src;
+
+ g_return_if_fail (dest != NULL);
+ g_return_if_fail (src != NULL);
+
+ gst_tensors_info_init (dest);
+ num = dest->num_tensors = src->num_tensors;
+ dest->format = src->format;
+
+ if (src->format != _NNS_TENSOR_FORMAT_STATIC)
+ return;
+
+ for (i = 0; i < num; i++) {
+ _dest = gst_tensors_info_get_nth_info (dest, i);
+ _src = gst_tensors_info_get_nth_info ((GstTensorsInfo *) src, i);
+
+ gst_tensor_info_copy (_dest, _src);
+ }
+}
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
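+
+/**
+ * @file hal-backend-ml-util.h
+ * @brief Declarations of the tensor-info helpers shared by the ML HAL backends.
+ */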
+
+#pragma once
+
+#include <glib.h>
+
+#include "tensor_typedef.h"
+#include "nnstreamer_plugin_api_filter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void gst_tensor_info_init (GstTensorInfo * info);
+void gst_tensor_info_free (GstTensorInfo * info);
+void gst_tensors_info_init (GstTensorsInfo * info);
+void gst_tensors_info_free (GstTensorsInfo * info);
+gsize gst_tensor_get_element_size (tensor_type type);
+gulong gst_tensor_get_element_count (const tensor_dim dim);
+gsize gst_tensor_info_get_size (const GstTensorInfo * info);
+GstTensorInfo * gst_tensors_info_get_nth_info (GstTensorsInfo * info, guint index);
+void gst_tensor_info_copy_n (GstTensorInfo * dest, const GstTensorInfo * src, const guint n);
+void gst_tensor_info_copy (GstTensorInfo * dest, const GstTensorInfo * src);
+void gst_tensors_info_copy (GstTensorsInfo * dest, const GstTensorsInfo * src);
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0 */
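+
+/**
+ * @file hal-backend-ml-vivante.cc
+ * @brief ML HAL backend implementation for Vivante NPUs. The model-specific
+ *        ovxlib functions are resolved at runtime from a per-model shared
+ *        library via dlopen()/dlsym().
+ */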
+
+#include <cstring>
+#include <stdexcept>
+
+#include <glib.h>
+#include <dlfcn.h>
+
+#include <hal-common-interface.h>
+#include <hal-ml-interface.h>
+
+#include <ovx/vsi_nn_pub.h>
+
+#include "hal-backend-ml-util.h"
+
+
+static tensor_type convert_tensortype (vsi_nn_type_e vx_type)
+{
+  switch (vx_type) {
+ case VSI_NN_TYPE_INT8:
+ return _NNS_INT8;
+ case VSI_NN_TYPE_UINT8:
+ return _NNS_UINT8;
+ case VSI_NN_TYPE_INT16:
+ return _NNS_INT16;
+ case VSI_NN_TYPE_UINT16:
+ return _NNS_UINT16;
+ case VSI_NN_TYPE_FLOAT16:
+#ifdef FLOAT16_SUPPORT
+ return _NNS_FLOAT16;
+#else
+ return _NNS_UINT16;
+#endif
+ case VSI_NN_TYPE_FLOAT32:
+ return _NNS_FLOAT32;
+ default:
+ break;
+ }
+ return _NNS_END;
+}
+
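+/**
+ * @brief Private data for a Vivante backend instance. The function pointers
+ *        are resolved from the model-specific shared object at configure time.
+ */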
+typedef struct _vivante_handle_s
+{
+ char *model_path;
+ char *so_path;
+ GstTensorsInfo inputInfo;
+ GstTensorsInfo outputInfo;
+
+ vsi_nn_graph_t *graph;
+ void *handle; /* dlopened model so */
+ vsi_status (*result_vsi_nn_CopyDataToTensor) (vsi_nn_graph_t *, vsi_nn_tensor_t *, uint8_t *);
+ void (*result_vnn_ReleaseNeuralNetwork) (vsi_nn_graph_t *);
+ vsi_nn_graph_t *(*result_vnn_CreateNeuralNetwork) (const char *);
+ vsi_status (*result_vsi_nn_RunGraph) (vsi_nn_graph_t *);
+ int postProcess;
+ vsi_status (*postProcessFunc) (vsi_nn_graph_t * graph);
+} vivante_handle_s;
+
+static int ml_vivante_init(void **backend_private)
+{
+ vivante_handle_s *vivante = g_new0 (vivante_handle_s, 1);
+ gst_tensors_info_init (&vivante->inputInfo);
+ gst_tensors_info_init (&vivante->outputInfo);
+ *backend_private = vivante;
+ return 0;
+}
+
+static int ml_vivante_deinit(void *backend_private)
+{
+ vivante_handle_s *vivante = (vivante_handle_s *) backend_private;
+ if (!vivante) {
+ g_error ("[vivante backend] ml_vivante_deinit called with invalid backend_private");
+ return HAL_ML_ERROR_INVALID_PARAMETER;
+ }
+
+ if (vivante->graph)
+ vivante->result_vnn_ReleaseNeuralNetwork (vivante->graph);
+
+ if (vivante->handle)
+ dlclose (vivante->handle);
+
+ gst_tensors_info_free (&vivante->inputInfo);
+ gst_tensors_info_free (&vivante->outputInfo);
+
+ g_free (vivante->model_path);
+ g_free (vivante->so_path);
+ g_free (vivante);
+
+ return HAL_ML_ERROR_NONE;
+}
+
+static int ml_vivante_configure_instance(void *backend_private, const GstTensorFilterProperties *prop)
+{
+ vivante_handle_s *vivante = (vivante_handle_s *) backend_private;
+ if (!vivante) {
+ g_error ("[vivante backend] ml_vivante_configure_instance called with invalid backend_private");
+ return HAL_ML_ERROR_INVALID_PARAMETER;
+ }
+
+ vivante->model_path = g_strdup(prop->model_files[0]);
+ vivante->so_path = g_strdup(prop->model_files[1]);
+
+  vivante->handle = dlopen (vivante->so_path, RTLD_NOW);
+  if (!vivante->handle) {
+    g_critical ("Failed to load shared library %s: %s", vivante->so_path, dlerror ());
+    return HAL_ML_ERROR_RUNTIME_ERROR;
+  }
+
+  vivante->result_vsi_nn_CopyDataToTensor = (vsi_status (*)(vsi_nn_graph_t *, vsi_nn_tensor_t *, uint8_t *)) dlsym(vivante->handle, "vsi_nn_CopyDataToTensor");
+  vivante->result_vnn_ReleaseNeuralNetwork = (void (*)(vsi_nn_graph_t *)) dlsym(vivante->handle, "vnn_ReleaseNeuralNetwork");
+  vivante->result_vnn_CreateNeuralNetwork = (vsi_nn_graph_t *(*)(const char *)) dlsym(vivante->handle, "vnn_CreateNeuralNetwork");
+  vivante->result_vsi_nn_RunGraph = (vsi_status (*)(vsi_nn_graph_t *)) dlsym(vivante->handle, "vsi_nn_RunGraph");
+
+  if (!vivante->result_vsi_nn_CopyDataToTensor || !vivante->result_vnn_ReleaseNeuralNetwork
+      || !vivante->result_vnn_CreateNeuralNetwork || !vivante->result_vsi_nn_RunGraph) {
+    g_critical ("Failed to resolve required symbols from %s", vivante->so_path);
+    return HAL_ML_ERROR_RUNTIME_ERROR;
+  }
+
+  if (vivante->postProcess) {
+    vivante->postProcessFunc = (vsi_status (*)(vsi_nn_graph_t *)) dlsym(vivante->handle, "vnn_PostProcessNeuralNetwork");
+  }
+
+  vivante->graph = vivante->result_vnn_CreateNeuralNetwork(vivante->model_path);
+  if (!vivante->graph) {
+    g_critical ("Failed to create the neural network from %s", vivante->model_path);
+    return HAL_ML_ERROR_RUNTIME_ERROR;
+  }
+
+ /* setting input and output tensors info */
+ gst_tensors_info_init (&vivante->inputInfo);
+ gst_tensors_info_init (&vivante->outputInfo);
+
+ vivante->inputInfo.num_tensors = vivante->graph->input.num;
+ for (unsigned int i = 0; i < vivante->graph->input.num; i++) {
+ vsi_nn_tensor_t *i_tensor = vsi_nn_GetTensor (vivante->graph, vivante->graph->input.tensors[i]);
+ GstTensorInfo *info = gst_tensors_info_get_nth_info (&vivante->inputInfo, i);
+
+ info->type = convert_tensortype (i_tensor->attr.dtype.vx_type);
+ info->name = g_strdup_printf ("%i", vivante->graph->input.tensors[i]);
+ for (unsigned int j = 0; j < i_tensor->attr.dim_num; ++j) {
+ info->dimension[j] = i_tensor->attr.size[j];
+ }
+ }
+
+ vivante->outputInfo.num_tensors = vivante->graph->output.num;
+ for (unsigned int i = 0; i < vivante->graph->output.num; i++) {
+ vsi_nn_tensor_t *o_tensor = vsi_nn_GetTensor (vivante->graph, vivante->graph->output.tensors[i]);
+ GstTensorInfo *info = gst_tensors_info_get_nth_info (&vivante->outputInfo, i);
+
+ info->type = convert_tensortype (o_tensor->attr.dtype.vx_type);
+ info->name = g_strdup_printf ("%i", vivante->graph->output.tensors[i]);
+ for (unsigned int j = 0; j < o_tensor->attr.dim_num; ++j) {
+ info->dimension[j] = o_tensor->attr.size[j];
+ }
+ }
+
+ return 0;
+}
+
+static int ml_vivante_invoke(void *backend_private, const GstTensorMemory *input, GstTensorMemory *output)
+{
+ vivante_handle_s *vivante = (vivante_handle_s *) backend_private;
+ if (!vivante) {
+ g_error ("[vivante backend] ml_vivante_invoke called with invalid backend_private");
+ return HAL_ML_ERROR_INVALID_PARAMETER;
+ }
+
+ for (unsigned int i = 0; i < vivante->graph->input.num; i++) {
+ vsi_nn_tensor_t *tensor = vsi_nn_GetTensor (vivante->graph, vivante->graph->input.tensors[i]);
+ vivante->result_vsi_nn_CopyDataToTensor (vivante->graph, tensor, (uint8_t *) input[i].data);
+ }
+
+ g_warning ("skip rungraph.. 됐다 치고"); g_usleep (1000 * 100);
+ // vivante->result_vsi_nn_RunGraph (vivante->graph);
+
+ if (vivante->postProcess)
+ vivante->postProcessFunc (vivante->graph);
+
+ for (unsigned int i = 0; i < vivante->graph->output.num; i++) {
+ vsi_nn_tensor_t *out_tensor = vsi_nn_GetTensor (vivante->graph, vivante->graph->output.tensors[i]);
+ vsi_nn_CopyTensorToBuffer (vivante->graph, out_tensor, output[i].data);
+ }
+
+ return 0;
+}
+
+static int ml_vivante_get_model_info(void *backend_private, model_info_ops ops, GstTensorsInfo *in_info, GstTensorsInfo *out_info)
+{
+ vivante_handle_s *vivante = (vivante_handle_s *) backend_private;
+ if (!vivante) {
+ g_error ("[vivante backend] ml_vivante_get_model_info called with invalid backend_private");
+ return HAL_ML_ERROR_INVALID_PARAMETER;
+ }
+
+ gst_tensors_info_copy (in_info, &vivante->inputInfo);
+ gst_tensors_info_copy (out_info, &vivante->outputInfo);
+
+ return 0;
+}
+
+static int ml_vivante_event_handler(void *backend_private, event_ops ops, GstTensorFilterFrameworkEventData *data)
+{
+  return HAL_ML_ERROR_NOT_SUPPORTED;
+}
+
+static int ml_vivante_hal_backend_init(void **data)
+{
+ hal_backend_ml_funcs *funcs = NULL;
+
+ if (*data) {
+ funcs = (hal_backend_ml_funcs *) *data;
+ } else {
+ funcs = g_new0 (hal_backend_ml_funcs, 1);
+ }
+ *data = (void *) funcs;
+
+ funcs->init = ml_vivante_init;
+ funcs->deinit = ml_vivante_deinit;
+ funcs->configure_instance = ml_vivante_configure_instance;
+ funcs->invoke = ml_vivante_invoke;
+ funcs->get_model_info = ml_vivante_get_model_info;
+ funcs->event_handler = ml_vivante_event_handler;
+
+ return 0;
+}
+
+static int ml_vivante_hal_backend_exit(void *data)
+{
+ memset (data, 0x0, sizeof(hal_backend_ml_funcs));
+ return 0;
+}
+
+hal_backend hal_backend_ml_data = {
+ .name = "ml-vivante",
+ .vendor = "YONGJOO",
+ .init = ml_vivante_hal_backend_init,
+ .exit = ml_vivante_hal_backend_exit,
+ .major_version = 1,
+ .minor_version = 1,
+};