nns_construct_pipe_info (JNIEnv * env, jobject thiz, gpointer handle, nns_pipe_type_e type)
{
pipeline_info_s *pipe_info;
+ jclass cls_data, cls_info;
pipe_info = g_new0 (pipeline_info_s, 1);
g_return_val_if_fail (pipe_info != NULL, NULL);
pipe_info->version = (*env)->GetVersion (env);
pipe_info->instance = (*env)->NewGlobalRef (env, thiz);
- jclass cls_data = (*env)->FindClass (env, "org/nnsuite/nnstreamer/TensorsData");
+ cls_data = (*env)->FindClass (env, "org/nnsuite/nnstreamer/TensorsData");
pipe_info->cls_tensors_data = (*env)->NewGlobalRef (env, cls_data);
(*env)->DeleteLocalRef (env, cls_data);
- jclass cls_info = (*env)->FindClass (env, "org/nnsuite/nnstreamer/TensorsInfo");
+ cls_info = (*env)->FindClass (env, "org/nnsuite/nnstreamer/TensorsInfo");
pipe_info->cls_tensors_info = (*env)->NewGlobalRef (env, cls_info);
(*env)->DeleteLocalRef (env, cls_info);
ml_tensors_data_s * data, jobject * result)
{
guint i;
+ jmethodID mid_init, mid_add;
+ jobject obj_data = NULL;
g_return_val_if_fail (pipe_info, FALSE);
g_return_val_if_fail (env, FALSE);
g_return_val_if_fail (data && result, FALSE);
/* method to generate tensors data */
- jmethodID mid_init = (*env)->GetMethodID (env, pipe_info->cls_tensors_data, "<init>", "()V");
- jmethodID mid_add = (*env)->GetMethodID (env, pipe_info->cls_tensors_data, "addTensorData", "([B)V");
+ mid_init = (*env)->GetMethodID (env, pipe_info->cls_tensors_data, "<init>", "()V");
+ mid_add = (*env)->GetMethodID (env, pipe_info->cls_tensors_data, "addTensorData", "([B)V");
- jobject obj_data = (*env)->NewObject (env, pipe_info->cls_tensors_data, mid_init);
+ obj_data = (*env)->NewObject (env, pipe_info->cls_tensors_data, mid_init);
if (!obj_data) {
nns_loge ("Failed to allocate object for tensors data.");
goto done;
(*env)->DeleteLocalRef (env, tensor_data);
}
-
- print_log ("Parse tensors data [%d] data at %p size %zd",
- i, data->tensors[i].tensor, data->tensors[i].size);
}
(*env)->DeleteLocalRef (env, cls_arraylist);
ml_tensors_info_s * info, jobject * result)
{
guint i, j;
+ jmethodID mid_init, mid_add;
+ jobject obj_info = NULL;
g_return_val_if_fail (pipe_info, FALSE);
g_return_val_if_fail (env, FALSE);
g_return_val_if_fail (info && result, FALSE);
/* method to generate tensors info */
- jmethodID mid_init = (*env)->GetMethodID (env, pipe_info->cls_tensors_info, "<init>", "()V");
- jmethodID mid_add = (*env)->GetMethodID (env, pipe_info->cls_tensors_info, "addTensorInfo", "(Ljava/lang/String;I[I)V");
+ mid_init = (*env)->GetMethodID (env, pipe_info->cls_tensors_info, "<init>", "()V");
+ mid_add = (*env)->GetMethodID (env, pipe_info->cls_tensors_info, "addTensorInfo", "(Ljava/lang/String;I[I)V");
- jobject obj_info = (*env)->NewObject (env, pipe_info->cls_tensors_info, mid_init);
+ obj_info = (*env)->NewObject (env, pipe_info->cls_tensors_info, mid_init);
if (!obj_info) {
nns_loge ("Failed to allocate object for tensors info.");
goto done;
jstring name = NULL;
jint type;
jintArray dimension;
+ jint *dim;
if (info->info[i].name)
name = (*env)->NewStringUTF (env, info->info[i].name);
dimension = (*env)->NewIntArray (env, ML_TENSOR_RANK_LIMIT);
- jint *dim = (*env)->GetIntArrayElements (env, dimension, NULL);
+ dim = (*env)->GetIntArrayElements (env, dimension, NULL);
for (j = 0; j < ML_TENSOR_RANK_LIMIT; j++) {
dim[j] = (jint) info->info[i].dimension[j];
}
(*env)->DeleteLocalRef (env, cls_info);
(*env)->DeleteLocalRef (env, item);
-
- print_log ("Parse tensors info [%d] name %s type %d dim %d:%d:%d:%d",
- i, GST_STR_NULL (info->info[i].name), info->info[i].type,
- info->info[i].dimension[0], info->info[i].dimension[1],
- info->info[i].dimension[2], info->info[i].dimension[3]);
}
(*env)->DeleteLocalRef (env, cls_arraylist);
{
pipeline_info_s *pipe_info = NULL;
ml_tensors_data_s *in_data, *out_data;
- ml_tensors_info_s *in_info, *out_info;
+ ml_tensors_info_h in_info, out_info;
JNIEnv *env;
+ jclass cls_custom;
+ jmethodID mid_invoke;
+ jobject obj_in_data, obj_out_data;
+ jobject obj_in_info, obj_out_info;
guint i;
int ret = -1;
- in_data = out_data = NULL;
- in_info = out_info = NULL;
-
/* get pipe info and init */
pipe_info = g_hash_table_lookup (g_customfilters, prop->fwname);
g_return_val_if_fail (pipe_info, -1);
env = nns_get_jni_env (pipe_info);
g_return_val_if_fail (env, -1);
- in_data = g_new0 (ml_tensors_data_s, 1);
- if (in_data == NULL) {
+ in_data = out_data = NULL;
+ in_info = out_info = NULL;
+ obj_in_data = obj_out_data = NULL;
+ obj_in_info = obj_out_info = NULL;
+
+ if ((in_data = g_new0 (ml_tensors_data_s, 1)) == NULL) {
nns_loge ("Failed to allocate memory for input tensors data.");
goto done;
}
- out_data = g_new0 (ml_tensors_data_s, 1);
- if (out_data == NULL) {
+ if ((out_data = g_new0 (ml_tensors_data_s, 1)) == NULL) {
nns_loge ("Failed to allocate memory for output tensors data.");
goto done;
}
- in_info = g_new0 (ml_tensors_info_s, 1);
- if (in_info == NULL) {
- nns_loge ("Failed to allocate memory for input tensors info.");
+ if (ml_tensors_info_create (&in_info) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create input tensors info.");
goto done;
}
- out_info = g_new0 (ml_tensors_info_s, 1);
- if (out_info == NULL) {
- nns_loge ("Failed to allocate memory for output tensors info.");
+ if (ml_tensors_info_create (&out_info) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create output tensors info.");
goto done;
}
+ cls_custom = (*env)->GetObjectClass (env, pipe_info->instance);
+ mid_invoke = (*env)->GetMethodID (env, cls_custom, "invoke",
+ "(Lorg/nnsuite/nnstreamer/TensorsData;"
+ "Lorg/nnsuite/nnstreamer/TensorsInfo;"
+ "Lorg/nnsuite/nnstreamer/TensorsInfo;)"
+ "Lorg/nnsuite/nnstreamer/TensorsData;");
+
/* convert to c-api data type */
in_data->num_tensors = prop->input_meta.num_tensors;
for (i = 0; i < in_data->num_tensors; i++) {
ml_tensors_info_copy_from_gst (out_info, &prop->output_meta);
/* call invoke callback */
- jobject obj_in_data, obj_out_data;
- jobject obj_in_info, obj_out_info;
-
- obj_in_data = obj_out_data = NULL;
- obj_in_info = obj_out_info = NULL;
-
if (!nns_convert_tensors_info (pipe_info, env, in_info, &obj_in_info)) {
nns_loge ("Failed to convert input info to info-object.");
goto done;
goto done;
}
- jclass cls_custom = (*env)->GetObjectClass (env, pipe_info->instance);
- jmethodID mid_invoke = (*env)->GetMethodID (env, cls_custom, "invoke",
- "(Lorg/nnsuite/nnstreamer/TensorsData;"
- "Lorg/nnsuite/nnstreamer/TensorsInfo;"
- "Lorg/nnsuite/nnstreamer/TensorsInfo;)"
- "Lorg/nnsuite/nnstreamer/TensorsData;");
-
obj_out_data = (*env)->CallObjectMethod (env, pipe_info->instance, mid_invoke,
obj_in_data, obj_in_info, obj_out_info);
+
if (!nns_parse_tensors_data (pipe_info, env, obj_out_data, out_data)) {
nns_loge ("Failed to parse output data.");
goto done;
g_free (in_data);
g_free (out_data);
- ml_tensors_info_destroy ((ml_tensors_info_h) in_info);
- ml_tensors_info_destroy ((ml_tensors_info_h) out_info);
+ ml_tensors_info_destroy (in_info);
+ ml_tensors_info_destroy (out_info);
return ret;
}
const GstTensorsInfo * in_info, GstTensorsInfo * out_info)
{
pipeline_info_s *pipe_info = NULL;
- ml_tensors_info_s *in, *out;
+ ml_tensors_info_h in, out;
+ jobject obj_in_info, obj_out_info;
JNIEnv *env;
+ jclass cls_custom;
+ jmethodID mid_info;
int ret = -1;
- in = out = NULL;
-
/* get pipe info and init */
pipe_info = g_hash_table_lookup (g_customfilters, prop->fwname);
g_return_val_if_fail (pipe_info, -1);
env = nns_get_jni_env (pipe_info);
g_return_val_if_fail (env, -1);
- in = g_new0 (ml_tensors_info_s, 1);
- if (in == NULL) {
- nns_loge ("Failed to allocate memory for input tensors info.");
+ in = out = NULL;
+ obj_in_info = obj_out_info = NULL;
+
+ if (ml_tensors_info_create (&in) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create input tensors info.");
goto done;
}
- out = g_new0 (ml_tensors_info_s, 1);
- if (out == NULL) {
- nns_loge ("Failed to allocate memory for output tensors info.");
+ if (ml_tensors_info_create (&out) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create output tensors info.");
goto done;
}
+ cls_custom = (*env)->GetObjectClass (env, pipe_info->instance);
+ mid_info = (*env)->GetMethodID (env, cls_custom, "getOutputInfo",
+ "(Lorg/nnsuite/nnstreamer/TensorsInfo;)"
+ "Lorg/nnsuite/nnstreamer/TensorsInfo;");
+
/* convert to c-api data type */
ml_tensors_info_copy_from_gst (in, in_info);
/* call output info callback */
- jobject obj_in_info, obj_out_info;
-
- obj_in_info = obj_out_info = NULL;
if (!nns_convert_tensors_info (pipe_info, env, in, &obj_in_info)) {
nns_loge ("Failed to convert input tensors info to data object.");
goto done;
}
- jclass cls_custom = (*env)->GetObjectClass (env, pipe_info->instance);
- jmethodID mid_info = (*env)->GetMethodID (env, cls_custom, "getOutputInfo",
- "(Lorg/nnsuite/nnstreamer/TensorsInfo;)"
- "Lorg/nnsuite/nnstreamer/TensorsInfo;");
-
obj_out_info = (*env)->CallObjectMethod (env, pipe_info->instance, mid_info, obj_in_info);
- if (!obj_out_info || !nns_parse_tensors_info (pipe_info, env, obj_out_info, out)) {
+
+ if (!nns_parse_tensors_info (pipe_info, env, obj_out_info, out)) {
nns_loge ("Failed to parse output info.");
goto done;
}
(*env)->DeleteLocalRef (env, obj_out_info);
(*env)->DeleteLocalRef (env, cls_custom);
- ml_tensors_info_destroy ((ml_tensors_info_h) in);
- ml_tensors_info_destroy ((ml_tensors_info_h) out);
+ ml_tensors_info_destroy (in);
+ ml_tensors_info_destroy (out);
return ret;
}
fw->invoke_NN = nns_customfilter_invoke;
fw->setInputDimension = nns_customfilter_set_dimension;
+ /* register custom-filter */
if (!nnstreamer_filter_probe (fw)) {
nns_loge ("Failed to register custom-filter %s.", filter_name);
g_free (fw->name);
pipeline_info_s *pipe_info;
ml_tensors_data_s *out_data;
ml_tensors_info_s *out_info;
+ jobject obj_data, obj_info;
+ JNIEnv *env;
cb_data = (element_data_s *) user_data;
pipe_info = cb_data->pipe_info;
out_data = (ml_tensors_data_s *) data;
out_info = (ml_tensors_info_s *) info;
- print_log ("Received new data from %s (total %d tensors)",
- cb_data->name, out_data->num_tensors);
-
- JNIEnv *env = nns_get_jni_env (pipe_info);
- if (env == NULL) {
+ if ((env = nns_get_jni_env (pipe_info)) == NULL) {
nns_logw ("Cannot get jni env in the sink callback.");
return;
}
- jobject obj_data, obj_info;
-
obj_data = obj_info = NULL;
if (nns_convert_tensors_data (pipe_info, env, out_data, &obj_data) &&
nns_convert_tensors_info (pipe_info, env, out_info, &obj_info)) {
int status;
const char *pipeline = (*env)->GetStringUTFChars (env, description, NULL);
- print_log ("Pipeline: %s", pipeline);
pipe_info = nns_construct_pipe_info (env, thiz, NULL, NNS_PIPE_TYPE_PIPELINE);
if (add_state_cb)
{
pipeline_info_s *pipe_info = NULL;
ml_pipeline_src_h src;
- ml_tensors_data_s *input = NULL;
+ ml_tensors_data_s *in_data = NULL;
int status;
jboolean res = JNI_FALSE;
const char *element_name = (*env)->GetStringUTFChars (env, name, NULL);
goto done;
}
- input = g_new0 (ml_tensors_data_s, 1);
- if (input == NULL) {
+ if ((in_data = g_new0 (ml_tensors_data_s, 1)) == NULL) {
nns_loge ("Failed to allocate memory for input data.");
goto done;
}
- if (!nns_parse_tensors_data (pipe_info, env, in, input)) {
+ if (!nns_parse_tensors_data (pipe_info, env, in, in_data)) {
nns_loge ("Failed to parse input data.");
- ml_tensors_data_destroy ((ml_tensors_data_h) input);
+ ml_tensors_data_destroy ((ml_tensors_data_h) in_data);
goto done;
}
- status = ml_pipeline_src_input_data (src, (ml_tensors_data_h) input,
+ status = ml_pipeline_src_input_data (src, (ml_tensors_data_h) in_data,
ML_PIPELINE_BUF_POLICY_AUTO_FREE);
if (status != ML_ERROR_NONE) {
- nns_loge ("Failed to input tensors data.");
+ nns_loge ("Failed to input tensors data to source node %s.", element_name);
goto done;
}
result = (*env)->NewObjectArray (env, total, cls_string, (*env)->NewStringUTF (env, ""));
if (result == NULL) {
nns_loge ("Failed to allocate string array.");
+ (*env)->DeleteLocalRef (env, cls_string);
goto done;
}
pipeline_info_s *pipe_info = NULL;
ml_single_h single;
ml_tensors_info_h in_info, out_info;
- int status;
const char *model_info = (*env)->GetStringUTFChars (env, model, NULL);
- single = in_info = out_info = NULL;
+ single = NULL;
+ in_info = out_info = NULL;
if (in) {
- ml_tensors_info_create (&in_info);
- nns_parse_tensors_info (pipe_info, env, in, (ml_tensors_info_s *) in_info);
+ if (ml_tensors_info_create (&in_info) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create input tensors info.");
+ goto done;
+ }
+
+ if (!nns_parse_tensors_info (pipe_info, env, in, in_info)) {
+ nns_loge ("Failed to parse input tensor.");
+ goto done;
+ }
}
if (out) {
- ml_tensors_info_create (&out_info);
- nns_parse_tensors_info (pipe_info, env, out, (ml_tensors_info_s *) out_info);
+ if (ml_tensors_info_create (&out_info) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create output tensors info.");
+ goto done;
+ }
+
+ if (!nns_parse_tensors_info (pipe_info, env, out, out_info)) {
+ nns_loge ("Failed to parse output tensor.");
+ goto done;
+ }
}
/* supposed tensorflow-lite only for android */
- status = ml_single_open (&single, model_info, in_info, out_info,
- ML_NNFW_TYPE_ANY, ML_NNFW_HW_AUTO);
- if (status != ML_ERROR_NONE) {
+ if (ml_single_open (&single, model_info, in_info, out_info,
+ ML_NNFW_TYPE_ANY, ML_NNFW_HW_AUTO) != ML_ERROR_NONE) {
nns_loge ("Failed to create the pipeline.");
goto done;
}
pipeline_info_s *pipe_info;
ml_single_h single;
ml_tensors_info_h in_info, out_info;
- ml_tensors_data_s *input, *output;
+ ml_tensors_data_s *in_data, *out_data;
int status;
jobject result = NULL;
pipe_info = CAST_TO_TYPE (handle, pipeline_info_s*);
single = pipe_info->pipeline_handle;
in_info = out_info = NULL;
- output = NULL;
+ in_data = out_data = NULL;
- input = g_new0 (ml_tensors_data_s, 1);
- if (input == NULL) {
+ if ((in_data = g_new0 (ml_tensors_data_s, 1)) == NULL) {
nns_loge ("Failed to allocate memory for input data.");
goto done;
}
- if (!nns_parse_tensors_data (pipe_info, env, obj_data, input)) {
+ if (!nns_parse_tensors_data (pipe_info, env, obj_data, in_data)) {
nns_loge ("Failed to parse input data.");
goto done;
}
if (obj_info) {
- ml_tensors_info_create (&in_info);
+ if (ml_tensors_info_create (&in_info) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create input tensors info.");
+ goto done;
+ }
- if (!nns_parse_tensors_info (pipe_info, env, obj_info,
- (ml_tensors_info_s *) in_info)) {
+ if (!nns_parse_tensors_info (pipe_info, env, obj_info, in_info)) {
nns_loge ("Failed to parse input tensors info.");
goto done;
}
- status = ml_single_invoke_dynamic (single, input, in_info,
- (ml_tensors_data_h *) &output, &out_info);
+ status = ml_single_invoke_dynamic (single, in_data, in_info,
+ (ml_tensors_data_h *) &out_data, &out_info);
} else {
- status = ml_single_invoke (single, input, (ml_tensors_data_h *) &output);
+ status = ml_single_invoke (single, in_data, (ml_tensors_data_h *) &out_data);
}
if (status != ML_ERROR_NONE) {
goto done;
}
- if (!nns_convert_tensors_data (pipe_info, env, output, &result)) {
+ if (!nns_convert_tensors_data (pipe_info, env, out_data, &result)) {
nns_loge ("Failed to convert the result to data.");
result = NULL;
}
done:
- ml_tensors_data_destroy ((ml_tensors_data_h) input);
- ml_tensors_data_destroy ((ml_tensors_data_h) output);
+ ml_tensors_data_destroy ((ml_tensors_data_h) in_data);
+ ml_tensors_data_destroy ((ml_tensors_data_h) out_data);
ml_tensors_info_destroy (in_info);
ml_tensors_info_destroy (out_info);
return result;
* @brief Native method for single-shot API.
*/
jobject
-Java_org_nnsuite_nnstreamer_SingleShot_nativeGetInputInfo (JNIEnv * env, jobject thiz,
- jlong handle)
+Java_org_nnsuite_nnstreamer_SingleShot_nativeGetInputInfo (JNIEnv * env,
+ jobject thiz, jlong handle)
{
pipeline_info_s *pipe_info;
ml_single_h single;
ml_tensors_info_h info;
- int status;
jobject result = NULL;
pipe_info = CAST_TO_TYPE (handle, pipeline_info_s*);
single = pipe_info->pipeline_handle;
- status = ml_single_get_input_info (single, &info);
- if (status != ML_ERROR_NONE) {
+ if (ml_single_get_input_info (single, &info) != ML_ERROR_NONE) {
nns_loge ("Failed to get input info.");
goto done;
}
pipeline_info_s *pipe_info;
ml_single_h single;
ml_tensors_info_h info;
- int status;
jobject result = NULL;
pipe_info = CAST_TO_TYPE (handle, pipeline_info_s*);
single = pipe_info->pipeline_handle;
- status = ml_single_get_output_info (single, &info);
- if (status != ML_ERROR_NONE) {
+ if (ml_single_get_output_info (single, &info) != ML_ERROR_NONE) {
nns_loge ("Failed to get output info.");
goto done;
}
{
pipeline_info_s *pipe_info;
ml_single_h single;
- int status;
pipe_info = CAST_TO_TYPE (handle, pipeline_info_s*);
single = pipe_info->pipeline_handle;
- status = ml_single_set_timeout (single, (unsigned int) timeout);
- if (status != ML_ERROR_NONE) {
+ if (ml_single_set_timeout (single, (unsigned int) timeout) != ML_ERROR_NONE) {
nns_loge ("Failed to set the timeout.");
return JNI_FALSE;
}
pipe_info = CAST_TO_TYPE (handle, pipeline_info_s*);
single = pipe_info->pipeline_handle;
- if (ml_tensors_info_create (&in_info) != ML_ERROR_NONE)
+ if (ml_tensors_info_create (&in_info) != ML_ERROR_NONE) {
+ nns_loge ("Failed to create input info handle.");
return JNI_FALSE;
+ }
- if (nns_parse_tensors_info (pipe_info, env, in,
- (ml_tensors_info_s *) in_info) == FALSE)
+ if (!nns_parse_tensors_info (pipe_info, env, in, in_info)) {
+ nns_loge ("Failed to parse input tensor.");
goto done;
+ }
if (ml_single_set_input_info (single, in_info) != ML_ERROR_NONE) {
nns_loge ("Failed to set input info.");
done:
ml_tensors_info_destroy (in_info);
-
return ret;
}
#include "nnstreamer-capi-private.h"
#include "nnstreamer_plugin_api_filter.h"
-#ifndef DBG
-#define DBG FALSE
-#endif
-
#define TAG "NNStreamer-native"
#define nns_logi(...) \
#define nns_logd(...) \
__android_log_print (ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
-#if (DBG)
-#define print_log nns_logd
-#else
-#define print_log(...)
-#endif
-
#if GLIB_SIZEOF_VOID_P == 8
#define CAST_TO_LONG(p) (jlong)(p)
#define CAST_TO_TYPE(l,type) (type)(l)
* @retval #ML_ERROR_TIMED_OUT Failed to get the result from sink element.
*
*/
-static inline int ml_single_invoke_dynamic (ml_single_h single, const ml_tensors_data_h input, const ml_tensors_info_h in_info, ml_tensors_data_h *output, ml_tensors_info_h *out_info);
+int ml_single_invoke_dynamic (ml_single_h single, const ml_tensors_data_h input, const ml_tensors_info_h in_info, ml_tensors_data_h *output, ml_tensors_info_h *out_info);
/*************
* UTILITIES *
* @details Note that a model/framework may not support setting such information.
* @since_tizen 6.0
* @param[in] single The model handle.
- * @param[in] info The handle of input tensors information.
- * @param[out] info The handle of output tensors information. The caller is responsible for freeing the information with ml_tensors_info_destroy().
+ * @param[in] in_info The handle of input tensors information.
+ * @param[out] out_info The handle of output tensors information. The caller is responsible for freeing the information with ml_tensors_info_destroy().
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful
* @retval #ML_ERROR_NOT_SUPPORTED This implies that the given framework does not support dynamic dimensions.
* Use ml_single_set_input_info/ml_single_get_output_info APIs instead for this framework.
* @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
*/
-static inline int ml_single_update_info (ml_single_h single, const ml_tensors_info_h in_info, ml_tensors_info_h *out_info);
+int ml_single_update_info (ml_single_h single, const ml_tensors_info_h in_info, ml_tensors_info_h *out_info);
/**
* @brief Sets the maximum amount of time to wait for an output, in milliseconds.
*/
int ml_single_set_timeout (ml_single_h single, unsigned int timeout);
-/*****************************
- * STATIC INLINE DEFINITIONS *
- ****************************/
-
-/**
- * @brief Invokes the model with the given input data with the given info.
- */
-static inline int ml_single_invoke_dynamic (ml_single_h single,
- const ml_tensors_data_h input, const ml_tensors_info_h in_info,
- ml_tensors_data_h *output, ml_tensors_info_h *out_info)
-{
- int status;
- ml_tensors_info_h cur_in_info;
-
- status = ml_single_get_input_info (single, &cur_in_info);
- if (status != ML_ERROR_NONE)
- return status;
-
- status = ml_single_update_info (single, in_info, out_info);
- if (status != ML_ERROR_NONE)
- return status;
-
- status = ml_single_invoke (single, input, output);
- if (status != ML_ERROR_NONE) {
- ml_single_set_input_info (single, cur_in_info);
- ml_tensors_info_destroy (*out_info);
- }
-
- return status;
-}
-
-/**
- * @brief Sets the information (tensor dimension, type, name and so on) of required input data for the given model.
- */
-static inline int ml_single_update_info (ml_single_h single,
- const ml_tensors_info_h in_info, ml_tensors_info_h *out_info)
-{
- int status;
-
- status = ml_single_set_input_info (single, in_info);
- if (status != ML_ERROR_NONE)
- return status;
-
- return ml_single_get_output_info (single, out_info);
-}
-
/**
* @}
*/
/**
* @brief Sets the information (tensor dimension, type, name and so on) of required input data for the given model.
*/
-int ml_single_set_input_info (ml_single_h single, const ml_tensors_info_h info)
+int
+ml_single_set_input_info (ml_single_h single, const ml_tensors_info_h info)
{
ml_single *single_h;
GTensorFilterSingleClass *klass;
ML_SINGLE_HANDLE_UNLOCK (single_h);
return status;
}
+
/**
 * @brief Invokes the model with the given input data using the given input info.
 * @details On success, @a out_info holds a newly created tensors-info handle that
 *          the caller must free with ml_tensors_info_destroy(). On any failure the
 *          model's previous input info is restored and both out-params are NULL.
 * @param[in] single The model handle.
 * @param[in] input The input tensors data.
 * @param[in] in_info The input tensors information to apply for this invocation.
 * @param[out] output The output tensors data (NULL on failure).
 * @param[out] out_info The output tensors information (NULL on failure).
 * @return @c 0 (ML_ERROR_NONE) on success, otherwise a negative error value.
 */
int
ml_single_invoke_dynamic (ml_single_h single,
    const ml_tensors_data_h input, const ml_tensors_info_h in_info,
    ml_tensors_data_h * output, ml_tensors_info_h * out_info)
{
  int status;
  ml_tensors_info_h cur_in_info = NULL;

  if (!single || !input || !in_info || !output || !out_info)
    return ML_ERROR_INVALID_PARAMETER;

  /* init null so callers never see indeterminate values on failure */
  *output = NULL;
  *out_info = NULL;

  /* remember the current input info so it can be restored if invoke fails */
  status = ml_single_get_input_info (single, &cur_in_info);
  if (status != ML_ERROR_NONE)
    goto exit;

  /* apply the new input info and fetch the matching output info */
  status = ml_single_update_info (single, in_info, out_info);
  if (status != ML_ERROR_NONE)
    goto exit;

  status = ml_single_invoke (single, input, output);
  if (status != ML_ERROR_NONE) {
    /* roll back to the previous input info on invoke failure */
    ml_single_set_input_info (single, cur_in_info);
  }

exit:
  if (cur_in_info)
    ml_tensors_info_destroy (cur_in_info);

  if (status != ML_ERROR_NONE) {
    /* release out_info produced by ml_single_update_info on the error path */
    if (*out_info) {
      ml_tensors_info_destroy (*out_info);
      *out_info = NULL;
    }
  }

  return status;
}
+
+/**
+ * @brief Sets the information (tensor dimension, type, name and so on) of required input data for the given model.
+ */
+int
+ml_single_update_info (ml_single_h single,
+ const ml_tensors_info_h in_info, ml_tensors_info_h * out_info)
+{
+ int status;
+
+ if (!single || !in_info || !out_info)
+ return ML_ERROR_INVALID_PARAMETER;
+
+ /* init null */
+ *out_info = NULL;
+
+ status = ml_single_set_input_info (single, in_info);
+ if (status != ML_ERROR_NONE)
+ return status;
+
+ return ml_single_get_output_info (single, out_info);
+}
/**
* @brief Validates the given tensor info is valid.
*/
-static int
+static gboolean
ml_tensor_info_validate (const ml_tensor_info_s * info)
{
guint i;
if (!info)
- return ML_ERROR_INVALID_PARAMETER;
+ return FALSE;
if (info->type < 0 || info->type >= ML_TENSOR_TYPE_UNKNOWN)
- return ML_ERROR_INVALID_PARAMETER;
+ return FALSE;
for (i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
if (info->dimension[i] == 0)
- return ML_ERROR_INVALID_PARAMETER;
+ return FALSE;
}
- return ML_ERROR_NONE;
+ return TRUE;
}
/**
*valid = false;
for (i = 0; i < tensors_info->num_tensors; i++) {
- /* Failed if returned value is not 0 (ML_ERROR_NONE) */
- if (ml_tensor_info_validate (&tensors_info->info[i]) != ML_ERROR_NONE)
+ if (!ml_tensor_info_validate (&tensors_info->info[i]))
goto done;
}
ml_loge ("NNFW is not supported.");
status = ML_ERROR_NOT_SUPPORTED;
break;
+ case ML_NNFW_TYPE_MVNC:
+ /** @todo Need to check method for NCSDK2 */
+ ml_loge ("Intel Movidius NCSDK2 is not supported.");
+ status = ML_ERROR_NOT_SUPPORTED;
+ break;
default:
status = ML_ERROR_INVALID_PARAMETER;
break;
goto done;
}
break;
+ case ML_NNFW_TYPE_MVNC:
+ /** @todo Condition to support Movidius NCSDK2 */
+ if (nnstreamer_filter_find ("movidius-ncsdk2") == NULL) {
+ ml_logw ("Intel Movidius NCSDK2 is not supported.");
+ goto done;
+ }
+ break;
default:
break;
}
#include <tensor_filter_custom_easy.h>
-/** * @brief In-Code Test Function for custom-easy filter
+/**
+ * @brief In-Code Test Function for custom-easy filter
*/
-int cef_func_safe_memcpy (void *data, const GstTensorFilterProperties *prop,
+static int cef_func_safe_memcpy (void *data, const GstTensorFilterProperties *prop,
const GstTensorMemory *in, GstTensorMemory *out)
{
unsigned int t;