Prevent an error when the filter framework does not need a model file.
Add a flag to the nnfw struct and change the model-check condition in the transform callback.
Signed-off-by: Jaeyun Jung <jy1210.jung@samsung.com>
char *name; /**< Name of the neural network framework, searchable by FRAMEWORK property */
int allow_in_place; /**< TRUE(nonzero) if InPlace transfer of input-to-output is allowed. Not supported in main, yet */
int allocate_in_invoke; /**< TRUE(nonzero) if invoke_NN is going to allocate outputptr by itself and return the address via outputptr. Do not change this value after cap negotiation is complete (or the stream has been started). */
+ int run_without_model; /**< TRUE(nonzero) when the neural network framework does not need a model file. Tensor-filter will run invoke_NN without model. */
int (*invoke_NN) (const GstTensorFilterProperties * prop, void **private_data,
const GstTensorMemory * input, GstTensorMemory * output);
goto unknown_format;
if (G_UNLIKELY (!self->fw))
goto unknown_framework;
- if (G_UNLIKELY (!prop->model_file))
+ if (G_UNLIKELY (!self->fw->run_without_model) &&
+ G_UNLIKELY (!prop->model_file))
goto unknown_model;
if (G_UNLIKELY (!self->fw->invoke_NN))
goto unknown_invoke;
/* 0. Check all properties. */
silent_debug ("Invoking %s with %s model\n", self->fw->name,
- prop->model_file);
+ GST_STR_NULL (prop->model_file));
/* 1. Set input tensors from inbuf. */
g_assert (gst_buffer_n_memory (inbuf) == prop->input_meta.num_tensors);
.name = filter_subplugin_custom,
.allow_in_place = FALSE, /* custom cannot support in-place (output == input). */
.allocate_in_invoke = FALSE, /* GstTensorFilter allocates output buffers */
+ .run_without_model = FALSE, /* custom needs a so file */
.invoke_NN = custom_invoke,
/* We need to disable getI/O-dim or setI-dim with the first call */