tf_loadModelFile (const GstTensorFilterProperties * prop, void **private_data)
{
tf_data *tf;
- gboolean tf_mem_optmz;
if (*private_data != NULL) {
tf = *private_data;
}
}
- tf_mem_optmz = nnsconf_get_custom_value_bool ("tensorflow",
- "enable_mem_optimization", FALSE);
-
tf = g_new0 (tf_data, 1); /** initialize tf Fill Zero! */
*private_data = tf;
tf->tf_private_data = tf_core_new (prop->model_file);
if (tf->tf_private_data) {
- if (tf_core_init (tf->tf_private_data, prop, tf_mem_optmz)) {
+ if (tf_core_init (tf->tf_private_data, prop)) {
g_printerr ("failed to initailize the object: tensorflow");
return -2;
}
* @return 0 if OK, non-zero if error.
*/
int
-tf_core_init (void * tf, const GstTensorFilterProperties * prop,
- const gboolean tf_mem_optmz)
+tf_core_init (void * tf, const GstTensorFilterProperties * prop)
{
TFCore *c = (TFCore *) tf;
return c->init (prop);
void *tf_core_new (const char * _model_path);
void tf_core_delete (void * tf);
- int tf_core_init (void * tf, const GstTensorFilterProperties * prop,
- const gboolean tf_mem_optmz);
+ int tf_core_init (void * tf, const GstTensorFilterProperties * prop);
const char *tf_core_getModelPath (void * tf);
int tf_core_getInputDim (void * tf, GstTensorsInfo * info);
int tf_core_getOutputDim (void * tf, GstTensorsInfo * info);
nnstreamer_install_conf.set('ENABLE_ENV_VAR', get_option('enable-env-var'))
nnstreamer_install_conf.set('ENABLE_SYMBOLIC_LINK', get_option('enable-symbolic-link'))
-nnstreamer_install_conf.set('TF_MEM_OPTMZ', get_option('enable-tensorflow-mem-optmz'))
nnstreamer_install_conf.set('TORCH_USE_GPU', get_option('enable-pytorch-use-gpu'))
# Install .ini
option('install-test', type: 'boolean', value: false)
option('enable-tensorflow-lite', type: 'boolean', value: true)
option('enable-tensorflow', type: 'boolean', value: true)
-option('enable-tensorflow-mem-optmz', type: 'boolean', value: true)
option('enable-caffe2', type: 'boolean', value: true)
option('enable-pytorch', type: 'boolean', value: true)
option('enable-pytorch-use-gpu', type: 'boolean', value: false) # default value, can be specified at run time
# Set 1 or True if you want to eliminate memcpy of tensorflow input frames for faster execution.
# It may break in some special cases (running tensorflow & nnstreamer in a chroot of an AWS VM); in such a case, keep it 0 or FALSE.
-[tensorflow]
-enable_mem_optimization=@TF_MEM_OPTMZ@
# Set 1 or True if you want to use NNAPI with tensorflow-lite, which enables to use NNAPI backend, which may use GPU or NPU/TPU.
[tensorflowlite]