1 /* SPDX-License-Identifier: LGPL-2.1-only */
3 * NNStreamer Common Header's Contents (pipeline extension)
4 * Copyright (C) 2020 MyungJoo Ham <myungjoo.ham@samsung.com>
7 * @file nnstreamer_plugin_api_impl.c
9 * @brief Common data for NNStreamer, the GStreamer plugin for neural networks
10 * @see https://github.com/nnstreamer/nnstreamer
11 * @author MyungJoo Ham <myungjoo.ham@samsung.com>
12 * @bug No known bugs except for NYI items
16 #include <nnstreamer_util.h>
18 #include <tensor_common.h>
/* String names for tensor_time_sync_mode; array index follows the enum value. */
20 static const gchar *gst_tensor_time_sync_mode_string[] = {
21 [SYNC_NOSYNC] = "nosync",
22 [SYNC_SLOWEST] = "slowest",
23 [SYNC_BASEPAD] = "basepad",
24 [SYNC_REFRESH] = "refresh",
29 * @brief The old rank of tensor.
31 #define NNS_TENSOR_RANK_LIMIT_PREV (4)
/* Magic value stamped into a memory block that carries extra-tensor info. */
33 #define NNS_TENSOR_EXTRA_MAGIC 0xf00dc0de
36 * @brief Data structure to describe an "extra" tensor data.
37 * This represents the information of the NNS_TENSOR_SIZE_LIMIT-th memory block for tensor stream.
/* Number of tensors carried beyond the regular memory-block limit. */
43 uint32_t num_extra_tensors;
/* Per-tensor info for each extra tensor slot. */
45 GstTensorInfo infos[NNS_TENSOR_SIZE_EXTRA_LIMIT];
49 * @brief Check if given memory has extra tensors.
50 * @param[in] map GstMapInfo of GstMemory to be checked.
51 * @return TRUE if @map has extra tensors, otherwise FALSE.
54 gst_memory_map_is_extra_tensor (GstMapInfo * map)
56 GstTensorExtraInfo *extra_info;
59 g_return_val_if_fail (map != NULL, FALSE);
/* Too small to even hold the extra-info header: cannot be an extra tensor. */
61 if (map->size < sizeof (GstTensorExtraInfo))
64 extra_info = (GstTensorExtraInfo *) map->data;
66 /* check magic in header (extra info) of the memory */
67 is_extra = (extra_info && extra_info->magic == NNS_TENSOR_EXTRA_MAGIC);
73 * @brief Initialize GstTensorExtraInfo structure with given @a memory.
74 * @param[in/out] extra GstTensorExtraInfo to be initialized.
75 * @param[in] reserved_size The memory size of extra memory block.
78 gst_tensor_extra_info_init (GstTensorExtraInfo * extra, gsize reserved_size)
82 g_return_if_fail (extra != NULL);
/* Stamp the magic so gst_memory_map_is_extra_tensor() recognizes this block. */
84 extra->magic = NNS_TENSOR_EXTRA_MAGIC;
86 extra->num_extra_tensors = 0;
88 /* set reserved size of NNS_TENSOR_SIZE_LIMIT-th memory */
89 extra->reserved = reserved_size;
/* Reset every extra tensor-info slot to its initial (empty) state. */
90 for (i = 0; i < NNS_TENSOR_SIZE_EXTRA_LIMIT; ++i) {
91 gst_tensor_info_init (&extra->infos[i]);
96 * @brief Get the corresponding mode from the string value.
97 * @param[in] str The string value for the mode.
98 * @return Corresponding mode for the string. SYNC_END for errors.
100 tensor_time_sync_mode
101 gst_tensor_time_sync_get_mode (const gchar * str)
105 index = find_key_strv (gst_tensor_time_sync_mode_string, str);
/* find_key_strv() yields a negative index when @str is not in the table. */
107 return (index < 0) ? SYNC_END : index;
111 * @brief Get the time-sync mode string.
112 * @return Corresponding mode string.
115 gst_tensor_time_sync_get_mode_string (tensor_time_sync_mode mode)
/* NOTE(review): no range check here; @mode is assumed to be a valid enum value. */
117 return gst_tensor_time_sync_mode_string[mode];
121 * @brief Setup time sync option.
122 * @param[in/out] sync "this" pointer. Sync mode & option MUST BE set already.
123 * @return True if successfully set the option.
126 gst_tensor_time_sync_set_option_data (tensor_time_sync_data * sync)
128 g_return_val_if_fail (sync != NULL, FALSE);
130 if (sync->mode == SYNC_END || sync->option == NULL)
133 switch (sync->mode) {
/* The SYNC_BASEPAD option string has the form "sink_id:duration". */
140 g_auto (GStrv) strv = g_strsplit (sync->option, ":", 2);
145 sink_id = (guint) g_ascii_strtoull (strv[0], NULL, 10);
150 duration = (guint) g_ascii_strtoull (strv[1], NULL, 10);
154 sync->data_basepad.sink_id = sink_id;
155 sync->data_basepad.duration = duration;
160 GST_WARNING ("Unknown mode = %d", sync->mode);
168 * @brief Internal function to detect EOS using the number of empty pads.
169 * @param[in] collect Collect pad.
170 * @param[in] sync Synchronization option.
171 * @param[in] empty The number of empty pads (pad has no buffer).
172 * @return True if EOS.
175 _gst_tensor_time_sync_is_eos (GstCollectPads * collect,
176 tensor_time_sync_data * sync, guint empty)
179 gboolean is_eos = FALSE;
/* Total number of collected pads; compared against @empty per sync mode. */
181 total = g_slist_length (collect->data);
183 switch (sync->mode) {
198 * @brief A function call to decide current timestamp among collected pads based on PTS.
199 * It will decide current timestamp according to sync option.
200 * GstMeta is also copied with same sync mode.
203 gst_tensor_time_sync_get_current_time (GstCollectPads * collect,
204 tensor_time_sync_data * sync, GstClockTime * current_time,
205 GstBuffer * tensors_buf)
208 guint count, empty_pad;
210 g_return_val_if_fail (collect != NULL, FALSE);
211 g_return_val_if_fail (sync != NULL, FALSE);
212 g_return_val_if_fail (current_time != NULL, FALSE);
214 walk = collect->data;
215 count = empty_pad = 0;
/* Walk every collected pad, peeking (not popping) its queued buffer. */
218 GstCollectData *data;
220 gboolean need_update = FALSE;
222 data = (GstCollectData *) walk->data;
223 buf = gst_collect_pads_peek (collect, data);
224 walk = g_slist_next (walk);
227 switch (sync->mode) {
/* Take the later PTS when it exceeds the current candidate timestamp. */
232 if (*current_time < GST_BUFFER_PTS (buf))
/* Match the pad index against the configured basepad sink id. */
236 if (count == sync->data_basepad.sink_id)
243 *current_time = GST_BUFFER_PTS (buf);
244 gst_buffer_copy_into (tensors_buf, buf, GST_BUFFER_COPY_METADATA,
247 gst_buffer_unref (buf);
255 return _gst_tensor_time_sync_is_eos (collect, sync, empty_pad);
259 * @brief A function to be called while processing a flushing event.
260 * It should clear old buffer and reset pad data.
263 gst_tensor_time_sync_flush (GstCollectPads * collect)
266 GstTensorCollectPadData *pad;
268 g_return_if_fail (collect != NULL);
270 walk = collect->data;
/* Drop the last buffer kept in each pad's private data. */
272 pad = (GstTensorCollectPadData *) walk->data;
275 gst_buffer_unref (pad->buffer);
279 walk = g_slist_next (walk);
284 * @brief Internal function to update buffer in pad data based on the sync mode.
287 _gst_tensor_time_sync_buffer_update (GstCollectPads * collect,
288 GstCollectData * data, GstClockTime current, GstClockTime base,
289 tensor_time_sync_data * sync)
291 GstTensorCollectPadData *pad;
294 pad = (GstTensorCollectPadData *) data;
296 buf = gst_collect_pads_peek (collect, data);
/* Queued buffer is older than the decided timestamp: pop and store it. */
298 if (GST_BUFFER_PTS (buf) < current) {
299 gst_buffer_unref (buf);
300 if (pad->buffer != NULL)
301 gst_buffer_unref (pad->buffer);
302 pad->buffer = gst_collect_pads_pop (collect, data);
/*
 * Keep the previously stored buffer when it is closer to @current
 * (SYNC_SLOWEST), or when the new buffer's PTS distance exceeds @base
 * (SYNC_BASEPAD); otherwise replace it with the newly popped buffer.
 */
306 if ((sync->mode == SYNC_SLOWEST && pad->buffer != NULL &&
307 (ABS (GST_CLOCK_DIFF (current, GST_BUFFER_PTS (pad->buffer))) <
308 ABS (GST_CLOCK_DIFF (current, GST_BUFFER_PTS (buf))))) ||
309 (sync->mode == SYNC_BASEPAD && pad->buffer != NULL &&
310 (((GstClockTime) ABS (GST_CLOCK_DIFF (current,
311 GST_BUFFER_PTS (buf)))) > base))) {
312 /* keep last buffer */
314 /* update last buffer */
315 if (pad->buffer != NULL)
316 gst_buffer_unref (pad->buffer);
317 pad->buffer = gst_collect_pads_pop (collect, data);
320 gst_buffer_unref (buf);
327 * @brief A function call to make tensors from collected pads.
328 * It decides which buffer is going to be used according to sync option.
329 * @return True to push buffer.
332 gst_tensor_time_sync_buffer_from_collectpad (GstCollectPads * collect,
333 tensor_time_sync_data * sync, GstClockTime current_time,
334 GstBuffer * tensors_buf, GstTensorsConfig * configs, gboolean * is_eos)
337 GstCollectData *data;
338 GstTensorCollectPadData *pad;
339 GstBuffer *buf = NULL;
341 gint old_numerator = G_MAXINT;
342 gint old_denominator = G_MAXINT;
343 guint counting, empty_pad;
344 GstTensorsConfig in_configs;
345 GstClockTime base_time = 0;
346 GstTensorInfo *_info;
348 GstMemory *in_mem[NNS_TENSOR_SIZE_LIMIT];
349 tensor_format in_formats[NNS_TENSOR_SIZE_LIMIT];
351 g_return_val_if_fail (collect != NULL, FALSE);
352 g_return_val_if_fail (sync != NULL, FALSE);
353 g_return_val_if_fail (tensors_buf != NULL, FALSE);
354 g_return_val_if_fail (configs != NULL, FALSE);
355 g_return_val_if_fail (is_eos != NULL, FALSE);
357 walk = collect->data;
358 counting = empty_pad = 0;
/* SYNC_BASEPAD: derive base_time from the designated base pad's buffers. */
360 if (sync->mode == SYNC_BASEPAD) {
361 walk = g_slist_nth (walk, sync->data_basepad.sink_id);
363 GST_ERROR_OBJECT (collect, "Cannot get GstCollectData from GSList");
367 data = (GstCollectData *) walk->data;
368 pad = (GstTensorCollectPadData *) data;
370 buf = gst_collect_pads_peek (collect, data);
372 if (pad->buffer != NULL)
374 MIN ((GstClockTimeDiff) sync->data_basepad.duration,
375 ABS (GST_CLOCK_DIFF (GST_BUFFER_PTS (buf),
376 GST_BUFFER_PTS (pad->buffer))) - 1);
377 gst_buffer_unref (buf);
/* Iterate all pads, reading each pad's config and picking its buffer. */
381 walk = collect->data;
383 gst_tensors_config_init (&in_configs);
386 gboolean configured = FALSE;
387 gboolean is_empty = FALSE;
389 data = (GstCollectData *) walk->data;
390 pad = (GstTensorCollectPadData *) data;
392 if (gst_pad_has_current_caps (pad->pad)) {
393 GstCaps *caps = gst_pad_get_current_caps (pad->pad);
394 GstStructure *s = gst_caps_get_structure (caps, 0);
/* Free any previously parsed config before re-parsing from current caps. */
396 if (gst_tensors_config_validate (&in_configs))
397 gst_tensors_config_free (&in_configs);
399 gst_tensors_config_from_structure (&in_configs, s);
400 gst_caps_unref (caps);
402 configured = gst_tensors_config_validate (&in_configs);
406 * This would be an internal logic error.
407 * in_configs should be already confirmed valid at the negotiation phase
408 * and this function should be called in a running pipeline.
409 * If new sync mode is enabled (e.g., handle output when a pad gets new buffer),
410 * this may cause unexpected exception.
/* Track the smallest framerate numerator/denominator seen across pads. */
416 if (in_configs.rate_d < old_denominator)
417 old_denominator = in_configs.rate_d;
418 if (in_configs.rate_n < old_numerator)
419 old_numerator = in_configs.rate_n;
421 walk = g_slist_next (walk);
/* Per sync mode: choose the buffer to merge from this pad. */
423 switch (sync->mode) {
427 if (!_gst_tensor_time_sync_buffer_update (collect, data,
428 current_time, base_time, sync))
430 buf = gst_buffer_ref (pad->buffer);
431 is_empty = (buf == NULL);
434 buf = gst_collect_pads_pop (collect, data);
435 is_empty = (buf == NULL);
438 buf = gst_collect_pads_pop (collect, data);
/* Remember the most recent buffer so it can be reused when the pad is empty. */
440 if (pad->buffer != NULL) {
441 gst_buffer_unref (pad->buffer);
443 pad->buffer = gst_buffer_ref (buf);
445 if (pad->buffer == NULL) {
447 ml_logd ("Not the all buffers are arrived yet.");
451 buf = gst_buffer_ref (pad->buffer);
/* Collect every tensor memory of this pad's buffer into in_mem[]. */
458 if (GST_IS_BUFFER (buf)) {
459 guint32 n_tensor = gst_tensor_buffer_get_count (buf);
460 buf = gst_tensor_buffer_from_config (buf, &in_configs);
462 /** These are internal logic error. If given inputs are incorrect,
463 the negotiation should have been failed before this stage. */
464 if (gst_tensors_config_is_static (&in_configs))
465 g_assert (n_tensor == in_configs.info.num_tensors);
466 g_assert ((counting + n_tensor) <= NNS_TENSOR_SIZE_LIMIT);
/* Output becomes flexible if any input pad is flexible. */
468 if (gst_tensors_config_is_flexible (&in_configs))
469 configs->info.format = _NNS_TENSOR_FORMAT_FLEXIBLE;
471 for (i = 0; i < n_tensor; ++i) {
472 in_mem[counting] = gst_tensor_buffer_get_nth_memory (buf, i);
475 gst_tensor_info_copy (gst_tensors_info_get_nth_info (&configs->info,
476 counting), gst_tensors_info_get_nth_info (&in_configs.info, i));
477 in_formats[counting] = in_configs.info.format;
481 gst_buffer_unref (buf);
487 /* append memories to output buffer */
488 for (i = 0; i < counting; i++) {
489 _info = gst_tensors_info_get_nth_info (&configs->info, i);
492 if (gst_tensors_config_is_flexible (configs)) {
493 /* append header if input tensor is not flexible */
494 if (in_formats[i] != _NNS_TENSOR_FORMAT_FLEXIBLE) {
495 GstTensorMetaInfo meta;
497 gst_tensor_info_convert_to_meta (_info, &meta);
498 mem = gst_tensor_meta_info_append_header (&meta, in_mem[i]);
499 gst_memory_unref (in_mem[i]);
503 if (!gst_tensor_buffer_append_memory (tensors_buf, mem, _info)) {
/* On failure, release the memories not yet appended to avoid a leak. */
504 for (j = i + 1; j < counting; j++)
505 gst_memory_unref (in_mem[j]);
507 nns_loge ("Failed to append memory to buffer.");
/* Record the final merged configuration of the output buffer. */
512 configs->info.num_tensors = counting;
513 configs->rate_d = old_denominator;
514 configs->rate_n = old_numerator;
516 GST_BUFFER_PTS (tensors_buf) = current_time;
518 gst_tensors_config_free (&in_configs);
521 *is_eos = _gst_tensor_time_sync_is_eos (collect, sync, empty_pad);
526 * @brief Configure gst-buffer with tensors information.
527 * NNStreamer handles single memory chunk as single tensor.
528 * If incoming buffer has invalid memories, separate it and generate new gst-buffer using tensors information.
529 * Note that this function always takes the ownership of input buffer.
530 * @param in input buffer
531 * @param config tensors config structure
532 * @return Newly allocated buffer. Null if failed. Caller should unref the buffer using gst_buffer_unref().
535 gst_tensor_buffer_from_config (GstBuffer * in, GstTensorsConfig * config)
537 GstBuffer *out = NULL;
538 GstMemory *all = NULL;
542 gsize mem_size[NNS_TENSOR_MEMORY_MAX];
543 gboolean configured = FALSE;
544 gboolean is_extra = FALSE;
546 if (!GST_IS_BUFFER (in)) {
547 nns_loge ("Failed to get tensor buffer, invalid input buffer.");
551 if (!gst_tensors_config_validate (config)) {
552 nns_loge ("Failed to get tensor buffer, invalid tensor configuration.");
556 num = gst_buffer_n_memory (in);
557 total = gst_buffer_get_size (in);
559 /* get memory size */
/* Static config: expected chunk sizes are known from the tensors info. */
560 if (gst_tensors_config_is_static (config)) {
561 if (num == config->info.num_tensors) {
562 /* Do nothing, pass input buffer. */
563 out = gst_buffer_ref (in);
567 num = config->info.num_tensors;
/* More tensors than memory blocks: fold the overflow into the last block. */
568 if ((is_extra = (num > NNS_TENSOR_MEMORY_MAX)))
569 num = NNS_TENSOR_MEMORY_MAX;
570 for (i = 0; i < num; i++)
571 mem_size[i] = gst_tensors_info_get_size (&config->info, i);
573 mem_size[num - 1] += sizeof (GstTensorExtraInfo);
574 for (; i < config->info.num_tensors; i++)
575 mem_size[num - 1] += gst_tensors_info_get_size (&config->info, i);
579 /* Suppose it is already configured. */
580 out = gst_buffer_ref (in);
/* Flexible config: walk the mapped data, sizing chunks from meta headers. */
584 if (!gst_buffer_map (in, &map, GST_MAP_READ)) {
585 nns_loge ("Failed to get tensor buffer, cannot get the memory info.");
591 while (offset < total) {
592 GstTensorMetaInfo meta;
593 gpointer h = map.data + offset;
595 if (num >= NNS_TENSOR_MEMORY_MAX - 1) {
596 /* Suppose remained memory may include extra tensors. */
597 mem_size[num++] = total - offset;
601 gst_tensor_meta_info_parse_header (&meta, h);
602 mem_size[num] = gst_tensor_meta_info_get_header_size (&meta);
603 mem_size[num] += gst_tensor_meta_info_get_data_size (&meta);
605 offset += mem_size[num];
609 gst_buffer_unmap (in, &map);
612 /* Do nothing, pass input buffer. */
613 out = gst_buffer_ref (in);
618 /* configure output buffer */
/* Re-slice the input as shared sub-memories, one per computed chunk size. */
619 out = gst_buffer_new ();
620 all = gst_buffer_get_all_memory (in);
623 for (i = 0; i < num; i++) {
624 /* invalid memory size */
625 if (offset + mem_size[i] > total) {
626 nns_loge ("Failed to get tensor buffer, data size is mismatched.");
630 gst_buffer_append_memory (out, gst_memory_share (all, offset, mem_size[i]));
631 offset += mem_size[i];
634 gst_buffer_copy_into (out, in, GST_BUFFER_COPY_METADATA, 0, -1);
/* This function owns @in: always release it before returning. */
639 gst_buffer_unref (in);
642 gst_memory_unref (all);
646 gst_buffer_unref (out);
655 * @brief Internal struct to handle aggregation data in hash table.
660 } gst_tensor_aggregation_data_s;
662 #define AGGREGATION_DEFAULT_KEY 0xC0FFEEU
665 * @brief Internal function to free aggregation data.
668 gst_tensor_aggregation_free_data (gpointer data)
670 gst_tensor_aggregation_data_s *aggr;
672 aggr = (gst_tensor_aggregation_data_s *) data;
/* Flush pending bytes before dropping the adapter reference. */
674 gst_adapter_clear (aggr->adapter);
675 g_object_unref (aggr->adapter);
682 * @brief Internal function to add new aggregation data.
684 static gst_tensor_aggregation_data_s *
685 gst_tensor_aggregation_add_data (GHashTable * table, const guint32 key)
687 gst_tensor_aggregation_data_s *aggr;
690 g_return_val_if_fail (table != NULL, NULL);
/* Fall back to the reserved default key (used when key is 0/unspecified). */
692 hashkey = AGGREGATION_DEFAULT_KEY;
695 aggr = g_new0 (gst_tensor_aggregation_data_s, 1);
696 aggr->adapter = gst_adapter_new ();
/* Table owns the entry; freed via gst_tensor_aggregation_free_data(). */
698 g_hash_table_insert (table, GINT_TO_POINTER (hashkey), aggr);
703 * @brief Internal function to get aggregation data.
705 static gst_tensor_aggregation_data_s *
706 gst_tensor_aggregation_get_data (GHashTable * table, const guint32 key)
708 g_return_val_if_fail (table != NULL, NULL);
/* Key 0 is reserved to mean "use the default adapter". */
710 return (gst_tensor_aggregation_data_s *) g_hash_table_lookup (table,
711 GINT_TO_POINTER (key == 0 ? AGGREGATION_DEFAULT_KEY : key));
715 * @brief Internal function to remove all buffers from aggregation data.
718 gst_tensor_aggregation_clear_internal (gpointer key, gpointer value,
721 gst_tensor_aggregation_data_s *aggr;
726 aggr = (gst_tensor_aggregation_data_s *) value;
/* Drop all queued bytes; the adapter object itself stays alive. */
728 gst_adapter_clear (aggr->adapter);
733 * @brief Gets new hash table for tensor aggregation.
734 * @return Newly allocated hash table, caller should release this using g_hash_table_destroy().
737 gst_tensor_aggregation_init (void)
/* Values are freed automatically on removal/destroy via free_data callback. */
741 table = g_hash_table_new_full (g_direct_hash, g_direct_equal, NULL,
742 gst_tensor_aggregation_free_data);
745 * Add default adapter (for the case if buffer has no specific id).
746 * If gst-buffer has tensor-meta which includes client-id,
747 * e.g., aggregation frames from multiple clients on query-server pipeline,
748 * nnstreamer element should parse meta and request adapter with this id.
749 * However, on normal pipeline, gst-buffer does not contain tensor-meta,
750 * then the element may request adapter with null key string.
752 gst_tensor_aggregation_add_data (table, AGGREGATION_DEFAULT_KEY);
758 * @brief Clears buffers from adapter.
759 * @param table a hash table instance initialized with gst_tensor_aggregation_init()
760 * @param key the key to look up (set null to get default adapter)
763 gst_tensor_aggregation_clear (GHashTable * table, const guint32 key)
765 gst_tensor_aggregation_data_s *aggr;
767 g_return_if_fail (table != NULL);
/* NOTE(review): aggr may be NULL for an unknown key — presumably
 * clear_internal guards against a NULL value; confirm in full source. */
769 aggr = gst_tensor_aggregation_get_data (table, key);
770 gst_tensor_aggregation_clear_internal (NULL, aggr, NULL);
774 * @brief Clears buffers from all adapters in hash table.
775 * @param table a hash table instance initialized with gst_tensor_aggregation_init()
778 gst_tensor_aggregation_clear_all (GHashTable * table)
/* Reuse the per-entry clear callback for every entry in the table. */
780 g_hash_table_foreach (table, gst_tensor_aggregation_clear_internal, NULL);
784 * @brief Gets adapter from hash table.
785 * @param table a hash table instance initialized with gst_tensor_aggregation_init()
786 * @param key the key to look up (set null to get default adapter)
787 * @return gst-adapter instance. DO NOT release this instance.
790 gst_tensor_aggregation_get_adapter (GHashTable * table, const guint32 key)
792 gst_tensor_aggregation_data_s *aggr;
794 g_return_val_if_fail (table != NULL, NULL);
796 aggr = gst_tensor_aggregation_get_data (table, key);
/* Lazily create an entry for a key that has not been seen yet. */
799 aggr = gst_tensor_aggregation_add_data (table, key);
802 return aggr->adapter;
806 * @brief Internal function to check tensor dimensions to append old caps for backward compatibility (rank 4).
809 _append_prev_caps (const GstTensorsConfig * config)
811 GstTensorsInfo *info;
812 GstTensorInfo *_info;
813 guint i, rank, min_rank;
815 g_return_val_if_fail (config != NULL, FALSE);
817 info = (GstTensorsInfo *) (&config->info);
818 if (!gst_tensors_info_validate (info))
821 for (i = 0; i < info->num_tensors; i++) {
822 _info = gst_tensors_info_get_nth_info (info, i);
824 rank = gst_tensor_dimension_get_rank (_info->dimension);
825 min_rank = gst_tensor_dimension_get_min_rank (_info->dimension);
/* Compare each tensor's rank range against the previous limit (rank 4). */
827 if (rank <= NNS_TENSOR_RANK_LIMIT_PREV ||
828 min_rank > NNS_TENSOR_RANK_LIMIT_PREV)
836 * @brief Internal function to get caps for single tensor from config.
839 _get_tensor_caps (const GstTensorsConfig * config)
842 GstStructure *structure = NULL;
843 GstTensorsInfo *info;
844 GstTensorInfo *_info;
846 g_return_val_if_fail (config != NULL, NULL);
848 info = (GstTensorsInfo *) (&config->info);
/* other/tensor caps describe exactly one tensor. */
849 if (info->num_tensors > 1)
852 caps = gst_caps_from_string (GST_TENSOR_CAP_DEFAULT);
853 _info = gst_tensors_info_get_nth_info (info, 0);
855 /* structure for backward compatibility */
856 if (_append_prev_caps (config))
857 structure = gst_structure_new_empty (NNS_MIMETYPE_TENSOR);
859 if (gst_tensor_dimension_is_valid (_info->dimension)) {
861 g_autofree gchar *dim_str =
862 gst_tensor_get_dimension_string (_info->dimension);
864 gst_caps_set_simple (caps, "dimension", G_TYPE_STRING, dim_str, NULL);
/* Rank-4 dimension string for the backward-compatible structure. */
868 g_autofree gchar *dim_str =
869 gst_tensor_get_rank_dimension_string (_info->dimension,
870 NNS_TENSOR_RANK_LIMIT_PREV);
872 gst_structure_set (structure, "dimension", G_TYPE_STRING, dim_str, NULL);
876 if (_info->type != _NNS_END) {
877 const gchar *type_str = gst_tensor_get_type_string (_info->type);
879 gst_caps_set_simple (caps, "type", G_TYPE_STRING, type_str, NULL);
882 gst_structure_set (structure, "type", G_TYPE_STRING, type_str, NULL);
885 if (config->rate_n >= 0 && config->rate_d > 0) {
886 gst_caps_set_simple (caps, "framerate", GST_TYPE_FRACTION,
887 config->rate_n, config->rate_d, NULL);
890 gst_structure_set (structure, "framerate", GST_TYPE_FRACTION,
891 config->rate_n, config->rate_d, NULL);
/* Attach the compatibility structure (caps take ownership). */
895 gst_caps_append_structure (caps, structure);
901 * @brief Internal function to get caps for multi tensors from config.
904 _get_tensors_caps (const GstTensorsConfig * config)
907 GstStructure *structure = NULL;
909 g_return_val_if_fail (config != NULL, NULL);
911 caps = gst_caps_from_string (GST_TENSORS_CAP_DEFAULT);
913 /* structure for backward compatibility */
914 if (_append_prev_caps (config))
915 structure = gst_structure_new_empty (NNS_MIMETYPE_TENSORS);
917 if (config->info.num_tensors > 0) {
918 g_autofree gchar *type_str =
919 gst_tensors_info_get_types_string (&config->info);
923 g_autofree gchar *dim_str =
924 gst_tensors_info_get_dimensions_string (&config->info);
926 gst_caps_set_simple (caps, "num_tensors", G_TYPE_INT,
927 config->info.num_tensors, NULL);
928 gst_caps_set_simple (caps, "dimensions", G_TYPE_STRING, dim_str, NULL);
929 gst_caps_set_simple (caps, "types", G_TYPE_STRING, type_str, NULL);
932 /* Set GstStructure */
/* Backward-compatible structure uses rank-4 dimension strings. */
934 g_autofree gchar *dim_str =
935 gst_tensors_info_get_rank_dimensions_string (&config->info,
936 NNS_TENSOR_RANK_LIMIT_PREV);
938 gst_structure_set (structure, "num_tensors", G_TYPE_INT,
939 config->info.num_tensors, NULL);
940 gst_structure_set (structure, "dimensions", G_TYPE_STRING, dim_str, NULL);
941 gst_structure_set (structure, "types", G_TYPE_STRING, type_str, NULL);
945 if (config->rate_n >= 0 && config->rate_d > 0) {
946 gst_caps_set_simple (caps, "framerate", GST_TYPE_FRACTION,
947 config->rate_n, config->rate_d, NULL);
950 gst_structure_set (structure, "framerate", GST_TYPE_FRACTION,
951 config->rate_n, config->rate_d, NULL);
/* Attach the compatibility structure (caps take ownership). */
955 gst_caps_append_structure (caps, structure);
961 * @brief Internal function to get caps for flexible tensor from config.
964 _get_flexible_caps (const GstTensorsConfig * config)
968 caps = gst_caps_from_string (GST_TENSORS_FLEX_CAP_DEFAULT);
/* Reflect the framerate from config when it is valid. */
970 if (config->rate_n >= 0 && config->rate_d > 0) {
971 gst_caps_set_simple (caps, "framerate", GST_TYPE_FRACTION,
972 config->rate_n, config->rate_d, NULL);
979 * @brief Check given mimetype is tensor stream.
980 * @param structure structure to be interpreted
981 * @return TRUE if mimetype is tensor stream
984 gst_structure_is_tensor_stream (const GstStructure * structure)
988 name = gst_structure_get_name (structure);
989 g_return_val_if_fail (name != NULL, FALSE);
/* Accept both other/tensor and other/tensors mime types. */
991 return (g_str_equal (name, NNS_MIMETYPE_TENSOR) ||
992 g_str_equal (name, NNS_MIMETYPE_TENSORS));
996 * @brief Get media type from structure
997 * @param structure structure to be interpreted
998 * @return corresponding media type (returns _NNS_MEDIA_INVALID for unsupported type)
1001 gst_structure_get_media_type (const GstStructure * structure)
1005 name = gst_structure_get_name (structure);
1007 g_return_val_if_fail (name != NULL, _NNS_MEDIA_INVALID);
/* Map well-known mime prefixes to the internal media-type enum. */
1009 if (g_str_has_prefix (name, "video/")) {
1013 if (g_str_has_prefix (name, "audio/")) {
1017 if (g_str_has_prefix (name, "text/")) {
1021 if (g_str_equal (name, "application/octet-stream")) {
1025 if (gst_structure_is_tensor_stream (structure)) {
1029 /* unknown or unsupported type */
1030 return _NNS_MEDIA_INVALID;
1034 * @brief Parse caps from peer pad and set tensors config.
1035 * @param pad GstPad to get the capabilities
1036 * @param config tensors config structure to be filled
1037 * @param is_fixed flag to be updated when peer caps is fixed (not mandatory, do nothing when the param is null)
1038 * @return TRUE if successfully configured from peer
1041 gst_tensors_config_from_peer (GstPad * pad, GstTensorsConfig * config,
1042 gboolean * is_fixed)
1045 GstStructure *structure;
1046 gboolean ret = FALSE;
1048 g_return_val_if_fail (GST_IS_PAD (pad), FALSE);
1049 g_return_val_if_fail (config != NULL, FALSE);
1051 gst_tensors_config_init (config);
/* Only the first structure of the peer caps is used for parsing. */
1053 if ((peer_caps = gst_pad_peer_query_caps (pad, NULL))) {
1054 if (gst_caps_get_size (peer_caps) > 0) {
1055 structure = gst_caps_get_structure (peer_caps, 0);
1056 ret = gst_tensors_config_from_structure (config, structure);
1059 if (ret && is_fixed)
1060 *is_fixed = gst_caps_is_fixed (peer_caps);
1062 gst_caps_unref (peer_caps);
1069 * @brief Check whether two structures have the same dimension
1072 _is_structure_dimension_same (GstStructure * st1, GstStructure * st2,
1073 const gchar * fieldname)
1075 const char *dim_str1;
1076 const char *dim_str2;
1078 g_return_val_if_fail (gst_structure_has_field (st1, fieldname), FALSE);
1079 g_return_val_if_fail (gst_structure_has_field (st2, fieldname), FALSE);
1081 dim_str1 = gst_structure_get_string (st1, fieldname);
1082 dim_str2 = gst_structure_get_string (st2, fieldname);
/* Semantic comparison of dimension strings, not byte equality. */
1084 return gst_tensor_dimension_string_is_equal (dim_str1, dim_str2);
1088 * @brief Update caps dimensions for negotiation
1089 * @param caps caps to compare and update
1090 * @param filter caps to compare
1093 gst_tensor_caps_update_dimension (GstCaps * caps, GstCaps * filter)
1095 GstStructure *st_caps, *st_filter;
1098 g_return_if_fail (GST_IS_CAPS (caps));
1099 g_return_if_fail (GST_IS_CAPS (filter));
/* For each tensor structure in @caps, find a matching one in @filter. */
1101 for (i = 0; i < gst_caps_get_size (caps); i++) {
1102 st_caps = gst_caps_get_structure (caps, i);
1104 if (!gst_structure_is_tensor_stream (st_caps))
1107 for (j = 0; j < gst_caps_get_size (filter); j++) {
1108 st_filter = gst_caps_get_structure (filter, j);
1110 if (!gst_structure_is_tensor_stream (st_filter))
/* Copy the filter's dimension string when the two are semantically equal,
 * so the strings become byte-identical for intersection. */
1114 if (gst_structure_has_field (st_caps, "dimension")
1115 && gst_structure_has_field (st_filter, "dimension")) {
1116 /* update dimensions for negotiation */
1117 if (_is_structure_dimension_same (st_caps, st_filter, "dimension")) {
1118 gst_structure_set (st_caps, "dimension", G_TYPE_STRING,
1119 gst_structure_get_string (st_filter, "dimension"), NULL);
1123 else if (gst_structure_has_field (st_caps, "dimensions")
1124 && gst_structure_has_field (st_filter, "dimensions")) {
1125 /* update dimensions for negotiation */
1126 if (_is_structure_dimension_same (st_caps, st_filter, "dimensions")) {
1127 gst_structure_set (st_caps, "dimensions", G_TYPE_STRING,
1128 gst_structure_get_string (st_filter, "dimensions"), NULL);
1136 * @brief Try intersecting @caps1 and @caps2 for tensor stream
1137 * @param caps1 a GstCaps to intersect
1138 * @param caps2 a GstCaps to intersect
1139 * @return TRUE if intersection would be not empty.
1142 gst_tensor_caps_can_intersect (GstCaps * caps1, GstCaps * caps2)
1144 GstStructure *structure1;
1145 GstStructure *structure2;
1146 GstStructure *structure_copy1;
1147 GstStructure *structure_copy2;
1152 gboolean intersectable;
/* Fast path: plain GstCaps intersection already succeeds. */
1154 if (gst_caps_can_intersect (caps1, caps2))
1157 structure1 = gst_caps_get_structure (caps1, 0);
1158 structure2 = gst_caps_get_structure (caps2, 0);
1160 if (!gst_structure_is_tensor_stream (structure1)
1161 || !gst_structure_is_tensor_stream (structure2))
1164 name1 = gst_structure_get_name (structure1);
1165 name2 = gst_structure_get_name (structure2);
1167 if (!g_str_equal (name1, name2))
/* Dimension strings may differ textually yet mean the same; compare
 * them semantically before deciding the caps cannot intersect. */
1171 if (g_str_equal (name1, NNS_MIMETYPE_TENSOR)) {
1172 if (gst_structure_has_field (structure1, "dimension")
1173 && gst_structure_has_field (structure2, "dimension")) {
1174 if (!_is_structure_dimension_same (structure1, structure2, "dimension"))
1179 else if (gst_structure_has_field (structure1, "dimensions")
1180 && gst_structure_has_field (structure2, "dimensions")) {
1181 if (!_is_structure_dimension_same (structure1, structure2, "dimensions"))
/* Dimensions agree: retry intersection with dimension fields removed. */
1185 structure_copy1 = gst_structure_copy (structure1);
1186 structure_copy2 = gst_structure_copy (structure2);
1188 gst_structure_remove_field (structure_copy1, "dimension");
1189 gst_structure_remove_field (structure_copy1, "dimensions");
1190 gst_structure_remove_field (structure_copy2, "dimension");
1191 gst_structure_remove_field (structure_copy2, "dimensions");
1194 gst_structure_can_intersect (structure_copy1, structure_copy2);
1196 gst_structure_free (structure_copy1);
1197 gst_structure_free (structure_copy2);
1199 return intersectable;
1203 * @brief Get pad caps from tensors config and caps of the peer connected to the pad.
1204 * @param pad GstPad to get possible caps
1205 * @param config tensors config structure
1206 * @return caps for given config. Caller is responsible for unreffing the returned caps.
1209 gst_tensor_pad_caps_from_config (GstPad * pad, const GstTensorsConfig * config)
1211 GstCaps *caps = NULL;
1213 gboolean is_flexible, peer_is_flexible, peer_has_tensor_caps;
1216 g_return_val_if_fail (GST_IS_PAD (pad), NULL);
1217 g_return_val_if_fail (config != NULL, NULL);
1219 templ = gst_pad_get_pad_template_caps (pad);
1221 /* check peer caps */
1222 peer_is_flexible = peer_has_tensor_caps = FALSE;
1224 peer_caps = gst_pad_peer_query_caps (pad, NULL);
1225 if (peer_caps && gst_caps_get_size (peer_caps) > 0) {
1228 GstTensorsConfig peer_config;
/* Does the peer accept the single-tensor (other/tensor) mime type? */
1230 tmp = gst_caps_from_string (GST_TENSOR_CAP_DEFAULT);
1231 peer_has_tensor_caps = gst_caps_can_intersect (tmp, peer_caps);
1232 gst_caps_unref (tmp);
1234 st = gst_caps_get_structure (peer_caps, 0);
1235 if (gst_tensors_config_from_structure (&peer_config, st))
1236 peer_is_flexible = gst_tensors_config_is_flexible (&peer_config);
1237 gst_tensors_config_free (&peer_config);
1240 /* other/tensors (flexible) */
1241 is_flexible = gst_tensors_config_is_flexible (config);
/* Flexible wins when either side (self or peer) is flexible. */
1243 if (is_flexible || peer_is_flexible) {
1244 caps = _get_flexible_caps (config);
/* Prefer other/tensor when a single tensor and the peer supports it. */
1249 if (config->info.num_tensors == 1 && peer_has_tensor_caps) {
1250 caps = _get_tensor_caps (config);
1252 gst_tensor_caps_update_dimension (caps, peer_caps);
1254 if (gst_caps_can_intersect (caps, templ))
1257 gst_caps_unref (caps);
1260 /* other/tensors (static) */
1261 caps = _get_tensors_caps (config);
1263 gst_tensor_caps_update_dimension (caps, peer_caps);
/* Candidate must still be compatible with the pad-template caps. */
1266 if (!gst_caps_can_intersect (caps, templ)) {
1267 gst_caps_unref (caps);
1272 gst_caps_unref (templ);
1274 gst_caps_unref (peer_caps);
1275 caps = gst_caps_truncate (caps);
1280 * @brief Get all possible caps from tensors config. Unlike gst_tensor_pad_caps_from_config(), this function does not check peer caps.
1281 * @param pad GstPad to get possible caps
1282 * @param config tensors config structure
1283 * @return caps for given config. Caller is responsible for unreffing the returned caps.
1286 gst_tensor_pad_possible_caps_from_config (GstPad * pad,
1287 const GstTensorsConfig * config)
1289 GstCaps *caps, *tmp;
1292 g_return_val_if_fail (GST_IS_PAD (pad), NULL);
1293 g_return_val_if_fail (config != NULL, NULL);
1295 caps = gst_caps_new_empty ();
1296 templ = gst_pad_get_pad_template_caps (pad);
1298 /* append caps for static tensor */
/* Each candidate is kept only if it is compatible with the template caps. */
1299 if (gst_tensors_config_is_static (config)) {
1301 if ((tmp = _get_tensor_caps (config)) != NULL) {
1302 if (gst_caps_can_intersect (tmp, templ))
1303 gst_caps_append (caps, tmp);
1305 gst_caps_unref (tmp);
1309 if ((tmp = _get_tensors_caps (config)) != NULL) {
1310 if (gst_caps_can_intersect (tmp, templ))
1311 gst_caps_append (caps, tmp);
1313 gst_caps_unref (tmp);
1317 /* caps for flexible tensor */
1318 if ((tmp = _get_flexible_caps (config)) != NULL) {
1319 if (gst_caps_can_intersect (tmp, templ))
1320 gst_caps_append (caps, tmp);
1322 gst_caps_unref (tmp);
1325 /* if no possible caps for given config, return null. */
1326 if (gst_caps_is_empty (caps)) {
1327 gst_caps_unref (caps);
1331 gst_caps_unref (templ);
1336 * @brief Get tensor format of current pad caps.
1337 * @param pad GstPad to check current caps.
1338 * @return The tensor_format of current pad caps.
1340 * If pad does not have tensor caps return _NNS_TENSOR_FORMAT_END
1343 gst_tensor_pad_get_format (GstPad * pad)
1346 tensor_format ret = _NNS_TENSOR_FORMAT_END;
1348 g_return_val_if_fail (GST_IS_PAD (pad), _NNS_TENSOR_FORMAT_END);
/* Format is derived from the first structure of the pad's current caps. */
1350 caps = gst_pad_get_current_caps (pad);
1352 GstStructure *structure;
1353 GstTensorsConfig config;
1355 structure = gst_caps_get_structure (caps, 0);
1356 if (gst_tensors_config_from_structure (&config, structure)) {
1357 ret = config.info.format;
1359 gst_caps_unref (caps);
1360 gst_tensors_config_free (&config);
1367 * @brief Get caps from tensors config (for other/tensors)
1368 * @param config tensors config info
1369 * @return caps for given config
1372 gst_tensors_caps_from_config (const GstTensorsConfig * config)
1376 g_return_val_if_fail (config != NULL, NULL);
/* Flexible-format streams get flexible caps; otherwise static other/tensors caps. */
1378 if (gst_tensors_config_is_flexible (config)) {
1379 caps = _get_flexible_caps (config);
1381 caps = _get_tensors_caps (config);
/* Keep only the first structure of the generated caps. */
1384 caps = gst_caps_truncate (caps);
1390 * @brief Get tensor caps from tensors config
1391 * @param config tensors config info
1392 * @return caps for given config
1395 gst_tensor_caps_from_config (const GstTensorsConfig * config)
1398 g_return_val_if_fail (config != NULL, NULL);
/* Build other/tensor (single tensor) caps and keep only the first structure. */
1400 caps = _get_tensor_caps (config);
1401 caps = gst_caps_truncate (caps);
1407 * @brief Parse structure and set tensors config (for other/tensors)
1408 * @param config tensors config structure to be filled
1409 * @param structure structure to be interpreted
1410 * @return TRUE if no error
1413 gst_tensors_config_from_structure (GstTensorsConfig * config,
1414 const GstStructure * structure)
/* Default format when the caps do not carry a "format" field. */
1417 tensor_format format = _NNS_TENSOR_FORMAT_STATIC;
1419 g_return_val_if_fail (config != NULL, FALSE);
/* Note: config is initialized before the structure check, so it is always in a defined state. */
1420 gst_tensors_config_init (config);
1422 g_return_val_if_fail (structure != NULL, FALSE);
1424 name = gst_structure_get_name (structure);
1426 if (g_str_equal (name, NNS_MIMETYPE_TENSOR)) {
1427 /* other/tensor is always static */
1428 config->info.num_tensors = 1;
/* Optional "dimension" and "type" fields describe the single tensor. */
1430 if (gst_structure_has_field (structure, "dimension")) {
1431 const gchar *dim_str = gst_structure_get_string (structure, "dimension");
1432 gst_tensor_parse_dimension (dim_str, config->info.info[0].dimension);
1435 if (gst_structure_has_field (structure, "type")) {
1436 const gchar *type_str = gst_structure_get_string (structure, "type");
1437 config->info.info[0].type = gst_tensor_get_type (type_str);
1439 } else if (g_str_equal (name, NNS_MIMETYPE_TENSORS)) {
/* other/tensors may carry an explicit format (static/flexible/...). */
1440 if (gst_structure_has_field (structure, "format")) {
1441 const gchar *format_str;
1443 format_str = gst_structure_get_string (structure, "format");
1444 format = gst_tensor_get_format (format_str);
/* Unknown format string: warn and fall back to static (format keeps its default in the elided else path — confirm). */
1446 if (format == _NNS_TENSOR_FORMAT_END) {
1448 ("Invalid format %s, it should be one of %s. Suppose tensor format is static.",
1449 _STR_NULL (format_str), GST_TENSOR_FORMAT_ALL);
1451 config->info.format = format;
/* Static streams additionally carry per-tensor counts, dimensions and types. */
1455 if (config->info.format == _NNS_TENSOR_FORMAT_STATIC) {
/* NOTE(review): num_tensors is guint but is read via a gint* cast — relies on matching representation. */
1456 gst_structure_get_int (structure, "num_tensors",
1457 (gint *) (&config->info.num_tensors));
1459 /* parse dimensions */
1460 if (gst_structure_has_field (structure, "dimensions")) {
1461 const gchar *dims_str;
1464 dims_str = gst_structure_get_string (structure, "dimensions");
/* num_dims presumably receives this call's result on an elided line — confirm. */
1466 gst_tensors_info_parse_dimensions_string (&config->info, dims_str);
/* Mismatch between num_tensors and parsed dimension count is only warned, not rejected. */
1468 if (config->info.num_tensors != num_dims) {
1469 nns_logw ("Invalid param, dimensions (%d) tensors (%d)\n",
1470 num_dims, config->info.num_tensors);
1475 if (gst_structure_has_field (structure, "types")) {
1476 const gchar *types_str;
1479 types_str = gst_structure_get_string (structure, "types");
1481 gst_tensors_info_parse_types_string (&config->info, types_str);
/* Same warn-only policy for a type-count mismatch. */
1483 if (config->info.num_tensors != num_types) {
1484 nns_logw ("Invalid param, types (%d) tensors (%d)\n",
1485 num_types, config->info.num_tensors);
1490 nns_logw ("Unsupported type = %s\n", name ? name : "Unknown");
/* Framerate applies to both mime types. */
1494 if (gst_structure_has_field (structure, "framerate")) {
1495 gst_structure_get_fraction (structure, "framerate", &config->rate_n,
1503 * @brief Parse memory and fill the tensor meta.
1504 * @param[out] meta tensor meta structure to be filled
1505 * @param[in] mem pointer to GstMemory to be parsed
1506 * @return TRUE if successfully set the meta
1509 gst_tensor_meta_info_parse_memory (GstTensorMetaInfo * meta, GstMemory * mem)
1515 g_return_val_if_fail (mem != NULL, FALSE);
1516 g_return_val_if_fail (meta != NULL, FALSE);
/* Reset meta so a failed parse leaves it in a defined state. */
1518 gst_tensor_meta_info_init (meta);
1520 /* Check header size of tensor-meta. */
1521 hsize = gst_tensor_meta_info_get_header_size (meta);
1522 msize = gst_memory_get_sizes (mem, NULL, NULL);
/* A too-small memory (msize < hsize) is presumably rejected in an elided line — confirm. */
1526 if (!gst_memory_map (mem, &map, GST_MAP_READ)) {
1527 nns_loge ("Failed to get the meta, cannot map the memory.");
/* Parse the serialized header at the start of the mapped data, then unmap. */
1531 ret = gst_tensor_meta_info_parse_header (meta, map.data);
1533 gst_memory_unmap (mem, &map);
1538 * @brief Append header to memory.
1539 * @param[in] meta tensor meta structure
1540 * @param[in] mem pointer to GstMemory
1541 * @return Newly allocated GstMemory (Caller should free returned memory using gst_memory_unref())
1544 gst_tensor_meta_info_append_header (GstTensorMetaInfo * meta, GstMemory * mem)
1546 GstMemory *new_mem = NULL;
1548 GstMapInfo old_map, new_map;
1550 g_return_val_if_fail (mem != NULL, NULL);
1551 g_return_val_if_fail (gst_tensor_meta_info_validate (meta), NULL);
1553 if (!gst_memory_map (mem, &old_map, GST_MAP_READ)) {
1554 nns_loge ("Failed to append header, cannot map the old memory.");
1558 /* memory size (header + old memory) */
1559 hsize = gst_tensor_meta_info_get_header_size (meta);
1560 msize = hsize + old_map.size;
/* NOTE(review): the allocation result is not NULL-checked here before mapping;
 * gst_memory_map() would fail on NULL, but the unref on the error path would
 * then warn — consider an explicit NULL check. */
1562 new_mem = gst_allocator_alloc (NULL, msize, NULL);
1563 if (!gst_memory_map (new_mem, &new_map, GST_MAP_WRITE)) {
1564 nns_loge ("Failed to append header, cannot map the new memory.");
1565 gst_memory_unmap (mem, &old_map);
1566 gst_memory_unref (new_mem);
1570 /* set header and copy old data */
1571 gst_tensor_meta_info_update_header (meta, new_map.data);
/* Layout of new_mem: [serialized meta header (hsize bytes)][original payload]. */
1572 memcpy (new_map.data + hsize, old_map.data, old_map.size);
1574 gst_memory_unmap (mem, &old_map);
1575 gst_memory_unmap (new_mem, &new_map);
1580 * @brief Get the nth GstMemory from given @a buffer.
1581 * @param[in] buffer GstBuffer to be parsed.
1582 * @param[in] index Index of GstMemory to be returned.
1583 * @return GstMemory if found, otherwise NULL (Caller should free returned memory using gst_memory_unref()).
1586 gst_tensor_buffer_get_nth_memory (GstBuffer * buffer, const guint index)
1588 guint i, num_tensors;
1590 GstMemory *extra_tensors_memory, *res_mem = NULL;
1591 GstMapInfo extra_tensors_map;
1592 GstTensorExtraInfo *extra_info;
1594 if (!GST_IS_BUFFER (buffer)) {
1595 nns_loge ("Failed to parse GstBuffer (invalid input buffer).");
/* Logical tensor count; may exceed the number of GstMemory blocks when extra tensors are packed. */
1599 num_tensors = gst_tensor_buffer_get_count (buffer);
1600 if (index >= num_tensors) {
1601 nns_loge ("Invalid index %u, the number of tensors in the buffer is %u.",
1602 index, num_tensors);
1606 /* If num_tensors is less than or equal to NNS_TENSOR_MEMORY_MAX, it's trivial. */
1607 if (num_tensors <= NNS_TENSOR_MEMORY_MAX || index < NNS_TENSOR_MEMORY_MAX - 1) {
1608 return gst_buffer_get_memory (buffer, index);
1611 /* If num_tensors is greater than NNS_TENSOR_MEMORY_MAX, we need to parse extra info. */
1612 extra_tensors_memory =
1613 gst_buffer_peek_memory (buffer, NNS_TENSOR_MEMORY_MAX - 1);
1614 if (!extra_tensors_memory) {
1615 nns_loge ("Failed to get %d-th memory", NNS_TENSOR_MEMORY_MAX);
1619 if (!gst_memory_map (extra_tensors_memory, &extra_tensors_map, GST_MAP_READ)) {
1620 nns_loge ("Failed to map %d-th memory", NNS_TENSOR_MEMORY_MAX);
1624 /* check header (extra info) of the memory */
1625 if (!gst_memory_map_is_extra_tensor (&extra_tensors_map)) {
1626 nns_loge ("Invalid extra header");
1630 /* parse the memory */
1631 extra_info = (GstTensorExtraInfo *) extra_tensors_map.data;
/* Layout of the last memory block:
 * [GstTensorExtraInfo header][tensor #(MAX-1), extra_info->reserved bytes][extra tensors ...] */
1632 offset = sizeof (GstTensorExtraInfo);
1634 /* If index is NNS_TENSOR_MEMORY_MAX - 1 */
1635 if (index == NNS_TENSOR_MEMORY_MAX - 1) {
/* Share (not copy) the region right after the header. */
1637 gst_memory_share (extra_tensors_memory, offset, extra_info->reserved);
/* Otherwise skip the reserved region plus all extra tensors preceding the requested one. */
1641 offset += extra_info->reserved;
1643 for (i = 1; i <= index - NNS_TENSOR_MEMORY_MAX; ++i) {
1644 offset += gst_tensor_info_get_size (&extra_info->infos[i - 1]);
1647 /* wrap it as GstMemory */
1649 gst_memory_share (extra_tensors_memory, offset,
1650 gst_tensor_info_get_size (&extra_info->infos[index -
1651 NNS_TENSOR_MEMORY_MAX]));
/* Unmap before returning; the shared sub-memory keeps its own reference. */
1654 gst_memory_unmap (extra_tensors_memory, &extra_tensors_map);
1659 * @brief Append @a memory to given @a buffer.
1660 * @param[in/out] buffer GstBuffer to be appended.
1661 * @param[in] memory GstMemory to append. This function takes ownership of this, even if it returns failure.
1662 * @param[in] info GstTensorInfo of given @a memory.
1663 * @return TRUE if successfully appended, otherwise FALSE.
1666 gst_tensor_buffer_append_memory (GstBuffer * buffer, GstMemory * memory,
1667 const GstTensorInfo * info)
1669 guint num_mems, offset, new_mem_index;
1670 GstMemory *new_memory = NULL, *last_memory = NULL;
1671 gsize new_mem_size, last_mem_size;
1672 GstMapInfo new_memory_map, last_memory_map, incoming_memory_map;
1673 GstTensorExtraInfo *extra_info;
1674 GstTensorMetaInfo meta;
1675 gboolean is_extra, is_static;
1676 gboolean appended = FALSE;
1678 if (!GST_IS_BUFFER (buffer)) {
1679 nns_loge ("Failed to append memory, given buffer is invalid.");
1684 nns_loge ("Failed to append memory, given memory is NULL.");
/* Incoming memory may carry its own tensor-meta header (flexible tensor);
 * otherwise it is assumed to be a static tensor described by @a info. */
1688 if (gst_tensor_meta_info_parse_memory (&meta, memory)) {
1689 is_static = (meta.format == _NNS_TENSOR_FORMAT_STATIC);
1691 /* Suppose given memory is static tensor. */
1694 /* Error case if given tensor-info is invalid. */
1695 if (!gst_tensor_info_validate (info)) {
1696 nns_loge ("Failed to get tensor info (invalid input info).");
1701 num_mems = gst_buffer_n_memory (buffer);
1703 /* trivial call to gst_buffer_append_memory */
1704 if (num_mems < NNS_TENSOR_MEMORY_MAX) {
/* Ownership of @a memory transfers to the buffer here. */
1705 gst_buffer_append_memory (buffer, memory);
1709 /* given buffer has NNS_TENSOR_MEMORY_MAX memory blocks */
1710 last_memory = gst_buffer_peek_memory (buffer, num_mems - 1);
1712 nns_loge ("Failed to get last memory");
1716 if (!gst_memory_map (last_memory, &last_memory_map, GST_MAP_READ)) {
1717 nns_loge ("Failed to map last memory");
/* New block size = last block (+ extra header if absent) + incoming payload. */
1722 new_mem_size = last_mem_size = gst_memory_get_sizes (last_memory, NULL, NULL);
1724 /* if the memory does not have proper header, append it */
1725 is_extra = gst_memory_map_is_extra_tensor (&last_memory_map);
1727 new_mem_size += sizeof (GstTensorExtraInfo);
1730 new_mem_size += gst_memory_get_sizes (memory, NULL, NULL);
1732 new_memory = gst_allocator_alloc (NULL, new_mem_size, NULL);
1734 nns_loge ("Failed to allocate memory for extra tensors.");
1738 if (!gst_memory_map (new_memory, &new_memory_map, GST_MAP_WRITE)) {
1739 nns_loge ("Failed to map extra memory");
1740 gst_memory_unref (new_memory);
1745 if (!gst_memory_map (memory, &incoming_memory_map, GST_MAP_READ)) {
1746 nns_loge ("Failed to map incoming memory");
1750 extra_info = (GstTensorExtraInfo *) new_memory_map.data;
1752 /* if the last_memory does not have proper header, append it */
/* Fresh header: magic set, num_extra_tensors = 0, reserved = size of the old last block. */
1754 gst_tensor_extra_info_init (extra_info, last_mem_size);
1755 offset = sizeof (GstTensorExtraInfo);
1760 /* copy last_memory into new_memory */
1761 memcpy (new_memory_map.data + offset, last_memory_map.data,
1762 last_memory_map.size);
1764 /* copy incoming_memory into new_memory */
1765 new_mem_index = extra_info->num_extra_tensors;
1766 extra_info->num_extra_tensors += 1;
1768 /* Copy tensor info into extra. */
/* Static path records @a info; flexible path converts the parsed meta below
 * (branch selection is in elided lines — presumably keyed on is_static; confirm). */
1770 gst_tensor_info_copy (&extra_info->infos[new_mem_index], info);
1773 * Free the name string, cause it does not freed by gstreamer.
1774 * @todo Make custom gst_allocator later?
1776 g_free (extra_info->infos[new_mem_index].name);
1777 extra_info->infos[new_mem_index].name = NULL;
1779 gst_tensor_meta_info_convert (&meta, &extra_info->infos[new_mem_index]);
/* Incoming payload goes right after the relocated last block. */
1782 memcpy (new_memory_map.data + offset + last_memory_map.size,
1783 incoming_memory_map.data, incoming_memory_map.size);
1785 gst_memory_unmap (memory, &incoming_memory_map);
1786 gst_memory_unmap (last_memory, &last_memory_map);
/* Swap the old last block for the merged one; the buffer takes ownership of new_memory. */
1789 gst_buffer_replace_memory (buffer, num_mems - 1, new_memory);
1794 gst_memory_unmap (new_memory, &new_memory_map);
1796 gst_memory_unref (new_memory);
1800 gst_memory_unmap (last_memory, &last_memory_map);
1802 /* Release incoming memory even if failed to append it into buffer. */
1804 gst_memory_unref (memory);
1810 * @brief Get the number of tensors in the buffer.
1813 gst_tensor_buffer_get_count (GstBuffer * buffer)
1818 GstTensorExtraInfo *extra_info;
1820 g_return_val_if_fail (buffer != NULL, 0);
1822 num_mems = gst_buffer_n_memory (buffer);
/* Below the memory limit, each GstMemory is exactly one tensor. */
1823 if (num_mems < NNS_TENSOR_MEMORY_MAX) {
1827 /* num_mems == NNS_TENSOR_MEMORY_MAX */
/* The last block may pack extra tensors behind a GstTensorExtraInfo header. */
1828 mem = gst_buffer_peek_memory (buffer, num_mems - 1);
1830 nns_loge ("Failed to get the last memory.");
1834 if (!gst_memory_map (mem, &map, GST_MAP_READ)) {
1835 nns_loge ("Failed to map the last memory.");
1839 if (gst_memory_map_is_extra_tensor (&map)) {
1840 extra_info = (GstTensorExtraInfo *) map.data;
/* Total = packed extras + the NNS_TENSOR_MEMORY_MAX plain blocks. */
1841 num_mems = extra_info->num_extra_tensors + NNS_TENSOR_MEMORY_MAX;
1843 nns_logi ("The last memory does not have extra tensors header. "
1844 "Assuming the number of tensors is %d.", num_mems);
1847 gst_memory_unmap (mem, &map);
1853 * @brief Sets the value of a property based on the specified property value and GParamSpec.
1855 * @param prop_value A pointer to the GValue where the property value will be set.
1856 * @param param_spec A pointer to the GParamSpec that describes the property.
1857 * @param property_value A string representing the value to be set for the property.
1859 * @note This API is intended to be used by gst_tensor_parse_config_file ()
1862 set_property_value (GValue * prop_value, const GParamSpec * param_spec,
1863 const gchar * property_value)
/* Initialize the GValue with the property's declared type, then convert the string. */
1865 GType value_type = G_PARAM_SPEC_VALUE_TYPE (param_spec);
1866 g_value_init (prop_value, value_type);
1868 if (value_type == G_TYPE_BOOLEAN) {
/* Only a case-insensitive "true" yields TRUE; any other string becomes FALSE. */
1869 gboolean value = g_ascii_strcasecmp (property_value, "true") == 0;
1870 g_value_set_boolean (prop_value, value);
1871 } else if (value_type == G_TYPE_INT) {
/* NOTE(review): atoi/atof give no error detection and silently return 0 on bad
 * input; strtol/strtod with endptr checks would report malformed config values. */
1872 gint value = atoi (property_value);
1873 g_value_set_int (prop_value, value);
1874 } else if (value_type == G_TYPE_UINT) {
/* NOTE(review): atoi parses as signed int; negative strings wrap when stored as uint. */
1875 guint value = atoi (property_value);
1876 g_value_set_uint (prop_value, value);
1877 } else if (value_type == G_TYPE_FLOAT) {
1878 gfloat value = atof (property_value);
1879 g_value_set_float (prop_value, value);
1880 } else if (value_type == G_TYPE_DOUBLE) {
1881 gdouble value = atof (property_value);
1882 g_value_set_double (prop_value, value);
1884 g_value_set_string (prop_value, property_value); /** default is string */
1889 * @brief Parses a configuration file and sets the corresponding properties on a GObject.
1891 * This function reads the contents of the configuration file located at the given path
1892 * and sets the properties of the specified GObject based on the configuration data.
1894 * @param config_path The path to the configuration file.
1895 * @param object The GObject on which to set the properties.
1897 * @note The responsibility of managing the memory of the GObject passed as a parameter
1898 * lies outside this function.
1902 gst_tensor_parse_config_file (const gchar * config_path, const GObject * object)
1904 g_autofree gchar *config_data = NULL;
1905 g_auto (GStrv) lines = NULL;
1907 GError *error = NULL;
1908 GObjectClass *g_object_class = G_OBJECT_GET_CLASS (object);
1910 if (!g_file_get_contents (config_path, &config_data, NULL, &error)) {
1911 GST_DEBUG ("Failed to read config file: %s\n", error->message);
1912 g_error_free (error);
1916 lines = g_strsplit (config_data, "\n", -1);
1918 /** Iterate over each line */
1919 for (line = lines; *line; ++line) {
1920 g_auto (GStrv) parts = g_strsplit (*line, "=", 2);
1922 if (g_strv_length (parts) == 2) {
1923 g_autofree gchar *property_name = g_strstrip (g_strdup (parts[0]));
1924 g_autofree gchar *property_value = g_strstrip (g_strdup (parts[1]));
1927 g_object_class_find_property (g_object_class, property_name);
1929 if (pdata != NULL) {
1930 GValue prop_value = G_VALUE_INIT;
1931 set_property_value (&prop_value, pdata, property_value);
1932 g_object_set_property (G_OBJECT (object), pdata->name, &prop_value);
1933 g_value_unset (&prop_value);