Some systems may build with NDEBUG defined, which compiles out all assert() calls entirely.
Do not invoke essential operations (memory map, init, validate) inside assert(); evaluate them first and assert the result instead.
Fixes #2282
Reported-by: @ndufresne
Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com>
GstMemory *out_mem;
GArray *results = NULL;
const int num_tensors = config->info.num_tensors;
+ gboolean status;
g_assert (outbuf);
/* Ensure we have outbuf properly allocated */
}
out_mem = gst_buffer_get_all_memory (outbuf);
}
- g_assert (gst_memory_map (out_mem, &out_info, GST_MAP_WRITE));
+ status = gst_memory_map (out_mem, &out_info, GST_MAP_WRITE);
+ g_assert (status);
/** reset the buffer with alpha 0 / black */
memset (out_info.data, 0, size);
/* Direct video uses the first tensor only even if it's multi-tensor */
const uint32_t *dim = &(config->info.info[0].dimension[0]);
size_t size = _get_video_xraw_bufsize (dim);
+ gboolean status;
g_assert (outbuf);
if (gst_buffer_get_size (outbuf) > 0 && gst_buffer_get_size (outbuf) != size) {
} else {
out_mem = gst_allocator_alloc (NULL, size, NULL);
}
- g_assert (gst_memory_map (out_mem, &out_info, GST_MAP_WRITE));
+ status = gst_memory_map (out_mem, &out_info, GST_MAP_WRITE);
+ g_assert (status);
if (0 == ((dim[0] * dim[1]) % 4)) {
/* No Padding Required */
ImageLabelData *data = *pdata;
GstMapInfo out_info;
GstMemory *out_mem;
+ gboolean status;
gsize bpe = gst_tensor_get_element_size (config->info.info[0].type);
tensor_element max_val;
}
out_mem = gst_buffer_get_all_memory (outbuf);
}
- g_assert (gst_memory_map (out_mem, &out_info, GST_MAP_WRITE));
+ status = gst_memory_map (out_mem, &out_info, GST_MAP_WRITE);
+ g_assert (status);
memcpy (out_info.data, str, size);
const size_t size = idata->width * idata->height * RGBA_CHANNEL;
GstMapInfo out_info;
GstMemory *out_mem;
+ gboolean status;
/* init image segments if seg map is null */
if (idata->segment_map == NULL) {
}
out_mem = gst_buffer_get_all_memory (outbuf);
}
- g_assert (gst_memory_map (out_mem, &out_info, GST_MAP_WRITE));
+ status = gst_memory_map (out_mem, &out_info, GST_MAP_WRITE);
+ g_assert (status);
memset (out_info.data, 0, size);
const GstTensorMemory *detections = NULL;
float *arr;
int index, i, j;
+ gboolean status;
g_assert (outbuf);
/* Ensure we have outbuf properly allocated */
}
out_mem = gst_buffer_get_all_memory (outbuf);
}
- g_assert (gst_memory_map (out_mem, &out_info, GST_MAP_WRITE));
+ status = gst_memory_map (out_mem, &out_info, GST_MAP_WRITE);
+ g_assert (status);
/** reset the buffer with alpha 0 / black */
memset (out_info.data, 0, size);
gsize block_size;
gsize src_idx, dest_idx;
gsize frame_size;
+ gboolean status;
frame_size = gst_tensor_info_get_size (info);
g_assert (frame_size > 0);
srcbuf = gst_buffer_copy (outbuf);
outbuf = gst_buffer_make_writable (outbuf);
- g_assert (gst_buffer_map (srcbuf, &src_info, GST_MAP_READ));
- g_assert (gst_buffer_map (outbuf, &dest_info, GST_MAP_WRITE));
+ status = gst_buffer_map (srcbuf, &src_info, GST_MAP_READ);
+ g_assert (status);
+ status = gst_buffer_map (outbuf, &dest_info, GST_MAP_WRITE);
+ g_assert (status);
/**
* Concatenate output buffer with given axis (frames-dim)
int d0, d1;
unsigned int src_idx = 0, dest_idx = 0;
size_t size, offset;
+ gboolean status;
inbuf = gst_buffer_new_and_alloc (frame_size);
gst_buffer_memset (inbuf, 0, 0, frame_size);
- g_assert (gst_buffer_map (buf, &src_info, GST_MAP_READ));
- g_assert (gst_buffer_map (inbuf, &dest_info, GST_MAP_WRITE));
+ status = gst_buffer_map (buf, &src_info, GST_MAP_READ);
+ g_assert (status);
+ status = gst_buffer_map (inbuf, &dest_info, GST_MAP_WRITE);
+ g_assert (status);
/**
* Refer: https://gstreamer.freedesktop.org/documentation/design/mediatype-video-raw.html
frames_in = 1;
if (buf_size != frame_size) {
+ gboolean status;
GstMapInfo src_info, dest_info;
gsize block_size = MIN (buf_size, frame_size);
inbuf = gst_buffer_new_and_alloc (frame_size);
gst_buffer_memset (inbuf, 0, 0, frame_size);
- g_assert (gst_buffer_map (buf, &src_info, GST_MAP_READ));
- g_assert (gst_buffer_map (inbuf, &dest_info, GST_MAP_WRITE));
+ status = gst_buffer_map (buf, &src_info, GST_MAP_READ);
+ g_assert (status);
+ status = gst_buffer_map (inbuf, &dest_info, GST_MAP_WRITE);
+ g_assert (status);
memcpy (dest_info.data, src_info.data, block_size);
const GstTensorDecoderDef *decoder;
const gchar *mode_string;
guint i;
+ int status;
mode_string = g_value_get_string (value);
decoder = nnstreamer_decoder_find (mode_string);
self->decoder = decoder;
}
- g_assert (self->decoder->init (&self->plugin_data));
+ status = self->decoder->init (&self->plugin_data);
+ /** @todo Do proper error handling */
+ g_assert (status);
for (i = 0; i < TensorDecMaxOpNum; i++)
if (!gst_tensordec_process_plugin_options (self, i))
{
GstTensorDec *self;
GstFlowReturn res;
+ gboolean status;
self = GST_TENSOR_DECODER_CAST (trans);
for (i = 0; i < num_tensors; i++) {
in_mem[i] = gst_buffer_peek_memory (inbuf, i);
- g_assert (gst_memory_map (in_mem[i], &in_info[i], GST_MAP_READ));
+ status = gst_memory_map (in_mem[i], &in_info[i], GST_MAP_READ);
+ g_assert (status);
input[i].data = in_info[i].data;
input[i].size = in_info[i].size;
g_assert (gst_buffer_n_memory (inbuf) == prop->input_meta.num_tensors);
for (i = 0; i < prop->input_meta.num_tensors; i++) {
+ gboolean status;
in_mem[i] = gst_buffer_peek_memory (inbuf, i);
- g_assert (gst_memory_map (in_mem[i], &in_info[i], GST_MAP_READ));
+ status = gst_memory_map (in_mem[i], &in_info[i], GST_MAP_READ);
+ g_assert (status);
+ /** @todo Do proper error handling (clean up and error return) */
in_tensors[i].data = in_info[i].data;
in_tensors[i].size = in_info[i].size;
g_assert (gst_buffer_get_size (outbuf) == 0);
for (i = 0; i < prop->output_meta.num_tensors; i++) {
+ gboolean status;
out_tensors[i].data = NULL;
out_tensors[i].size = gst_tensor_filter_get_output_size (self, i);
out_tensors[i].type = prop->output_meta.info[i].type;
/* allocate memory if allocate_in_invoke is FALSE */
if (allocate_in_invoke == FALSE) {
out_mem[i] = gst_allocator_alloc (NULL, out_tensors[i].size, NULL);
- g_assert (gst_memory_map (out_mem[i], &out_info[i], GST_MAP_WRITE));
+ status = gst_memory_map (out_mem[i], &out_info[i], GST_MAP_WRITE);
+ g_assert (status);
out_tensors[i].data = out_info[i].data;
}
gsize element_size;
tensor_dim dim;
tensor_type type;
+ gboolean status;
memcpy (&dim, &tensor_merge->tensors_config.info.info[0].dimension,
sizeof (tensor_dim));
for (i = 0; i < num_mem; i++) {
mem[i] = gst_buffer_peek_memory (tensors_buf, i);
- g_assert (gst_memory_map (mem[i], &mInfo[i], GST_MAP_READ));
+ status = gst_memory_map (mem[i], &mInfo[i], GST_MAP_READ);
+ g_assert (status); /** @todo Do proper error handling */
outSize += mInfo[i].size;
}
outMem = gst_allocator_alloc (NULL, outSize, NULL);
- g_assert (gst_memory_map (outMem, &outInfo, GST_MAP_WRITE));
+ status = gst_memory_map (outMem, &outInfo, GST_MAP_WRITE);
+ g_assert (status); /** @todo Do proper error handling */
outptr = outInfo.data;
switch (tensor_merge->mode) {
if (gst_tensor_merge_get_merged_config (tensor_merge,
&tensor_merge->tensors_config, &config)) {
g_assert (gst_tensor_config_validate (&config));
+ /** @todo Do proper error handling */
newcaps = gst_tensor_caps_from_config (&config);
} else {
goto nego_error;
GstClockTime dts_time = GST_CLOCK_TIME_NONE;
GstClockTime time = 0;
gboolean isEOS = FALSE;
+ gboolean status;
GST_DEBUG_OBJECT (tensor_mux, " all pads are collected ");
if (tensor_mux->need_stream_start) {
gchar s_id[32];
gst_buffer_n_memory (tensors_buf);
}
- g_assert (gst_tensors_config_validate (&tensor_mux->tensors_config));
+ status = gst_tensors_config_validate (&tensor_mux->tensors_config);
+ if (FALSE == status)
+ return GST_FLOW_ERROR;
+
newcaps = gst_tensors_caps_from_config (&tensor_mux->tensors_config);
if (!gst_pad_set_caps (tensor_mux->srcpad, newcaps)) {
num_tensors = self->config.info.num_tensors;
for (i = 0; i < num_tensors; i++) {
+ gboolean status;
size = gst_tensor_info_get_size (&self->config.info.info[i]);
mem = gst_allocator_alloc (NULL, size, NULL);
- g_assert (gst_memory_map (mem, &info, GST_MAP_WRITE));
+ status = gst_memory_map (mem, &info, GST_MAP_WRITE);
+ g_assert (status); /** @todo Do proper error handling (err return) */
memset (info.data, 0, size);
gst_memory_unmap (mem, &info);
gint64 val;
gchar **strv;
gchar *endptr = NULL;
+ gboolean status;
/**
* using direct as we only need to store keys
self->custom_channel_table = NULL;
break;
}
- g_assert (g_hash_table_insert (self->custom_channel_table,
- GINT_TO_POINTER (val), NULL));
+ status = g_hash_table_insert (self->custom_channel_table,
+ GINT_TO_POINTER (val), NULL);
+ g_assert (status);
}
self->channels_enabled = CHANNELS_ENABLED_CUSTOM;
g_strfreev (strv);
int i;
gsize size, offset;
GstMapInfo src_info, dest_info;
+ gboolean status;
size = 0;
offset = 0;
size += gst_tensor_get_element_count (*dim) *
gst_tensor_get_element_size (split->sink_tensor_conf.info.type);
mem = gst_allocator_alloc (NULL, size, NULL);
- g_assert (gst_memory_map (mem, &dest_info, GST_MAP_WRITE));
- g_assert (gst_buffer_map (buffer, &src_info, GST_MAP_READ));
+ status = gst_memory_map (mem, &dest_info, GST_MAP_WRITE);
+ g_assert (status);
+ status = gst_buffer_map (buffer, &src_info, GST_MAP_READ);
+ g_assert (status);
for (i = 0; i < nth; i++) {
dim = g_array_index (split->tensorseg, tensor_dim *, i);
{
GstFlowReturn res;
GstTensorTransform *filter = GST_TENSOR_TRANSFORM_CAST (trans);
+ gboolean status;
uint8_t *inptr, *outptr;
GstMapInfo inInfo, outInfo;
g_return_val_if_fail (filter->loaded, GST_FLOW_ERROR);
- g_assert (gst_buffer_map (inbuf, &inInfo, GST_MAP_READ));
- g_assert (gst_buffer_map (outbuf, &outInfo, GST_MAP_WRITE));
+ status = gst_buffer_map (inbuf, &inInfo, GST_MAP_READ);
+ g_assert (status);
+ status = gst_buffer_map (outbuf, &outInfo, GST_MAP_WRITE);
+ g_assert (status);
inptr = inInfo.data;
outptr = outInfo.data;