switch (op_s->op) {
case GTT_OP_TYPECAST:
if (num_op > 1 && str_op[1]) {
+ if (i > 0) {
+ GST_WARNING_OBJECT (filter,
+ "To prevent memory re-allocation, tensor-transform limits the typecast during the sequence. "
+ "Please set the typecast in the first.");
+ op_s->op = GTT_OP_UNKNOWN;
+ break;
+ }
+
op_s->value.type = get_tensor_type (str_op[1]);
if (op_s->value.type == _NNS_END) {
}
/**
- * @brief Test for tensor_transform arithmetic (typecast uint8 > float64, add .2, add .1, typecast uint16)
+ * @brief Test for tensor_transform arithmetic (typecast uint8 > float64, add .2, add .1; the final typecast to uint16 is ignored)
*/
TEST (test_tensor_transform, arithmetic_4)
{
gst_harness_set_src_caps (h, gst_tensor_caps_from_config (&config));
data_in_size = gst_tensor_info_get_size (&config.info);
- config.info.type = _NNS_UINT16;
+ config.info.type = _NNS_FLOAT64;
data_out_size = gst_tensor_info_get_size (&config.info);
/* push buffers */
ASSERT_TRUE (gst_memory_map (mem, &info, GST_MAP_READ));
for (i = 0; i < array_size; i++) {
- uint16_t expected = (i + 1) * (b + 1);
- EXPECT_EQ (((uint16_t *) info.data)[i], expected);
+ double expected = (i + 1) * (b + 1) + .3;
+ EXPECT_DOUBLE_EQ (((double *) info.data)[i], expected);
}
gst_memory_unmap (mem, &info);