size_t size {}; /**< tensor element size; by default it should be height * width * channel count. If is_size_fixed is set, it holds the final buffer size in bytes. */
float scale {}; /**< a scale value of the layer. */
int zero_point {}; /**< a zero point value of the layer. */
+ bool is_size_fixed {}; /**< if true, the tensor size is calculated internally by the inference engine, so do not use the 'shape' member to calculate the tensor size; use the 'size' member instead. */
inference_tensor_quantization_type_e quantization_type { INFERENCE_TENSOR_QUANTIZATION_NONE }; /**< a quantization type of the layer. */
// TODO.
} inference_engine_tensor_info;
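For reference, a minimal sketch of how the new is_size_fixed flag is meant to be consumed when sizing a buffer. The helper name and signature are assumptions for illustration, not part of the patch; the ternary mirrors the hunks below.

#include <cstddef>

// Hypothetical helper (not part of the patch): returns the buffer size in
// bytes for a tensor. When is_size_fixed is set, 'size' already holds the
// final byte size computed internally by the inference engine and is used
// as-is; otherwise 'size' is an element count scaled by the element width.
static size_t GetTensorBufferByteSize(const inference_engine_tensor_info &info,
                                      size_t bytes_per_element)
{
    return info.is_size_fixed ? info.size : info.size * bytes_per_element;
}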
inference_engine_tensor_buffer tensor_buffer;
if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_FLOAT32) {
tensor_buffer.buffer = (void *) (new float[tensor_info.size]);
- tensor_buffer.size = tensor_info.size * 4;
+ tensor_buffer.size = tensor_info.is_size_fixed ? tensor_info.size : tensor_info.size * 4;
} else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
tensor_buffer.buffer = (void *) (new unsigned char[tensor_info.size]);
tensor_buffer.size = tensor_info.size;
} else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_INT64) {
tensor_buffer.buffer = (void *)(new long long[tensor_info.size]);
- tensor_buffer.size = tensor_info.size * 8;
+ tensor_buffer.size = tensor_info.is_size_fixed ? tensor_info.size : tensor_info.size * 8;
} else if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT16) {
tensor_buffer.buffer = (void *)(new unsigned short[tensor_info.size]);
- tensor_buffer.size = tensor_info.size * 2;
+ tensor_buffer.size = tensor_info.is_size_fixed ? tensor_info.size : tensor_info.size * 2;
}
EXPECT_TRUE(tensor_buffer.buffer);
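One caveat on the test snippet above, as a hedged note rather than part of the patch: the buffers are allocated with new[], so they should eventually be released with a matching typed delete[], for example:

// Hypothetical cleanup (assumed, not in the patch): release the buffer with
// delete[] on the same element type that was used for allocation.
switch (tensor_info.data_type) {
case INFERENCE_TENSOR_DATA_TYPE_FLOAT32:
    delete[] static_cast<float *>(tensor_buffer.buffer);
    break;
case INFERENCE_TENSOR_DATA_TYPE_UINT8:
    delete[] static_cast<unsigned char *>(tensor_buffer.buffer);
    break;
case INFERENCE_TENSOR_DATA_TYPE_INT64:
    delete[] static_cast<long long *>(tensor_buffer.buffer);
    break;
case INFERENCE_TENSOR_DATA_TYPE_UINT16:
    delete[] static_cast<unsigned short *>(tensor_buffer.buffer);
    break;
default:
    break;
}
tensor_buffer.buffer = nullptr;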