From: Inki Dae
Date: Wed, 4 Mar 2020 04:16:35 +0000 (+0900)
Subject: Add size member to inference_engine_tensor_buffer structure
X-Git-Tag: submit/tizen/20200423.063253~43
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=77da30e96098117bcbabc9f2f8a33d589b0bf7e7;p=platform%2Fcore%2Fmultimedia%2Finference-engine-interface.git

Add size member to inference_engine_tensor_buffer structure

With this patch, the size member of the inference_engine_tensor_info structure
means the tensor element size, calculated as height * width * channel count,
while the size member of the inference_engine_tensor_buffer structure means the
actual tensor buffer size in bytes, calculated as height * width * channel
count * bytes per pixel.

Change-Id: Ic88e34035c45386fa712e7ab922a296a4bad0ac9
Signed-off-by: Inki Dae
---

diff --git a/include/inference_engine_type.h b/include/inference_engine_type.h
index d141399..7f391be 100644
--- a/include/inference_engine_type.h
+++ b/include/inference_engine_type.h
@@ -147,6 +147,7 @@ typedef struct _inference_engine_config {
 typedef struct _inference_engine_tensor_buffer {
 	void *buffer; /**< a buffer which contains tensor data. */
 	inference_tensor_data_type_e data_type; /**< a tensor type of the layer. */
+	size_t size; /**< actual tensor buffer size in bytes. The size should be height * width * channel count * bytes per pixel. */
 	// TODO.
 } inference_engine_tensor_buffer;
 
@@ -157,9 +158,9 @@ typedef struct _inference_engine_tensor_buffer {
  * - a name of a given layer
  * - a tensor shape of the layer
  * - a tensor type of the layer
- * - a tensor size of the layer.
+ * - a tensor element size of the layer.
  *
- * Caution. Tensor size is not in-memory buffer size in bytes so based on a given tensor size,
+ * Caution. Tensor element size is not in-memory buffer size in bytes so based on a given tensor element size,
  * upper framework should allocate actual tensor buffer according to tensor data types (i.e., uint8, float32...)
  *
  * @since_tizen 6.0
@@ -168,7 +169,7 @@ typedef struct _inference_engine_tensor_info {
 	std::vector shape; /**< a tensor shape. */
 	inference_tensor_shape_type_e shape_type; /**< a tensor shape of the layer. */
 	inference_tensor_data_type_e data_type; /**< a tensor type of the layer. */
-	size_t size; /** tensor buffer size. The size should be height * width * channel count */
+	size_t size; /** tensor element size. The size should be height * width * channel count */
 	// TODO.
 } inference_engine_tensor_info;
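
Below is a minimal, self-contained C++ sketch (not part of the patch and not the real inference-engine-interface API) illustrating how an upper framework could relate the two size members this patch distinguishes: inference_engine_tensor_info.size as an element count (height * width * channel count) and inference_engine_tensor_buffer.size as the allocation size in bytes (element count * bytes per element). The enum and helper names used here are placeholders and may not match the actual definitions in inference_engine_type.h.

#include <cstddef>
#include <cstdint>
#include <vector>

// Placeholder for inference_tensor_data_type_e; real enumerator names may differ.
enum tensor_data_type_e {
	TENSOR_DATA_TYPE_UINT8,
	TENSOR_DATA_TYPE_FLOAT32,
};

// Bytes occupied by a single tensor element of the given data type.
static size_t bytes_per_element(tensor_data_type_e type)
{
	switch (type) {
	case TENSOR_DATA_TYPE_UINT8:
		return sizeof(uint8_t);	// 1 byte
	case TENSOR_DATA_TYPE_FLOAT32:
		return sizeof(float);	// 4 bytes
	}
	return 0;
}

int main(void)
{
	// Example tensor shape: { height, width, channel count }.
	std::vector<size_t> shape = { 224, 224, 3 };
	tensor_data_type_e data_type = TENSOR_DATA_TYPE_FLOAT32;

	// Corresponds to inference_engine_tensor_info.size:
	// element count = height * width * channel count.
	size_t element_size = 1;
	for (size_t dim : shape)
		element_size *= dim;	// 224 * 224 * 3 = 150528 elements

	// Corresponds to inference_engine_tensor_buffer.size:
	// actual in-memory buffer size in bytes.
	size_t buffer_size = element_size * bytes_per_element(data_type);	// 602112 bytes

	// The upper framework allocates a raw buffer of buffer_size bytes;
	// its data pointer would then be passed as the tensor buffer.
	std::vector<unsigned char> buffer(buffer_size);

	return 0;
}

For a 224x224x3 float32 tensor this yields an element size of 150528 and a buffer size of 602112 bytes, which matches the distinction the patch documents in the two struct comments.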