INFERENCE_TENSOR_DATA_TYPE_MAX
} inference_tensor_data_type_e;
+ /**
+ * @brief Enumeration for Tensor quantization type.
+ *
+ * @since_tizen 9.0
+ */
+ typedef enum {
+ INFERENCE_TENSOR_QUANTIZATION_NONE = 0, /**< No quantization */
+ INFERENCE_TENSOR_QUANTIZATION_AFFINE, /**< Affine quantization */
+ } inference_tensor_quantization_type_e;
+
/**
* @brief Enumeration for OPenCL Tuner type.
*
size_t size; /** tensor element size. The size should be height * width * channel count */
float scale; /**< a scale value of the layer. */
int zero_point; /**< a zero point value of the layer. */
+ inference_tensor_quantization_type_e quantization_type; /**< a quantization type of the layer. */
// TODO.
} inference_engine_tensor_info;
Name: inference-engine-interface
Summary: Interface of inference engines
-Version: 0.5.0
+Version: 0.5.1
Release: 0
Group: Multimedia/Framework
License: Apache-2.0
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width),
- 0.0f, 0
+ 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
};
for (auto& layer : input_layers) {
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
1,
0.0f,
- 0
+ 0,
+ INFERENCE_TENSOR_QUANTIZATION_NONE
};
for (auto& layer : output_layers) {
INFERENCE_TENSOR_SHAPE_NCHW,
static_cast<inference_tensor_data_type_e>(tensor_type),
static_cast<size_t>(1 * ch * height * width),
- 0.0f, 0
+ 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
};
for (auto& input : input_layers) {
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
1,
0.0f,
- 0
+ 0,
+ INFERENCE_TENSOR_QUANTIZATION_NONE
};
for (auto& layer : output_layers) {
INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width),
- 0.0f, 0
+ 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
};
for (auto& layer : input_layers) {
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
1,
0.0f,
- 0
+ 0,
+ INFERENCE_TENSOR_QUANTIZATION_NONE
};
for (auto& layer : output_layers) {
INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width),
- 0.0f, 0
+ 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
};
for (auto& layer : input_layers) {
INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width),
- 0.0f, 0
+ 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
};
for (auto& layer : output_layers) {
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width),
- 0.0f, 0
+ 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
};
input_property.layers.insert(std::make_pair(layer, tensor_info));
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width),
- 0.0f, 0
+ 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
};
output_property.layers.insert(std::make_pair(layer, tensor_info));
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width),
- 0.0f, 0
+ 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
};
output_property.layers.insert(std::make_pair(layer, tensor_info));
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width),
- 0.0f, 0
+ 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
};
output_property.layers.insert(std::make_pair(layer, tensor_info));
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
(size_t)(1 * ch * height * width),
- 0.0f, 0
+ 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE
};
for (auto& layer : input_layers) {
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
1,
0.0f,
- 0};
+ 0,
+ INFERENCE_TENSOR_QUANTIZATION_NONE};
for (auto& layer : output_layers) {
output_property.layers.insert(std::make_pair(layer, output_tensor_info));
}
inference_engine_layer_property property;
inference_engine_tensor_info tensor_info = {
std::vector<size_t>{1}, INFERENCE_TENSOR_SHAPE_NCHW,
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 1, 0.0f, 0};
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 1, 0.0f, 0, INFERENCE_TENSOR_QUANTIZATION_NONE};
for (auto& name : tensorConfig.mOutputLayerNames) {
LOGI("Configure %s layer as output", name.c_str());