inference_tensor_shape_type_e shape_type; /**< a tensor shape of the layer. */
inference_tensor_data_type_e data_type; /**< a tensor type of the layer. */
size_t size; /**< a tensor element size. The size should be height * width * channel count. */
+ float scale; /**< a scale value of the layer. */
+ int zero_point; /**< a zero point value of the layer. */
// TODO.
} inference_engine_tensor_info;
Name: inference-engine-interface
Summary: Interface of inference engines
-Version: 0.4.10
+Version: 0.5.0
Release: 0
Group: Multimedia/Framework
License: Apache-2.0
{ 1, ch, height, width },
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
+ (size_t)(1 * ch * height * width),
+ 0.0f, 0
};
for (auto& layer : input_layers) {
std::vector<size_t>{1},
INFERENCE_TENSOR_SHAPE_NCHW,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- 1
+ 1,
+ 0.0f,
+ 0
};
for (auto& layer : output_layers) {
{ 1, ch, height, width },
INFERENCE_TENSOR_SHAPE_NCHW,
static_cast<inference_tensor_data_type_e>(tensor_type),
- static_cast<size_t>(1 * ch * height * width)
+ static_cast<size_t>(1 * ch * height * width),
+ 0.0f, 0
};
for (auto& input : input_layers) {
std::vector<size_t>{1},
INFERENCE_TENSOR_SHAPE_NCHW,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- 1
+ 1,
+ 0.0f,
+ 0
};
for (auto& layer : output_layers) {
{ 1, ch, height, width },
INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
+ (size_t)(1 * ch * height * width),
+ 0.0f, 0
};
for (auto& layer : input_layers) {
std::vector<size_t>{1},
INFERENCE_TENSOR_SHAPE_NCHW,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- 1
+ 1,
+ 0.0f,
+ 0
};
for (auto& layer : output_layers) {
{ 1, ch, height, width },
INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
+ (size_t)(1 * ch * height * width),
+ 0.0f, 0
};
for (auto& layer : input_layers) {
{ 1, ch, height, width },
INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
+ (size_t)(1 * ch * height * width),
+ 0.0f, 0
};
for (auto& layer : output_layers) {
{ 1, ch, height, width },
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
+ (size_t)(1 * ch * height * width),
+ 0.0f, 0
};
input_property.layers.insert(std::make_pair(layer, tensor_info));
{ 1, ch, height, width },
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
+ (size_t)(1 * ch * height * width),
+ 0.0f, 0
};
output_property.layers.insert(std::make_pair(layer, tensor_info));
{ 1, ch, height, width },
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
+ (size_t)(1 * ch * height * width),
+ 0.0f, 0
};
output_property.layers.insert(std::make_pair(layer, tensor_info));
{ 1, ch, height, width },
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
+ (size_t)(1 * ch * height * width),
+ 0.0f, 0
};
output_property.layers.insert(std::make_pair(layer, tensor_info));
{ 1, ch, height, width },
(inference_tensor_shape_type_e) INFERENCE_TENSOR_SHAPE_NCHW,
(inference_tensor_data_type_e) tensor_type,
- (size_t)(1 * ch * height * width)
+ (size_t)(1 * ch * height * width),
+ 0.0f, 0
};
for (auto& layer : input_layers) {
inference_engine_tensor_info output_tensor_info = { std::vector<size_t>{1},
INFERENCE_TENSOR_SHAPE_NCHW,
INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
- 1};
+ 1,
+ 0.0f,
+ 0};
for (auto& layer : output_layers) {
output_property.layers.insert(std::make_pair(layer, output_tensor_info));
}
inference_engine_layer_property property;
for (auto& name : tensorConfig.mInputLayerNames) {
- inference_engine_tensor_info tensor_info;
+ inference_engine_tensor_info tensor_info {};
tensor_info.data_type = tensorConfig.mDataType;
tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
inference_engine_layer_property property;
inference_engine_tensor_info tensor_info = {
std::vector<size_t>{1}, INFERENCE_TENSOR_SHAPE_NCHW,
- INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 1};
+ INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 1, 0.0f, 0};
for (auto& name : tensorConfig.mOutputLayerNames) {
LOGI("Configure %s layer as output", name.c_str());