}
outputData.dimInfo.push_back(tmpDimInfo);
- outputData.data.push_back((void *)mOutputTensorBuffers.front().buffer);
+
+ // Normalize the output tensor data by converting it to float in case of a quantized model.
+ if (tensor_info.data_type == TENSOR_DATA_TYPE_UINT8) {
+ unsigned char *ori_buf = (unsigned char *)mOutputTensorBuffers[i].buffer;
+ float *new_buf = new float[tensor_info.size];
+
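+ // NOTE: dividing by 255.0f assumes the model was quantized with a scale of
+ // 1/255 and a zero point of 0; a model quantized with different parameters
+ // would need its actual scale and zero point applied here instead.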
+ for (size_t j = 0; j < tensor_info.size; j++) {
+ new_buf[j] = (float)ori_buf[j] / 255.0f;
+ }
+
+ // Replace the original buffer with the new one and release the original.
+ mOutputTensorBuffers[i].buffer = new_buf;
+ delete[] ori_buf;
+ }
+
+ outputData.data.push_back((void *)mOutputTensorBuffers[i].buffer);
}
// Will contain top N results in ascending order.
std::priority_queue<std::pair<float, int>,
std::vector<std::pair<float, int>>,
std::greater<std::pair<float, int>>> top_result_pq;
- float value;
+ float value = 0.0f;
std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
std::vector<void*> inferResults(outputData.data.begin(), outputData.data.end());
- long count = inferDimInfo[0][1];
- LOGI("count: %ld", count);
+ int count = inferDimInfo[0][1];
+ LOGI("count: %d", count);
float *prediction = reinterpret_cast<float*>(inferResults[0]);
for (int i = 0; i < count; ++i) {
value = prediction[i];
+
// Only add it if it beats the threshold and has a chance at being in
// the top N.
top_result_pq.push(std::pair<float, int>(value, i));