// Facenet class data members (enclosing class declaration is outside this chunk).
std::unique_ptr<mediavision::inference::Inference> _inference;
std::unique_ptr<MediaVision::Common::EngineConfig> _config;
std::unique_ptr<MetaParser> _parser;
// NOTE(review): this diff deletes the _result member in favour of a
// function-local static inside the output getter below — see the thread-safety
// note there before accepting this hunk.
- FacenetOutput _result;
// Non-owning pointer into the inference engine's output tensor; brace-init
// keeps it null until the getter refreshes it on each call.
inference_engine_tensor_buffer *_outputTensorBuffer {};
Preprocess _preprocess;
std::string _modelFileName;
{
// NOTE(review): the enclosing function's signature is outside this chunk —
// presumably a Facenet getter returning FacenetOutput& (a static local would
// only be needed for lifetime reasons if a reference is returned). TODO confirm.
TensorBuffer &tensor_buffer_obj = _inference->getOutputTensorBuffer();
- // Make sure to clear _result.outputs vectors because if not clear then other output_vector will be pushed to _result.outputs
+ // Make sure to clear result.outputs vectors because if not clear then other output_vector will be pushed to result.outputs
// and it results in sending wrong output vector to face recognition framework.
- _result.outputs.clear();
// NOTE(review): a function-local static replaces the removed _result member.
// Unlike the member it replaces, this static is shared by ALL Facenet
// instances and concurrent calls race on clear()/push_back — confirm this
// path is single-threaded (and that per-instance results never interleave)
// before merging.
+ static FacenetOutput result;
+ result.outputs.clear();
_outputTensorBuffer = tensor_buffer_obj.getTensorBuffer(_facenetOutputTensorName);
// NOTE(review): the statement guarded by this null-check (likely a throw or
// early return) is not visible in this diff chunk.
if (!_outputTensorBuffer)
float *buffer = reinterpret_cast<float *>(_outputTensorBuffer->buffer);
// Copy the raw float tensor into a fresh vector appended to the just-cleared
// outputs list; tensor size is in bytes, hence the / sizeof(float).
- _result.outputs.push_back(vector<float>(buffer, buffer + _outputTensorBuffer->size / sizeof(float)));
+ result.outputs.push_back(vector<float>(buffer, buffer + _outputTensorBuffer->size / sizeof(float)));
- return _result;
+ return result;
}
template void Facenet::preprocess<float>(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo,
// NOTE(review): the explicit instantiation above is cut mid-signature, and the
// statements below sit inside a try-block of a registration function whose
// boundaries are outside this chunk.
FaceRecognitionInput face_recognition_input;
face_recognition_input.mode = RequestMode::REGISTER;
// NOTE(review): moving instead of copying the embedding vector is a genuine
// improvement, but it leaves facenet_output.outputs[0] in a moved-from state —
// confirm no later code in this function reuses it. Also presumably outputs is
// guaranteed non-empty here; [0] on an empty vector is out-of-bounds — TODO
// verify the producer (the getter in this same diff) always pushes one vector.
- face_recognition_input.inputs.push_back(facenet_output.outputs[0]);
+ face_recognition_input.inputs.push_back(move(facenet_output.outputs[0]));
face_recognition_input.labels.push_back(label);
machine_learning_native_inference(handle, "face_recognition", face_recognition_input);
} catch (const BaseException &e) {