-// Copyright (C) 2018 Intel Corporation
+// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <opencv2/opencv.hpp>
#include <inference_engine.hpp>
+#include <samples/classification_results.h>
using namespace InferenceEngine;
// -----------------------------------------------------------------------------------------------------
// --------------------------- 1. Load Plugin for inference engine -------------------------------------
- InferencePlugin plugin = PluginDispatcher({"../../../lib/intel64", ""}).getPluginByDevice(device_name);
+ InferencePlugin plugin = PluginDispatcher().getPluginByDevice(device_name);
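// The dispatcher no longer takes a list of plugin search paths; it resolves
// the plugin libraries on its own. A minimal sketch of loading a device plugin
// and compiling a network with it, assuming device_name is a std::string taken
// from the command line (e.g. "CPU") and network is the CNNNetwork built in
// step 2 below:
//
//     InferencePlugin plugin = PluginDispatcher().getPluginByDevice(device_name);
//     ExecutableNetwork executable_network = plugin.LoadNetwork(network, {});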
// -----------------------------------------------------------------------------------------------------
// --------------------------- 2. Read IR Generated by Model Optimizer (.xml and .bin files) ------------
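// A minimal sketch of what this step typically looks like in this API version;
// input_model is an assumed std::string holding the path to the .xml file:
//
//     CNNNetReader network_reader;
//     network_reader.ReadNetwork(input_model);
//     // The weights (.bin) file is expected next to the .xml file:
//     network_reader.ReadWeights(input_model.substr(0, input_model.size() - 4) + ".bin");
//     CNNNetwork network = network_reader.getNetwork();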
// --------------------------- 8. Process output -------------------------------------------------------
for (auto &item : output_info) {
auto output_name = item.first;
- Blob::Ptr output = async_infer_request.GetBlob(output_name);
- auto output_buffer = output->buffer().as<PrecisionTrait<Precision::FP32>::value_type *>();
- std::vector<unsigned> results;
- /** This is to sort output probabilities and put it to results vector **/
- TopResults(10, *output, results);
-
- std::cout << std::endl << "Top 10 results:" << std::endl << std::endl;
- for (size_t id = 0; id < 10; ++id) {
- std::cout.precision(7);
- auto result = output_buffer[results[id]];
- std::cout << std::left << std::fixed << result << " label #" << results[id] << std::endl;
- }
+        Blob::Ptr output = async_infer_request.GetBlob(output_name);
+ // Print classification results
+ ClassificationResult classificationResult(output, {input_image_path});
+ classificationResult.print();
}
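// classification_results.h ships with the Inference Engine samples. Beyond the
// two-argument form above, the ClassificationResult constructor (in the sample
// versions we have seen; the extra parameters are an assumption, not part of
// this diff) also accepts a batch size, a top-N count, and label strings:
//
//     ClassificationResult res(output, {input_image_path}, 1 /* batch */, 10 /* top-N */);
//     res.print();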
// -----------------------------------------------------------------------------------------------------
} catch (const std::exception & ex) {