From daf8bc6164fe14d5c72820554409362df4256cc8 Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Sat, 10 Oct 2020 11:19:16 +0300
Subject: [PATCH] DOCS: added code snippets compilation and fixes (#2606)

---
 docs/CMakeLists.txt                                 |  4 +-
 docs/IE_PLUGIN_DG/Doxyfile                          |  4 +-
 docs/examples/CMakeLists.txt                        | 13 -----
 docs/snippets/Bfloat16Inference0.cpp                |  1 -
 docs/snippets/Bfloat16Inference1.cpp                |  3 --
 docs/snippets/Bfloat16Inference2.cpp                |  1 -
 docs/snippets/CMakeLists.txt                        | 57 ++++++++++++++++++++++
 docs/snippets/DynamicBatching.cpp                   | 15 ++----
 docs/snippets/GPU_RemoteBlob_API0.cpp               | 17 ++++---
 docs/snippets/HETERO0.cpp                           |  6 +--
 ...Integrate_with_customer_application_new_API.cpp  | 31 ++++++------
 docs/snippets/MULTI3.cpp                            |  7 ++-
 docs/snippets/MULTI4.cpp                            | 11 ++---
 docs/snippets/MULTI5.cpp                            |  7 ++-
 docs/snippets/Migration_CoreAPI.cpp                 | 10 ++--
 docs/snippets/OnnxImporterTutorial0.cpp             |  3 +-
 docs/snippets/OnnxImporterTutorial1.cpp             |  3 +-
 docs/snippets/OnnxImporterTutorial2.cpp             |  9 ++--
 docs/snippets/OnnxImporterTutorial3.cpp             |  5 +-
 docs/snippets/ShapeInference.cpp                    | 11 ++---
 docs/snippets/dldt_optimization_guide3.cpp          | 12 ++---
 docs/snippets/dldt_optimization_guide4.cpp          |  4 +-
 docs/snippets/dldt_optimization_guide5.cpp          | 23 ++++-----
 docs/snippets/dldt_optimization_guide6.cpp          | 14 +++---
 docs/snippets/dldt_optimization_guide7.cpp          | 13 ++---
 docs/snippets/dldt_optimization_guide8.cpp          | 11 ++---
 docs/snippets/dldt_optimization_guide9.cpp          | 14 +++---
 .../example_async_infer_request.cpp                 | 12 ++---
 .../example_itask_executor.cpp                      |  0
 .../example_ngraph_utils.cpp                        |  0
 docs/snippets/movidius-programming-guide.cpp        |  6 +--
 docs/snippets/nGraphTutorial.cpp                    |  3 +-
 docs/snippets/protecting_model_guide.cpp            | 17 +++++--
 33 files changed, 189 insertions(+), 158 deletions(-)
 delete mode 100644 docs/examples/CMakeLists.txt
 create mode 100644 docs/snippets/CMakeLists.txt
 rename docs/{examples => snippets}/example_async_infer_request.cpp (93%)
 rename docs/{examples => snippets}/example_itask_executor.cpp (100%)
 rename docs/{examples => snippets}/example_ngraph_utils.cpp (100%)

diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt
index 6f7d778..0da74ed 100644
--- a/docs/CMakeLists.txt
+++ b/docs/CMakeLists.txt
@@ -3,7 +3,7 @@
 #
 
 if(NOT ENABLE_DOCKER)
-    add_subdirectory(examples)
+    add_subdirectory(snippets)
 
     # Detect nGraph
     find_package(ngraph QUIET)
@@ -20,7 +20,7 @@ if(NOT ENABLE_DOCKER)
     add_subdirectory(template_extension)
 
     set(all_docs_targets
-        ie_docs_examples
+        ie_docs_snippets
         template_extension templatePlugin TemplateBehaviorTests TemplateFunctionalTests)
     foreach(target_name IN LISTS all_docs_targets)
diff --git a/docs/IE_PLUGIN_DG/Doxyfile b/docs/IE_PLUGIN_DG/Doxyfile
index 8631de2..d72cbe5 100644
--- a/docs/IE_PLUGIN_DG/Doxyfile
+++ b/docs/IE_PLUGIN_DG/Doxyfile
@@ -868,13 +868,13 @@ EXCLUDE_SYMBOLS        =
 # command).
 
 EXAMPLE_PATH           = ../template_plugin/src \
-                         ../template_plugin/include \
+                         ../template_plugin/include \
                          ../template_plugin/src/CMakeLists.txt \
                          ../template_plugin/tests/functional/CMakeLists.txt \
                          ../template_plugin/tests/functional/transformations \
                          ../template_plugin/tests/functional/shared_tests_instances/ \
                          ../../inference-engine/tests/functional/plugin/shared/include \
-                         ../examples
+                         ../snippets
 
 # If the value of the EXAMPLE_PATH tag contains directories, you can use the
 # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
diff --git a/docs/examples/CMakeLists.txt b/docs/examples/CMakeLists.txt
deleted file mode 100644
index 9a4aa91..0000000
--- a/docs/examples/CMakeLists.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright (C) 2018-2020 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-#
-
-set(TARGET_NAME ie_docs_examples)
-
-file(GLOB SOURCES *.cpp)
-
-add_library(ie_docs_examples STATIC ${SOURCES})
-
-target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api ngraph)
-
-#add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME})
diff --git a/docs/snippets/Bfloat16Inference0.cpp b/docs/snippets/Bfloat16Inference0.cpp
index 9b86450..89c93d6 100644
--- a/docs/snippets/Bfloat16Inference0.cpp
+++ b/docs/snippets/Bfloat16Inference0.cpp
@@ -4,7 +4,6 @@ int main() {
 using namespace InferenceEngine;
 //! [part0]
 InferenceEngine::Core core;
-
 auto cpuOptimizationCapabilities = core.GetMetric("CPU", METRIC_KEY(OPTIMIZATION_CAPABILITIES)).as<std::vector<std::string>>();
 //! [part0]
 return 0;
diff --git a/docs/snippets/Bfloat16Inference1.cpp b/docs/snippets/Bfloat16Inference1.cpp
index 523c40d..2de3b02 100644
--- a/docs/snippets/Bfloat16Inference1.cpp
+++ b/docs/snippets/Bfloat16Inference1.cpp
@@ -4,11 +4,8 @@ int main() {
 using namespace InferenceEngine;
 //! [part1]
 InferenceEngine::Core core;
-
 auto network = core.ReadNetwork("sample.xml");
-
 auto exeNetwork = core.LoadNetwork(network, "CPU");
-
 auto enforceBF16 = exeNetwork.GetConfig(PluginConfigParams::KEY_ENFORCE_BF16).as<bool>();
 //! [part1]
diff --git a/docs/snippets/Bfloat16Inference2.cpp b/docs/snippets/Bfloat16Inference2.cpp
index 6ae3753..082495d 100644
--- a/docs/snippets/Bfloat16Inference2.cpp
+++ b/docs/snippets/Bfloat16Inference2.cpp
@@ -4,7 +4,6 @@ int main() {
 using namespace InferenceEngine;
 //! [part2]
 InferenceEngine::Core core;
-
 core.SetConfig({ { CONFIG_KEY(ENFORCE_BF16), CONFIG_VALUE(NO) } }, "CPU");
 //! [part2]
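The three Bfloat16Inference fragments above belong to one workflow: part0 queries the CPU plugin's optimization capabilities, part1 reads back how the loaded network resolved ENFORCE_BF16, and part2 overrides the setting. A minimal standalone sketch that ties them together is shown below; the "BF16" capability string, the std::find check, and the combination into one file are illustrative additions, not content of this patch.

#include <ie_core.hpp>

#include <algorithm>
#include <string>
#include <vector>

int main() {
    InferenceEngine::Core core;
    // Query the capability list reported by the CPU plugin (see part0 above).
    auto caps = core.GetMetric("CPU", METRIC_KEY(OPTIMIZATION_CAPABILITIES)).as<std::vector<std::string>>();
    // If native bfloat16 is not reported, explicitly disable bf16 enforcement (see part2 above).
    if (std::find(caps.begin(), caps.end(), "BF16") == caps.end())
        core.SetConfig({ { CONFIG_KEY(ENFORCE_BF16), CONFIG_VALUE(NO) } }, "CPU");
    // Load the model and read back the effective setting (see part1 above).
    auto network = core.ReadNetwork("sample.xml");
    auto exeNetwork = core.LoadNetwork(network, "CPU");
    auto enforceBF16 = exeNetwork.GetConfig(InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16);
    return 0;
}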
diff --git a/docs/snippets/CMakeLists.txt b/docs/snippets/CMakeLists.txt
new file mode 100644
index 0000000..a5f1d23
--- /dev/null
+++ b/docs/snippets/CMakeLists.txt
@@ -0,0 +1,57 @@
+# Copyright (C) 2018-2020 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(TARGET_NAME ie_docs_snippets)
+
+file(GLOB SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/*.cpp")
+
+# remove OpenCL related sources
+# TODO: fix compilation of OpenCL files
+if(NOT CLDNN__IOCL_ICD_INCDIRS OR TRUE)
+    list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/GPU_RemoteBlob_API0.cpp"
+                             "${CMAKE_CURRENT_SOURCE_DIR}/GPU_RemoteBlob_API1.cpp"
+                             "${CMAKE_CURRENT_SOURCE_DIR}/GPU_RemoteBlob_API2.cpp")
+endif()
+
+# remove OpenCV related sources
+find_package(OpenCV QUIET)
+if(NOT OpenCV_FOUND)
+    list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/dldt_optimization_guide5.cpp"
+                             "${CMAKE_CURRENT_SOURCE_DIR}/ShapeInference.cpp")
+endif()
+
+# ONNX importer related files
+if(NOT NGRAPH_ONNX_IMPORT_ENABLE)
+    list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/OnnxImporterTutorial0.cpp"
+                             "${CMAKE_CURRENT_SOURCE_DIR}/OnnxImporterTutorial1.cpp"
+                             "${CMAKE_CURRENT_SOURCE_DIR}/OnnxImporterTutorial2.cpp"
+                             "${CMAKE_CURRENT_SOURCE_DIR}/OnnxImporterTutorial3.cpp")
+endif()
+
+# remove snippets for deprecated / removed API
+list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/Migration_CoreAPI.cpp")
+
+# requires mfxFrameSurface1 and MSS API
+list(REMOVE_ITEM SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/dldt_optimization_guide2.cpp"
+                         "${CMAKE_CURRENT_SOURCE_DIR}/dldt_optimization_guide3.cpp"
+                         "${CMAKE_CURRENT_SOURCE_DIR}/dldt_optimization_guide4.cpp")
+
+# create a static library
+
+add_library(${TARGET_NAME} STATIC ${SOURCES})
+
+if(CLDNN__IOCL_ICD_INCDIRS)
+    target_include_directories(${TARGET_NAME} SYSTEM PRIVATE ${CLDNN__IOCL_ICD_INCDIRS})
+endif()
+
+if(OpenCV_FOUND)
+    target_include_directories(${TARGET_NAME} SYSTEM PRIVATE ${OpenCV_INCLUDE_DIRS})
+    target_link_libraries(${TARGET_NAME} PRIVATE opencv_core)
+endif()
+
+if(NGRAPH_ONNX_IMPORT_ENABLE)
+    target_link_libraries(${TARGET_NAME} PRIVATE onnx_importer)
+endif()
+
+target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api ngraph)
diff --git a/docs/snippets/DynamicBatching.cpp b/docs/snippets/DynamicBatching.cpp
index 01d0b2d..8212c00 100644
--- a/docs/snippets/DynamicBatching.cpp
+++ b/docs/snippets/DynamicBatching.cpp
@@ -2,43 +2,36 @@
 #include
 
 int main() {
-using namespace InferenceEngine;
 int FLAGS_bl = 1;
 auto imagesData = std::vector(2);
 auto imagesData2 = std::vector(4);
 //! [part0]
-int dynBatchLimit = FLAGS_bl; //take dynamic batch limit from command line option
-
+int dynBatchLimit = FLAGS_bl; //take dynamic batch limit from command line option
 // Read network model
-Core core;
-CNNNetwork network = core.ReadNetwork("sample.xml");
+InferenceEngine::Core core;
+InferenceEngine::CNNNetwork network = core.ReadNetwork("sample.xml");
 
 // enable dynamic batching and prepare for setting max batch limit
 const std::map<std::string, std::string> dyn_config =
-{ { PluginConfigParams::KEY_DYN_BATCH_ENABLED, PluginConfigParams::YES } };
+{ { InferenceEngine::PluginConfigParams::KEY_DYN_BATCH_ENABLED, InferenceEngine::PluginConfigParams::YES } };
 network.setBatchSize(dynBatchLimit);
-
 // create executable network and infer request
 auto executable_network = core.LoadNetwork(network, "CPU", dyn_config);
 auto infer_request = executable_network.CreateInferRequest();
-
 // ...
-
 // process a set of images
 // dynamically set batch size for subsequent Infer() calls of this request
 size_t batchSize = imagesData.size();
 infer_request.SetBatch(batchSize);
 infer_request.Infer();
-
 // ...
-
 // process another set of images
 batchSize = imagesData2.size();
 infer_request.SetBatch(batchSize);
diff --git a/docs/snippets/GPU_RemoteBlob_API0.cpp b/docs/snippets/GPU_RemoteBlob_API0.cpp
index 2d22ad8..dd6784d 100644
--- a/docs/snippets/GPU_RemoteBlob_API0.cpp
+++ b/docs/snippets/GPU_RemoteBlob_API0.cpp
@@ -14,16 +14,17 @@ using namespace InferenceEngine;
 
 // ...
 
-// initialize the plugin and load the network
+// initialize the core and load the network
 InferenceEngine::Core ie;
-auto exec_net = ie.LoadNetwork(net, "GPU", config);
+auto net = ie.ReadNetwork("network.xml");
+auto exec_net = ie.LoadNetwork(net, "GPU");
 
 // obtain the RemoteContext pointer from the executable network object
 auto cldnn_context = exec_net.GetContext();
 
 // obtain the OpenCL context handle from the RemoteContext,
 // get device info and create a queue
-cl::Context ctx = std::dynamic_pointer_cast(cldnn_context);
+cl::Context ctx = std::dynamic_pointer_cast(cldnn_context);
 _device = cl::Device(_context.getInfo()[0].get(), true);
 cl::CommandQueue _queue;
 cl_command_queue_properties props = CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE;
@@ -41,11 +42,11 @@ auto shared_blob = gpu::make_shared_blob(input_info->getTensorDesc(), cldnn_cont
 cl::Kernel kernel(program, kernelName.c_str());
 kernel.setArg(0, shared_buffer);
 queue.enqueueNDRangeKernel(kernel,
-                           cl::NDRange(0),
-                           cl::NDRange(image_size),
-                           cl::NDRange(1),
-                           0, // wait events *
-                           &profileEvent);
+                           cl::NDRange(0),
+                           cl::NDRange(image_size),
+                           cl::NDRange(1),
+                           0, // wait events *
+                           &profileEvent);
 queue.finish();
 // ...
diff --git a/docs/snippets/HETERO0.cpp b/docs/snippets/HETERO0.cpp
index af153c7..a6197af 100644
--- a/docs/snippets/HETERO0.cpp
+++ b/docs/snippets/HETERO0.cpp
@@ -3,14 +3,12 @@
 #include "hetero/hetero_plugin_config.hpp"
 
 int main() {
-using namespace InferenceEngine;
-using namespace ngraph;
-Core core;
+InferenceEngine::Core core;
 auto network = core.ReadNetwork("sample.xml");
 auto function = network.getFunction();
 //! [part0]
 for (auto && op : function->get_ops())
-    op->get_rt_info()["affinity"] = std::shared_ptr>("CPU");
+    op->get_rt_info()["affinity"] = std::make_shared<ngraph::VariantWrapper<std::string>>("CPU");
 //! [part0]
 return 0;
 }
diff --git a/docs/snippets/Integrate_with_customer_application_new_API.cpp b/docs/snippets/Integrate_with_customer_application_new_API.cpp
index 9e7e6e2..8793caf 100644
--- a/docs/snippets/Integrate_with_customer_application_new_API.cpp
+++ b/docs/snippets/Integrate_with_customer_application_new_API.cpp
@@ -1,19 +1,20 @@
 #include
 
 int main() {
-using namespace InferenceEngine;
 const std::string output_name = "output_name";
 const std::string input_name = "input_name";
 //! [part0]
 InferenceEngine::Core core;
+InferenceEngine::CNNNetwork network;
+InferenceEngine::ExecutableNetwork executable_network;
 //! [part0]
 
 //! [part1]
-auto network = core.ReadNetwork("Model.xml");
+network = core.ReadNetwork("Model.xml");
 //! [part1]
 
 //! [part2]
-auto network = core.ReadNetwork("model.onnx");
+network = core.ReadNetwork("model.onnx");
 //! [part2]
 
 //! [part3]
@@ -27,27 +28,27 @@ InferenceEngine::OutputsDataMap output_info = network.getOutputsInfo();
 /** Iterate over all input info**/
 for (auto &item : input_info) {
     auto input_data = item.second;
-    input_data->setPrecision(Precision::U8);
-    input_data->setLayout(Layout::NCHW);
-    input_data->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
-    input_data->getPreProcess().setColorFormat(ColorFormat::RGB);
+    input_data->setPrecision(InferenceEngine::Precision::U8);
+    input_data->setLayout(InferenceEngine::Layout::NCHW);
+    input_data->getPreProcess().setResizeAlgorithm(InferenceEngine::RESIZE_BILINEAR);
+    input_data->getPreProcess().setColorFormat(InferenceEngine::ColorFormat::RGB);
 }
 
 /** Iterate over all output info**/
 for (auto &item : output_info) {
     auto output_data = item.second;
-    output_data->setPrecision(Precision::FP32);
-    output_data->setLayout(Layout::NC);
+    output_data->setPrecision(InferenceEngine::Precision::FP32);
+    output_data->setLayout(InferenceEngine::Layout::NC);
 }
 //! [part4]
 
 //! [part5]
-auto executable_network = core.LoadNetwork(network, "CPU");
+executable_network = core.LoadNetwork(network, "CPU");
 //! [part5]
 
 //! [part6]
 /** Optional config. E.g. this enables profiling of performance counters. **/
-std::map<std::string, std::string> config = {{ PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES }};
-auto executable_network = core.LoadNetwork(network, "CPU", config);
+std::map<std::string, std::string> config = {{ InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES }};
+executable_network = core.LoadNetwork(network, "CPU", config);
 //! [part6]
 
 //! [part7]
@@ -93,7 +94,9 @@ for (auto & item : input_info) {
     /** Create input blob **/
     InferenceEngine::TBlob<unsigned char>::Ptr input;
     // assuming input precision was asked to be U8 in prev step
-    input = InferenceEngine::make_shared_blob<unsigned char>(InferenceEngine::Precision::U8, input_data->getDims());
+    input = InferenceEngine::make_shared_blob<unsigned char>(
+        InferenceEngine::TensorDesc(InferenceEngine::Precision::U8, input_data->getTensorDesc().getDims(),
+                                    input_data->getTensorDesc().getLayout()));
     input->allocate();
 
     infer_request.SetBlob(item.first, input);
@@ -104,7 +107,7 @@ for (auto & item : input_info) {
 
 //! [part12]
 infer_request.StartAsync();
-infer_request.Wait(IInferRequest::WaitMode::RESULT_READY);
+infer_request.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY);
 //! [part12]
 
 auto sync_infer_request = executable_network.CreateInferRequest();
diff --git a/docs/snippets/MULTI3.cpp b/docs/snippets/MULTI3.cpp
index aff88d5..03426a6 100644
--- a/docs/snippets/MULTI3.cpp
+++ b/docs/snippets/MULTI3.cpp
@@ -3,18 +3,17 @@
 
 int main() {
-using namespace InferenceEngine;
 //! [part3]
-    Core ie;
+    InferenceEngine::Core ie;
     auto cnnNetwork = ie.ReadNetwork("sample.xml");
     std::string allDevices = "MULTI:";
-    std::vector<std::string> myriadDevices = ie.GetMetric("MYRIAD", METRIC_KEY(myriadDevices));
+    std::vector<std::string> myriadDevices = ie.GetMetric("MYRIAD", METRIC_KEY(AVAILABLE_DEVICES));
     for (int i = 0; i < myriadDevices.size(); ++i) {
         allDevices += std::string("MYRIAD.") + myriadDevices[i] + std::string(i < (myriadDevices.size() -1) ? "," : "");
     }
-    ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, allDevices, {});
+    InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, allDevices, {});
 //! [part3]
 return 0;
 }
diff --git a/docs/snippets/MULTI4.cpp b/docs/snippets/MULTI4.cpp
index 0c5034d..b050be2 100644
--- a/docs/snippets/MULTI4.cpp
+++ b/docs/snippets/MULTI4.cpp
@@ -3,18 +3,17 @@
 
 int main() {
-using namespace InferenceEngine;
-const std::map<std::string, std::string> hddl_config = { { PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES } };
-const std::map<std::string, std::string> gpu_config = { { PluginConfigParams::KEY_PERF_COUNT, PluginConfigParams::YES } };
+const std::map<std::string, std::string> hddl_config = { { InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES } };
+const std::map<std::string, std::string> gpu_config = { { InferenceEngine::PluginConfigParams::KEY_PERF_COUNT, InferenceEngine::PluginConfigParams::YES } };
 //! [part4]
 // configure the HDDL device first
-Core ie;
-CNNNetwork cnnNetwork = ie.ReadNetwork("sample.xml");
+InferenceEngine::Core ie;
+InferenceEngine::CNNNetwork cnnNetwork = ie.ReadNetwork("sample.xml");
 ie.SetConfig(hddl_config, "HDDL");
 // configure the GPU device
 ie.SetConfig(gpu_config, "GPU");
 // load the network to the multi-device, while specifying the configuration (devices along with priorities):
-ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, "MULTI", {{MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "HDDL,GPU"}});
+InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, "MULTI", {{InferenceEngine::MultiDeviceConfigParams::KEY_MULTI_DEVICE_PRIORITIES, "HDDL,GPU"}});
 // new metric allows to query the optimal number of requests:
 uint32_t nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
 //! [part4]
diff --git a/docs/snippets/MULTI5.cpp b/docs/snippets/MULTI5.cpp
index 78b1152..8371773 100644
--- a/docs/snippets/MULTI5.cpp
+++ b/docs/snippets/MULTI5.cpp
@@ -3,14 +3,13 @@
 
 int main() {
-using namespace InferenceEngine;
 std::string device_name = "MULTI:HDDL,GPU";
 const std::map< std::string, std::string > full_config = {};
 //! [part5]
-Core ie;
-CNNNetwork cnnNetwork = ie.ReadNetwork("sample.xml");
+InferenceEngine::Core ie;
+InferenceEngine::CNNNetwork cnnNetwork = ie.ReadNetwork("sample.xml");
 // 'device_name' can be "MULTI:HDDL,GPU" to configure the multi-device to use HDDL and GPU
-ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device_name, full_config);
+InferenceEngine::ExecutableNetwork exeNetwork = ie.LoadNetwork(cnnNetwork, device_name, full_config);
 // new metric allows to query the optimal number of requests:
 uint32_t nireq = exeNetwork.GetMetric(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)).as<unsigned int>();
 //! [part5]
diff --git a/docs/snippets/Migration_CoreAPI.cpp b/docs/snippets/Migration_CoreAPI.cpp
index 0e7d32c..bbd23f6 100644
--- a/docs/snippets/Migration_CoreAPI.cpp
+++ b/docs/snippets/Migration_CoreAPI.cpp
@@ -1,8 +1,6 @@
 #include
-#include
 
 int main() {
-using namespace InferenceEngine;
 std::string deviceName = "Device name";
 //! [part0]
 InferenceEngine::InferencePlugin plugin = InferenceEngine::PluginDispatcher({ FLAGS_pp }).getPluginByDevice(FLAGS_d);
@@ -13,18 +11,18 @@ InferenceEngine::Core core;
 //! [part1]
 
 //! [part2]
-CNNNetReader network_reader;
+InferenceEngine::CNNNetReader network_reader;
 network_reader.ReadNetwork(fileNameToString(input_model));
 network_reader.ReadWeights(fileNameToString(input_model).substr(0, input_model.size() - 4) + ".bin");
-CNNNetwork network = network_reader.getNetwork();
+InferenceEngine::CNNNetwork network = network_reader.getNetwork();
 //! [part2]
 
 //! [part3]
-CNNNetwork network = core.ReadNetwork(input_model);
+InferenceEngine::CNNNetwork network = core.ReadNetwork(input_model);
 //! [part3]
 
 //! [part4]
-CNNNetwork network = core.ReadNetwork("model.onnx");
+InferenceEngine::CNNNetwork network = core.ReadNetwork("model.onnx");
 //! [part4]
 
 //! [part5]
diff --git a/docs/snippets/OnnxImporterTutorial0.cpp b/docs/snippets/OnnxImporterTutorial0.cpp
index e6bbd80..c2d31c0 100644
--- a/docs/snippets/OnnxImporterTutorial0.cpp
+++ b/docs/snippets/OnnxImporterTutorial0.cpp
@@ -1,11 +1,10 @@
 #include
 #include
-#include "onnx/onnx-ml.pb.h"
+#include "onnx_import/onnx.hpp"
 #include
 #include
 
 int main() {
-using namespace InferenceEngine;
 //! [part0]
 const std::int64_t version = 12;
 const std::string domain = "ai.onnx";
diff --git a/docs/snippets/OnnxImporterTutorial1.cpp b/docs/snippets/OnnxImporterTutorial1.cpp
index 63f0eee..f2fcbd2 100644
--- a/docs/snippets/OnnxImporterTutorial1.cpp
+++ b/docs/snippets/OnnxImporterTutorial1.cpp
@@ -1,9 +1,8 @@
 #include
 #include
-#include "ngraph/frontend/onnx_import/onnx_utils.hpp"
+#include "onnx_import/onnx.hpp"
 
 int main() {
-using namespace InferenceEngine;
 //! [part1]
 const std::string op_name = "Abs";
 const std::int64_t version = 12;
diff --git a/docs/snippets/OnnxImporterTutorial2.cpp b/docs/snippets/OnnxImporterTutorial2.cpp
index 14c2cc5..752e800 100644
--- a/docs/snippets/OnnxImporterTutorial2.cpp
+++ b/docs/snippets/OnnxImporterTutorial2.cpp
@@ -1,15 +1,14 @@
 #include
 #include
-#include "ngraph/frontend/onnx_import/onnx.hpp"
+#include "onnx_import/onnx.hpp"
 #include
+#include
 
 int main() {
-using namespace InferenceEngine;
-using namespace ngraph;
 //! [part2]
-    const std::string resnet50_path = "resnet50/model.onnx";
+    const char * resnet50_path = "resnet50/model.onnx";
     std::ifstream resnet50_stream(resnet50_path);
-    if(resnet50_stream.is_open())
+    if (resnet50_stream.is_open())
     {
         try
         {
diff --git a/docs/snippets/OnnxImporterTutorial3.cpp b/docs/snippets/OnnxImporterTutorial3.cpp
index 98ba549..12414da 100644
--- a/docs/snippets/OnnxImporterTutorial3.cpp
+++ b/docs/snippets/OnnxImporterTutorial3.cpp
@@ -1,12 +1,11 @@
 #include
 #include
-#include "ngraph/frontend/onnx_import/onnx.hpp"
+#include "onnx_import/onnx.hpp"
 #include
 
 int main() {
-using namespace InferenceEngine;
-using namespace ngraph;
 //! [part3]
+const char * resnet50_path = "resnet50/model.onnx";
 const std::shared_ptr<ngraph::Function> ng_function = ngraph::onnx_import::import_onnx_model(resnet50_path);
 //! [part3]
 return 0;
diff --git a/docs/snippets/ShapeInference.cpp b/docs/snippets/ShapeInference.cpp
index f505d2a..20688ee 100644
--- a/docs/snippets/ShapeInference.cpp
+++ b/docs/snippets/ShapeInference.cpp
@@ -1,17 +1,15 @@
 #include
-#include
+#include
 #include
 #include
 
 int main() {
-using namespace InferenceEngine;
-using namespace cv;
 int batch_size = 1;
 //! [part0]
 InferenceEngine::Core core;
 // ------------- 0. Read IR and image ----------------------------------------------
-    CNNNetwork network = core.ReadNetwork("path/to/IR/xml");
+    InferenceEngine::CNNNetwork network = core.ReadNetwork("path/to/IR/xml");
     cv::Mat image = cv::imread("path/to/image");
 // ---------------------------------------------------------------------------------
@@ -21,7 +19,7 @@ int batch_size = 1;
 // ------------- 2. Set new input shapes -------------------------------------------
     std::string input_name;
-    SizeVector input_shape;
+    InferenceEngine::SizeVector input_shape;
     std::tie(input_name, input_shape) = *input_shapes.begin(); // let's consider first input only
     input_shape[0] = batch_size; // set batch size to the first input dimension
     input_shape[2] = image.rows; // changes input height to the image one
@@ -37,10 +35,9 @@ int batch_size = 1;
 // ------------- 4. Loading model to the device ------------------------------------
     std::string device = "CPU";
-    ExecutableNetwork executable_network = core.LoadNetwork(network, device);
+    InferenceEngine::ExecutableNetwork executable_network = core.LoadNetwork(network, device);
 // ---------------------------------------------------------------------------------
-
 //! [part0]
 
 return 0;
diff --git a/docs/snippets/dldt_optimization_guide3.cpp b/docs/snippets/dldt_optimization_guide3.cpp
index cf5b7fd..76fa96c 100644
--- a/docs/snippets/dldt_optimization_guide3.cpp
+++ b/docs/snippets/dldt_optimization_guide3.cpp
@@ -7,12 +7,12 @@ int main() {
 using namespace InferenceEngine;
 //! [part3]
 InferenceEngine::SizeVector dims_src = {
-    1 /* batch, N*/,
-    (size_t) frame_in->Info.Height /* Height */,
-    (size_t) frame_in->Info.Width /* Width */,
-    3 /*Channels,*/,
-    };
-TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NHWC);
+    1 /* batch, N*/,
+    (size_t) frame_in->Info.Height /* Height */,
+    (size_t) frame_in->Info.Width /* Width */,
+    3 /*Channels,*/,
+    };
+InferenceEngine::TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NHWC);
 /* wrapping the surface data, as RGB is interleaved, need to pass only ptr to the R, notice that this wouldn’t work with planar formats as these are 3 separate planes/pointers*/
 InferenceEngine::TBlob<uint8_t>::Ptr p = InferenceEngine::make_shared_blob<uint8_t>( desc, (uint8_t*) frame_in->Data.R);
 inferRequest.SetBlob("input", p);
diff --git a/docs/snippets/dldt_optimization_guide4.cpp b/docs/snippets/dldt_optimization_guide4.cpp
index 39a21ca..8ba7c8d 100644
--- a/docs/snippets/dldt_optimization_guide4.cpp
+++ b/docs/snippets/dldt_optimization_guide4.cpp
@@ -7,8 +7,8 @@ int main() {
 using namespace InferenceEngine;
 //! [part4]
 InferenceEngine::SizeVector dims_src = {
-    1 /* batch, N*/,
-    3 /*Channels,*/,
+    1 /* batch, N*/,
+    3 /*Channels,*/,
     (size_t) frame_in->Info.Height /* Height */,
     (size_t) frame_in->Info.Width /* Width */,
     };
diff --git a/docs/snippets/dldt_optimization_guide5.cpp b/docs/snippets/dldt_optimization_guide5.cpp
index 6bb4137..3db0ee5 100644
--- a/docs/snippets/dldt_optimization_guide5.cpp
+++ b/docs/snippets/dldt_optimization_guide5.cpp
@@ -5,27 +5,28 @@
 
 int main() {
-using namespace InferenceEngine;
+InferenceEngine::InferRequest inferRequest;
 //! [part5]
-cv::Mat frame = ...; // regular CV_8UC3 image, interleaved
+cv::Mat frame(cv::Size(100, 100), CV_8UC3); // regular CV_8UC3 image, interleaved
 // creating blob that wraps the OpenCV’s Mat
 // (the data it points should persists until the blob is released):
 InferenceEngine::SizeVector dims_src = {
-    1 /* batch, N*/,
-    (size_t)frame.rows /* Height */,
-    (size_t)frame.cols /* Width */,
-    (size_t)frame.channels() /*Channels,*/,
-    };
-TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NHWC);
+    1 /* batch, N*/,
+    (size_t)frame.rows /* Height */,
+    (size_t)frame.cols /* Width */,
+    (size_t)frame.channels() /*Channels,*/,
+    };
+InferenceEngine::TensorDesc desc(InferenceEngine::Precision::U8, dims_src, InferenceEngine::NHWC);
 InferenceEngine::TBlob<uint8_t>::Ptr p = InferenceEngine::make_shared_blob<uint8_t>( desc, (uint8_t*)frame.data, frame.step[0] * frame.rows);
 inferRequest.SetBlob("input", p);
 inferRequest.Infer();
 // …
 // similarly, you can wrap the output tensor (let’s assume it is FP32)
 // notice that the output should be also explicitly stated as NHWC with setLayout
-const float* output_data = output_blob->buffer().
-    as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::FP32>::value_type*>();
-cv::Mat res (rows, cols, CV_32FC3, output_data, CV_AUTOSTEP);
+auto output_blob = inferRequest.GetBlob("output");
+const float* output_data = output_blob->buffer().as<float*>();
+auto dims = output_blob->getTensorDesc().getDims();
+cv::Mat res (dims[2], dims[3], CV_32FC3, (void *)output_data);
 //! [part5]
 
 return 0;
diff --git a/docs/snippets/dldt_optimization_guide6.cpp b/docs/snippets/dldt_optimization_guide6.cpp
index c3a93fb..4e83fcd 100644
--- a/docs/snippets/dldt_optimization_guide6.cpp
+++ b/docs/snippets/dldt_optimization_guide6.cpp
@@ -6,23 +6,21 @@ int main() {
 using namespace InferenceEngine;
 //! [part6]
-Core ie;
+InferenceEngine::Core ie;
 auto network = ie.ReadNetwork("Model.xml", "Model.bin");
 InferenceEngine::InputsDataMap input_info(network.getInputsInfo());
-
 auto executable_network = ie.LoadNetwork(network, "GPU");
 auto infer_request = executable_network.CreateInferRequest();
 
 for (auto & item : input_info) {
-    std::string input_name = item.first;
-    auto input = infer_request.GetBlob(input_name);
-    /** Lock/Fill input tensor with data **/
-    unsigned char* data = input->buffer().as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::U8>::value_type*>();
-// ...
+    std::string input_name = item.first;
+    auto input = infer_request.GetBlob(input_name);
+    /** Lock/Fill input tensor with data **/
+    unsigned char* data = input->buffer().as<InferenceEngine::PrecisionTrait<InferenceEngine::Precision::U8>::value_type*>();
+    // ...
 }
-
 infer_request.Infer();
 //! [part6]
 
 return 0;
diff --git a/docs/snippets/dldt_optimization_guide7.cpp b/docs/snippets/dldt_optimization_guide7.cpp
index 3c807cf..75ec3b5 100644
--- a/docs/snippets/dldt_optimization_guide7.cpp
+++ b/docs/snippets/dldt_optimization_guide7.cpp
@@ -4,14 +4,15 @@
 
 int main() {
-using namespace InferenceEngine;
-Core plugin;
-auto network0 = plugin.ReadNetwork("sample.xml");
-auto network1 = plugin.ReadNetwork("sample.xml");
+InferenceEngine::Core core;
+auto network0 = core.ReadNetwork("sample.xml");
+auto network1 = core.ReadNetwork("sample.xml");
 //! [part7]
 //these two networks go thru same plugin (aka device) and their requests will not overlap.
-auto executable_network0 = plugin.LoadNetwork(network0, "CPU", {{PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, PluginConfigParams::YES}});
-auto executable_network1 = plugin.LoadNetwork(network1, "GPU", {{PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, PluginConfigParams::YES}});
+auto executable_network0 = core.LoadNetwork(network0, "CPU",
+    {{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::YES}});
+auto executable_network1 = core.LoadNetwork(network1, "GPU",
+    {{InferenceEngine::PluginConfigParams::KEY_EXCLUSIVE_ASYNC_REQUESTS, InferenceEngine::PluginConfigParams::YES}});
 //! [part7]
 return 0;
 }
diff --git a/docs/snippets/dldt_optimization_guide8.cpp b/docs/snippets/dldt_optimization_guide8.cpp
index 56bd8eb..6f015dd 100644
--- a/docs/snippets/dldt_optimization_guide8.cpp
+++ b/docs/snippets/dldt_optimization_guide8.cpp
@@ -4,13 +4,12 @@
 
 int main() {
-using namespace InferenceEngine;
 //! [part8]
-while(…) {
-    capture frame
-    populate CURRENT InferRequest
-    Infer CURRENT InferRequest //this call is synchronous
-    display CURRENT result
+while(true) {
+    // capture frame
+    // populate CURRENT InferRequest
+    // Infer CURRENT InferRequest //this call is synchronous
+    // display CURRENT result
 }
 //! [part8]
 return 0;
diff --git a/docs/snippets/dldt_optimization_guide9.cpp b/docs/snippets/dldt_optimization_guide9.cpp
index 6f4d9f3..fc78954 100644
--- a/docs/snippets/dldt_optimization_guide9.cpp
+++ b/docs/snippets/dldt_optimization_guide9.cpp
@@ -6,13 +6,13 @@ int main() {
 using namespace InferenceEngine;
 //! [part9]
-while(…) {
-    capture frame
-    populate NEXT InferRequest
-    start NEXT InferRequest //this call is async and returns immediately
-    wait for the CURRENT InferRequest //processed in a dedicated thread
-    display CURRENT result
-    swap CURRENT and NEXT InferRequests
+while(true) {
+    // capture frame
+    // populate NEXT InferRequest
+    // start NEXT InferRequest //this call is async and returns immediately
+    // wait for the CURRENT InferRequest //processed in a dedicated thread
+    // display CURRENT result
+    // swap CURRENT and NEXT InferRequests
 }
 //! [part9]
 return 0;
diff --git a/docs/examples/example_async_infer_request.cpp b/docs/snippets/example_async_infer_request.cpp
similarity index 93%
rename from docs/examples/example_async_infer_request.cpp
rename to docs/snippets/example_async_infer_request.cpp
index b62ba6d..b8b2146 100644
--- a/docs/examples/example_async_infer_request.cpp
+++ b/docs/snippets/example_async_infer_request.cpp
@@ -10,13 +10,13 @@ using namespace InferenceEngine;
 
 class AcceleratorSyncRequest : public InferRequestInternal {
 public:
-    using Ptr = std::shared_ptr<AcceleratorSyncRequest>;
+    using Ptr = std::shared_ptr<AcceleratorSyncRequest>;
 
-    void Preprocess();
-    void WriteToDevice();
-    void RunOnDevice();
-    void ReadFromDevice();
-    void PostProcess();
+    void Preprocess();
+    void WriteToDevice();
+    void RunOnDevice();
+    void ReadFromDevice();
+    void PostProcess();
 };
 
 // ! [async_infer_request:define_pipeline]
diff --git a/docs/examples/example_itask_executor.cpp b/docs/snippets/example_itask_executor.cpp
similarity index 100%
rename from docs/examples/example_itask_executor.cpp
rename to docs/snippets/example_itask_executor.cpp
diff --git a/docs/examples/example_ngraph_utils.cpp b/docs/snippets/example_ngraph_utils.cpp
similarity index 100%
rename from docs/examples/example_ngraph_utils.cpp
rename to docs/snippets/example_ngraph_utils.cpp
diff --git a/docs/snippets/movidius-programming-guide.cpp b/docs/snippets/movidius-programming-guide.cpp
index cba07ca..c021361 100644
--- a/docs/snippets/movidius-programming-guide.cpp
+++ b/docs/snippets/movidius-programming-guide.cpp
@@ -1,8 +1,8 @@
 #include
 
 int main() {
-using namespace InferenceEngine;
-Core core;
+InferenceEngine::Core core;
+InferenceEngine::IInferRequest::CompletionCallback callback;
 int numRequests = 42;
 int i = 1;
 auto network = core.ReadNetwork("sample.xml");
@@ -30,7 +30,7 @@ request[i].inferRequest->StartAsync();
 //! [part3]
 
 //! [part4]
-request[i].inferRequest->SetCompletionCallback(InferenceEngine::IInferRequest::Ptr context);
+request[i].inferRequest->SetCompletionCallback(callback);
 //! [part4]
 
 return 0;
diff --git a/docs/snippets/nGraphTutorial.cpp b/docs/snippets/nGraphTutorial.cpp
index c0cca51..7e5be57 100644
--- a/docs/snippets/nGraphTutorial.cpp
+++ b/docs/snippets/nGraphTutorial.cpp
@@ -4,7 +4,6 @@
 
 int main() {
-using namespace InferenceEngine;
 //! [part0]
 
 using namespace std;
@@ -34,7 +33,7 @@ auto ng_function = make_shared(OutputVector{add1}, ParameterVector{arg
 //! [part0]
 
 //! [part1]
-CNNNetwork net (ng_function);
+InferenceEngine::CNNNetwork net (ng_function);
 //! [part1]
 
 return 0;
diff --git a/docs/snippets/protecting_model_guide.cpp b/docs/snippets/protecting_model_guide.cpp
index 71808c0..c340b3c 100644
--- a/docs/snippets/protecting_model_guide.cpp
+++ b/docs/snippets/protecting_model_guide.cpp
@@ -1,21 +1,32 @@
 #include
+#include
+#include
+
+void decrypt_file(std::ifstream & stream,
+                  const std::string & pass,
+                  std::vector<uint8_t> & result) {
+}
 
 int main() {
-using namespace InferenceEngine;
 //! [part0]
 std::vector<uint8_t> model;
 std::vector<uint8_t> weights;
+std::string password; // taken from an user
+std::ifstream model_file("model.xml"), weights_file("model.bin");
+
 // Read model files and decrypt them into temporary memory block
 decrypt_file(model_file, password, model);
 decrypt_file(weights_file, password, weights);
 //! [part0]
 
 //! [part1]
-Core core;
+InferenceEngine::Core core;
 // Load model from temporary memory block
 std::string strModel(model.begin(), model.end());
-CNNNetwork network = core.ReadNetwork(strModel, make_shared_blob({Precision::U8, {weights.size()}, C}, weights.data()));
+InferenceEngine::CNNNetwork network = core.ReadNetwork(strModel,
+    InferenceEngine::make_shared_blob<uint8_t>({InferenceEngine::Precision::U8,
+        {weights.size()}, InferenceEngine::C}, weights.data()));
 //! [part1]
 
 return 0;
-- 
2.7.4
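A note on how these snippet files are consumed: the //! [partN] comments are standard Doxygen snippet markers, and the Doxyfile change above adds ../snippets to EXAMPLE_PATH so that documentation pages can embed the marked regions with Doxygen's \snippet command. A minimal sketch of such a reference follows; the surrounding comment text is illustrative, and the exact path prefix depends on how EXAMPLE_PATH is resolved in a given Doxyfile.

/**
 * To disable bfloat16 enforcement on CPU:
 * \snippet Bfloat16Inference2.cpp part2
 */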