v4.15.0 (#10)
author HailoRT-Automation <98901220+HailoRT-Automation@users.noreply.github.com>
Thu, 5 Oct 2023 13:21:52 +0000 (16:21 +0300)
committer GitHub <noreply@github.com>
Thu, 5 Oct 2023 13:21:52 +0000 (16:21 +0300)
Co-authored-by: HailoRT-Automation <contact@hailo.ai>
330 files changed:
README.md
common/include/context_switch_defs.h
common/include/control_protocol.h
common/include/d2h_events.h
common/include/firmware_header.h
common/include/firmware_status.h
common/src/firmware_header_utils.c
hailort/.gitignore [new file with mode: 0644]
hailort/CMakeLists.txt
hailort/LICENSE-3RD-PARTY.md
hailort/cmake/execute_cmake.cmake
hailort/cmake/external/benchmark.cmake [new file with mode: 0644]
hailort/cmake/external/catch2.cmake [new file with mode: 0644]
hailort/cmake/external/dotwriter.cmake [new file with mode: 0644]
hailort/cmake/external/json.cmake [new file with mode: 0644]
hailort/cmake/external/pevents.cmake [new file with mode: 0644]
hailort/cmake/external/pybind11.cmake [new file with mode: 0644]
hailort/cmake/external/readerwriterqueue.cmake [new file with mode: 0644]
hailort/cmake/external/spdlog.cmake [new file with mode: 0644]
hailort/common/CMakeLists.txt
hailort/common/circular_buffer.hpp
hailort/common/event_internal.cpp
hailort/common/event_internal.hpp
hailort/common/fork_support.cpp [new file with mode: 0644]
hailort/common/fork_support.hpp [new file with mode: 0644]
hailort/common/utils.hpp
hailort/drivers/common/hailo_ioctl_common.h
hailort/hailort_service/CMakeLists.txt
hailort/hailort_service/hailort_rpc_service.cpp
hailort/hailort_service/hailort_rpc_service.hpp
hailort/hailort_service/hailort_service
hailort/hailort_service/service_resource_manager.hpp
hailort/hailort_service/unix/hailort_service.cpp
hailort/hailort_service/windows/hailort_service.cpp
hailort/hailortcli/CMakeLists.txt
hailort/hailortcli/download_action_list_command.cpp
hailort/hailortcli/download_action_list_command.hpp
hailort/hailortcli/fw_control_command.cpp
hailort/hailortcli/fw_control_command.hpp
hailort/hailortcli/hailortcli.hpp
hailort/hailortcli/inference_progress.cpp
hailort/hailortcli/mon_command.cpp
hailort/hailortcli/mon_command.hpp
hailort/hailortcli/run2/io_wrappers.hpp
hailort/hailortcli/run2/live_stats.cpp
hailort/hailortcli/run2/live_stats.hpp
hailort/hailortcli/run2/network_live_track.cpp
hailort/hailortcli/run2/network_live_track.hpp
hailort/hailortcli/run2/network_runner.cpp
hailort/hailortcli/run2/network_runner.hpp
hailort/hailortcli/run2/run2_command.cpp
hailort/hailortcli/run2/timer_live_track.cpp
hailort/hailortcli/run2/timer_live_track.hpp
hailort/hailortcli/run_command.cpp
hailort/libhailort/CMakeLists.txt
hailort/libhailort/bindings/gstreamer/CMakeLists.txt
hailort/libhailort/bindings/gstreamer/gst-hailo/gsthailonet.cpp
hailort/libhailort/bindings/gstreamer/gst-hailo/gsthailonet.hpp
hailort/libhailort/bindings/gstreamer/gst-hailo/gsthailosend.cpp
hailort/libhailort/bindings/gstreamer/gst-hailo/gsthailosend.hpp
hailort/libhailort/bindings/gstreamer/gst-hailo/network_group_handle.cpp
hailort/libhailort/bindings/gstreamer/gst-hailo/network_group_handle.hpp
hailort/libhailort/bindings/python/CMakeLists.txt
hailort/libhailort/bindings/python/externals/pybind11.cmake [deleted file]
hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/hw_object.py
hailort/libhailort/bindings/python/platform/hailo_platform/pyhailort/pyhailort.py
hailort/libhailort/bindings/python/platform/hailo_tutorials/notebooks/HRT_0_Inference_Tutorial.ipynb
hailort/libhailort/bindings/python/platform/hailo_tutorials/notebooks/HRT_1_Power_Measurement_Tutorial.ipynb
hailort/libhailort/bindings/python/platform/hailo_tutorials/notebooks/HRT_2_Inference_Tutorial_Multi_Process_Service.ipynb
hailort/libhailort/bindings/python/platform/setup.py
hailort/libhailort/bindings/python/src/CMakeLists.txt
hailort/libhailort/bindings/python/src/bindings_common.hpp
hailort/libhailort/bindings/python/src/device_api.cpp
hailort/libhailort/bindings/python/src/device_api.hpp
hailort/libhailort/bindings/python/src/hef_api.cpp
hailort/libhailort/bindings/python/src/hef_api.hpp
hailort/libhailort/bindings/python/src/internal/CMakeLists.txt
hailort/libhailort/bindings/python/src/internal/pyhailort_internal.cpp
hailort/libhailort/bindings/python/src/internal/pyhailort_internal.hpp
hailort/libhailort/bindings/python/src/network_group_api.cpp [new file with mode: 0644]
hailort/libhailort/bindings/python/src/network_group_api.hpp [new file with mode: 0644]
hailort/libhailort/bindings/python/src/pyhailort.cpp
hailort/libhailort/bindings/python/src/quantization_api.cpp
hailort/libhailort/bindings/python/src/quantization_api.hpp
hailort/libhailort/bindings/python/src/vdevice_api.hpp
hailort/libhailort/bindings/python/src/vstream_api.cpp
hailort/libhailort/bindings/python/src/vstream_api.hpp
hailort/libhailort/cmake/toolchains/toolchains.yaml
hailort/libhailort/examples/README.md
hailort/libhailort/examples/c/data_quantization_example/CMakeLists.txt
hailort/libhailort/examples/c/infer_pipeline_example/CMakeLists.txt
hailort/libhailort/examples/c/multi_device_example/CMakeLists.txt
hailort/libhailort/examples/c/multi_network_vstream_example/CMakeLists.txt
hailort/libhailort/examples/c/notification_callback_example/CMakeLists.txt
hailort/libhailort/examples/c/power_measurement_example/CMakeLists.txt
hailort/libhailort/examples/c/raw_async_streams_single_thread_example/CMakeLists.txt
hailort/libhailort/examples/c/raw_streams_example/CMakeLists.txt
hailort/libhailort/examples/c/raw_streams_example/raw_streams_example.c
hailort/libhailort/examples/c/switch_network_groups_example/CMakeLists.txt
hailort/libhailort/examples/c/switch_network_groups_example/switch_network_groups_example.c
hailort/libhailort/examples/c/switch_network_groups_manually_example/CMakeLists.txt
hailort/libhailort/examples/c/vstreams_example/CMakeLists.txt
hailort/libhailort/examples/cpp/CMakeLists.txt
hailort/libhailort/examples/cpp/infer_pipeline_example/CMakeLists.txt
hailort/libhailort/examples/cpp/multi_device_example/CMakeLists.txt
hailort/libhailort/examples/cpp/multi_network_vstream_example/CMakeLists.txt
hailort/libhailort/examples/cpp/multi_process_example/CMakeLists.txt
hailort/libhailort/examples/cpp/multi_process_example/multi_process_example.ps1 [new file with mode: 0644]
hailort/libhailort/examples/cpp/notification_callback_example/CMakeLists.txt
hailort/libhailort/examples/cpp/power_measurement_example/CMakeLists.txt
hailort/libhailort/examples/cpp/raw_async_streams_multi_thread_example/CMakeLists.txt
hailort/libhailort/examples/cpp/raw_async_streams_single_thread_example/CMakeLists.txt
hailort/libhailort/examples/cpp/raw_streams_example/CMakeLists.txt
hailort/libhailort/examples/cpp/switch_network_groups_example/CMakeLists.txt
hailort/libhailort/examples/cpp/switch_network_groups_example/switch_network_groups_example.cpp
hailort/libhailort/examples/cpp/switch_network_groups_manually_example/CMakeLists.txt
hailort/libhailort/examples/cpp/vstreams_example/CMakeLists.txt
hailort/libhailort/hef.proto
hailort/libhailort/include/hailo/buffer.hpp
hailort/libhailort/include/hailo/device.hpp
hailort/libhailort/include/hailo/event.hpp
hailort/libhailort/include/hailo/hailort.h
hailort/libhailort/include/hailo/hailort.hpp
hailort/libhailort/include/hailo/hailort_common.hpp
hailort/libhailort/include/hailo/hef.hpp
hailort/libhailort/include/hailo/infer_model.hpp [new file with mode: 0644]
hailort/libhailort/include/hailo/inference_pipeline.hpp
hailort/libhailort/include/hailo/network_group.hpp
hailort/libhailort/include/hailo/quantization.hpp
hailort/libhailort/include/hailo/stream.hpp
hailort/libhailort/include/hailo/transform.hpp
hailort/libhailort/include/hailo/vdevice.hpp
hailort/libhailort/include/hailo/vstream.hpp
hailort/libhailort/scheduler_mon.proto
hailort/libhailort/src/CMakeLists.txt
hailort/libhailort/src/core_op/active_core_op_holder.hpp
hailort/libhailort/src/core_op/core_op.cpp
hailort/libhailort/src/core_op/core_op.hpp
hailort/libhailort/src/core_op/resource_manager/channel_allocator.hpp
hailort/libhailort/src/core_op/resource_manager/config_buffer.cpp
hailort/libhailort/src/core_op/resource_manager/intermediate_buffer.cpp
hailort/libhailort/src/core_op/resource_manager/intermediate_buffer.hpp
hailort/libhailort/src/core_op/resource_manager/resource_manager.cpp
hailort/libhailort/src/core_op/resource_manager/resource_manager.hpp
hailort/libhailort/src/core_op/resource_manager/resource_manager_builder.cpp
hailort/libhailort/src/device_common/CMakeLists.txt
hailort/libhailort/src/device_common/control.cpp
hailort/libhailort/src/device_common/control.hpp
hailort/libhailort/src/device_common/control_protocol.cpp
hailort/libhailort/src/device_common/control_protocol.hpp
hailort/libhailort/src/device_common/d2h_event_queue.cpp [new file with mode: 0644]
hailort/libhailort/src/device_common/d2h_event_queue.hpp
hailort/libhailort/src/device_common/d2h_events_parser.cpp
hailort/libhailort/src/device_common/device_internal.cpp
hailort/libhailort/src/eth/CMakeLists.txt
hailort/libhailort/src/eth/eth_device.cpp
hailort/libhailort/src/eth/eth_stream.cpp
hailort/libhailort/src/eth/eth_stream.hpp
hailort/libhailort/src/eth/hcp_config_activated_core_op.cpp [deleted file]
hailort/libhailort/src/eth/hcp_config_activated_core_op.hpp [deleted file]
hailort/libhailort/src/eth/hcp_config_core_op.cpp
hailort/libhailort/src/eth/hcp_config_core_op.hpp
hailort/libhailort/src/eth/network_rate_calculator.cpp
hailort/libhailort/src/hailort.cpp
hailort/libhailort/src/hailort_defaults.cpp
hailort/libhailort/src/hef/context_switch_actions.cpp
hailort/libhailort/src/hef/context_switch_actions.hpp
hailort/libhailort/src/hef/core_op_metadata.cpp
hailort/libhailort/src/hef/core_op_metadata.hpp
hailort/libhailort/src/hef/hef.cpp
hailort/libhailort/src/hef/hef_internal.hpp
hailort/libhailort/src/hef/layer_info.hpp
hailort/libhailort/src/hw_consts.hpp
hailort/libhailort/src/mipi/mipi_stream.cpp
hailort/libhailort/src/mipi/mipi_stream.hpp
hailort/libhailort/src/net_flow/CMakeLists.txt
hailort/libhailort/src/net_flow/ops/argmax_post_process.cpp
hailort/libhailort/src/net_flow/ops/argmax_post_process.hpp
hailort/libhailort/src/net_flow/ops/nms_post_process.cpp
hailort/libhailort/src/net_flow/ops/nms_post_process.hpp
hailort/libhailort/src/net_flow/ops/op.hpp
hailort/libhailort/src/net_flow/ops/op_metadata.hpp [new file with mode: 0644]
hailort/libhailort/src/net_flow/ops/softmax_post_process.cpp
hailort/libhailort/src/net_flow/ops/softmax_post_process.hpp
hailort/libhailort/src/net_flow/ops/ssd_post_process.cpp
hailort/libhailort/src/net_flow/ops/ssd_post_process.hpp
hailort/libhailort/src/net_flow/ops/stb_image_resize.h [new file with mode: 0644]
hailort/libhailort/src/net_flow/ops/yolo_post_process.cpp [deleted file]
hailort/libhailort/src/net_flow/ops/yolo_post_process.hpp [deleted file]
hailort/libhailort/src/net_flow/ops/yolov5_post_process.cpp [new file with mode: 0644]
hailort/libhailort/src/net_flow/ops/yolov5_post_process.hpp [new file with mode: 0644]
hailort/libhailort/src/net_flow/ops/yolov5_seg_post_process.cpp [new file with mode: 0644]
hailort/libhailort/src/net_flow/ops/yolov5_seg_post_process.hpp [new file with mode: 0644]
hailort/libhailort/src/net_flow/ops/yolox_post_process.cpp
hailort/libhailort/src/net_flow/ops/yolox_post_process.hpp
hailort/libhailort/src/net_flow/pipeline/async_infer_runner.cpp [new file with mode: 0644]
hailort/libhailort/src/net_flow/pipeline/async_infer_runner_internal.hpp [new file with mode: 0644]
hailort/libhailort/src/net_flow/pipeline/infer_model.cpp [new file with mode: 0644]
hailort/libhailort/src/net_flow/pipeline/infer_model_internal.hpp [new file with mode: 0644]
hailort/libhailort/src/net_flow/pipeline/inference_pipeline.cpp
hailort/libhailort/src/net_flow/pipeline/pipeline.cpp
hailort/libhailort/src/net_flow/pipeline/pipeline.hpp
hailort/libhailort/src/net_flow/pipeline/vstream.cpp
hailort/libhailort/src/net_flow/pipeline/vstream_internal.hpp
hailort/libhailort/src/network_group/network_group.cpp
hailort/libhailort/src/network_group/network_group_internal.hpp
hailort/libhailort/src/os/hailort_driver.hpp
hailort/libhailort/src/os/posix/hailort_driver.cpp
hailort/libhailort/src/os/posix/linux/event.cpp
hailort/libhailort/src/os/posix/qnx/event.cpp
hailort/libhailort/src/os/windows/event.cpp
hailort/libhailort/src/os/windows/hailort_driver.cpp
hailort/libhailort/src/service/hailort_rpc_client.cpp
hailort/libhailort/src/service/hailort_rpc_client.hpp
hailort/libhailort/src/service/network_group_client.cpp
hailort/libhailort/src/service/rpc_client_utils.hpp
hailort/libhailort/src/stream_common/CMakeLists.txt
hailort/libhailort/src/stream_common/async_common.hpp [deleted file]
hailort/libhailort/src/stream_common/async_stream_base.cpp [new file with mode: 0644]
hailort/libhailort/src/stream_common/async_stream_base.hpp [new file with mode: 0644]
hailort/libhailort/src/stream_common/nms_stream.cpp [new file with mode: 0644]
hailort/libhailort/src/stream_common/nms_stream.hpp [new file with mode: 0644]
hailort/libhailort/src/stream_common/nms_stream_reader.cpp [deleted file]
hailort/libhailort/src/stream_common/nms_stream_reader.hpp [deleted file]
hailort/libhailort/src/stream_common/queued_stream_buffer_pool.cpp [new file with mode: 0644]
hailort/libhailort/src/stream_common/queued_stream_buffer_pool.hpp [new file with mode: 0644]
hailort/libhailort/src/stream_common/remote_process_stream.cpp [new file with mode: 0644]
hailort/libhailort/src/stream_common/remote_process_stream.hpp [new file with mode: 0644]
hailort/libhailort/src/stream_common/stream.cpp
hailort/libhailort/src/stream_common/stream_buffer_pool.hpp [new file with mode: 0644]
hailort/libhailort/src/stream_common/stream_internal.cpp
hailort/libhailort/src/stream_common/stream_internal.hpp
hailort/libhailort/src/stream_common/transfer_common.cpp [new file with mode: 0644]
hailort/libhailort/src/stream_common/transfer_common.hpp [new file with mode: 0644]
hailort/libhailort/src/transform/transform.cpp
hailort/libhailort/src/transform/transform_internal.hpp
hailort/libhailort/src/utils/buffer.cpp
hailort/libhailort/src/utils/hailort_common.cpp
hailort/libhailort/src/utils/hailort_logger.cpp
hailort/libhailort/src/utils/hailort_logger.hpp
hailort/libhailort/src/utils/profiler/handler.hpp
hailort/libhailort/src/utils/profiler/monitor_handler.cpp
hailort/libhailort/src/utils/profiler/monitor_handler.hpp
hailort/libhailort/src/utils/profiler/profiler_utils.hpp [new file with mode: 0644]
hailort/libhailort/src/utils/profiler/scheduler_profiler_handler.cpp
hailort/libhailort/src/utils/profiler/scheduler_profiler_handler.hpp
hailort/libhailort/src/utils/profiler/tracer.cpp
hailort/libhailort/src/utils/shared_resource_manager.hpp
hailort/libhailort/src/utils/thread_safe_map.hpp
hailort/libhailort/src/utils/thread_safe_queue.hpp
hailort/libhailort/src/vdevice/CMakeLists.txt
hailort/libhailort/src/vdevice/callback_reorder_queue.hpp
hailort/libhailort/src/vdevice/scheduler/multi_device_scheduled_stream.cpp [deleted file]
hailort/libhailort/src/vdevice/scheduler/multi_device_scheduled_stream.hpp [deleted file]
hailort/libhailort/src/vdevice/scheduler/scheduled_core_op_cv.hpp [deleted file]
hailort/libhailort/src/vdevice/scheduler/scheduled_core_op_state.cpp
hailort/libhailort/src/vdevice/scheduler/scheduled_core_op_state.hpp
hailort/libhailort/src/vdevice/scheduler/scheduled_stream.cpp
hailort/libhailort/src/vdevice/scheduler/scheduled_stream.hpp
hailort/libhailort/src/vdevice/scheduler/scheduler.cpp
hailort/libhailort/src/vdevice/scheduler/scheduler.hpp
hailort/libhailort/src/vdevice/scheduler/scheduler_base.hpp
hailort/libhailort/src/vdevice/scheduler/scheduler_counter.hpp [new file with mode: 0644]
hailort/libhailort/src/vdevice/scheduler/scheduler_oracle.cpp
hailort/libhailort/src/vdevice/scheduler/scheduler_oracle.hpp
hailort/libhailort/src/vdevice/vdevice.cpp
hailort/libhailort/src/vdevice/vdevice_core_op.cpp
hailort/libhailort/src/vdevice/vdevice_core_op.hpp
hailort/libhailort/src/vdevice/vdevice_internal.hpp
hailort/libhailort/src/vdevice/vdevice_native_stream.cpp
hailort/libhailort/src/vdevice/vdevice_native_stream.hpp
hailort/libhailort/src/vdevice/vdevice_stream.cpp [deleted file]
hailort/libhailort/src/vdevice/vdevice_stream.hpp [deleted file]
hailort/libhailort/src/vdevice/vdevice_stream_multiplexer_wrapper.cpp
hailort/libhailort/src/vdevice/vdevice_stream_multiplexer_wrapper.hpp
hailort/libhailort/src/vdma/CMakeLists.txt
hailort/libhailort/src/vdma/channel/async_channel.cpp [deleted file]
hailort/libhailort/src/vdma/channel/async_channel.hpp [deleted file]
hailort/libhailort/src/vdma/channel/boundary_channel.cpp
hailort/libhailort/src/vdma/channel/boundary_channel.hpp
hailort/libhailort/src/vdma/channel/buffered_channel.cpp [deleted file]
hailort/libhailort/src/vdma/channel/buffered_channel.hpp [deleted file]
hailort/libhailort/src/vdma/channel/channel_base.cpp [deleted file]
hailort/libhailort/src/vdma/channel/channel_base.hpp [deleted file]
hailort/libhailort/src/vdma/channel/channel_state.cpp [deleted file]
hailort/libhailort/src/vdma/channel/channel_state.hpp [deleted file]
hailort/libhailort/src/vdma/channel/vdma_channel_regs.hpp
hailort/libhailort/src/vdma/circular_stream_buffer_pool.cpp [new file with mode: 0644]
hailort/libhailort/src/vdma/circular_stream_buffer_pool.hpp [new file with mode: 0644]
hailort/libhailort/src/vdma/integrated/integrated_device.cpp
hailort/libhailort/src/vdma/integrated/integrated_device.hpp
hailort/libhailort/src/vdma/memory/buffer_requirements.cpp
hailort/libhailort/src/vdma/memory/buffer_requirements.hpp
hailort/libhailort/src/vdma/memory/continuous_buffer.cpp
hailort/libhailort/src/vdma/memory/continuous_buffer.hpp
hailort/libhailort/src/vdma/memory/descriptor_list.cpp
hailort/libhailort/src/vdma/memory/descriptor_list.hpp
hailort/libhailort/src/vdma/memory/mapped_buffer.cpp
hailort/libhailort/src/vdma/memory/mapped_buffer.hpp
hailort/libhailort/src/vdma/memory/sg_buffer.cpp
hailort/libhailort/src/vdma/memory/sg_buffer.hpp
hailort/libhailort/src/vdma/memory/vdma_buffer.hpp
hailort/libhailort/src/vdma/pcie/pcie_device.cpp
hailort/libhailort/src/vdma/pcie/pcie_device.hpp
hailort/libhailort/src/vdma/vdma_async_stream.cpp [deleted file]
hailort/libhailort/src/vdma/vdma_async_stream.hpp [deleted file]
hailort/libhailort/src/vdma/vdma_config_activated_core_op.cpp
hailort/libhailort/src/vdma/vdma_config_activated_core_op.hpp [deleted file]
hailort/libhailort/src/vdma/vdma_config_core_op.cpp
hailort/libhailort/src/vdma/vdma_config_core_op.hpp
hailort/libhailort/src/vdma/vdma_config_manager.cpp
hailort/libhailort/src/vdma/vdma_config_manager.hpp
hailort/libhailort/src/vdma/vdma_device.cpp
hailort/libhailort/src/vdma/vdma_device.hpp
hailort/libhailort/src/vdma/vdma_stream.cpp
hailort/libhailort/src/vdma/vdma_stream.hpp
hailort/libhailort/src/vdma/vdma_stream_base.cpp [deleted file]
hailort/libhailort/src/vdma/vdma_stream_base.hpp [deleted file]
hailort/libhailort/tracer_profiler.proto [new file with mode: 0644]
hailort/pre_build/external/CMakeLists.txt
hailort/rpc/hailort_rpc.proto
hailort/rpc/rpc_definitions.hpp
hailort/scripts/download_firmware_eth.cmd
hailort/scripts/download_firmware_eth.sh
hailort/scripts/download_hefs.cmd
hailort/scripts/download_hefs.sh
hailort/tools/hailo15-scripts/load_driver.sh
hailort/tools/hailo15-scripts/read_log.sh
hailort/tools/hw_debug/CMakeLists.txt
hailort/tools/hw_debug/main.cpp

index ecfe8e0ee49c3418549c92ccc0652bbdbf374f27..2bfe283c26d1bb758c1a287adfdeb4393f6cafcb 100644 (file)
--- a/README.md
+++ b/README.md
@@ -40,13 +40,12 @@ HailoRT uses 2 licenses:
 
 Contact information and support is available at [**hailo.ai**](https://hailo.ai/contact-us/).
 
-## About Hailo-8™
+## About Hailo
 
-Hailo-8 is a deep learning processor for edge devices. The Hailo-8 provides groundbreaking efficiency for neural network deployment.
-The Hailo-8 edge AI processor, featuring up to 26 Tera-Operations-Per-Second (TOPS), significantly outperforms all other edge processors.
-Hailo-8 is available in various form-factors, including the Hailo-8 M.2 Module.
+Hailo offers breakthrough AI Inference Accelerators and AI Vision Processors uniquely designed to accelerate embedded deep learning applications on edge devices.
 
-The Hailo-8 AI processor is designed to fit into a multitude of smart machines and devices, for a wide variety of sectors including Automotive, Smart Cities, Industry 4.0,
-Retail and Smart Homes.
+The Hailo AI Inference Accelerators allow edge devices to run deep learning applications at full scale more efficiently, effectively, and sustainably, with an architecture that takes advantage of the core properties of neural networks.
 
-For more information, please visit [**hailo.ai**](https://hailo.ai/).
+The Hailo AI Vision Processors (SoC) combine Hailo's patented and field proven AI inferencing capabilities with advanced computer vision engines, generating premium image quality and advanced video analytics.
+
+For more information, please visit [**hailo.ai**](https://hailo.ai/).
\ No newline at end of file
index cf4858fc2dcdca564420136ee51c223a6c8aa408..7a88bd6c41b0e9ed510afee6a6e158e195758c1b 100644 (file)
@@ -66,6 +66,7 @@ typedef struct {
     uint16_t feature_padding_payload;
     uint16_t buffer_padding_payload;
     uint16_t buffer_padding;
+    bool is_periph_calculated_in_hailort;
 } CONTEXT_SWITCH_DEFS__stream_reg_info_t;
 
 #if defined(_MSC_VER)
@@ -103,12 +104,16 @@ typedef enum __attribute__((packed)) {
     CONTEXT_SWITCH_DEFS__ACTION_TYPE_FETCH_CCW_BURSTS,
     CONTEXT_SWITCH_DEFS__ACTION_TYPE_VALIDATE_VDMA_CHANNEL,
     CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_START,
+    CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_RESET,
     CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_RESET,
     CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_INPUT_CHANNEL,
     CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_OUTPUT_CHANNEL,
     CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS,
     CONTEXT_SWITCH_DEFS__ACTION_TYPE_WRITE_DATA_BY_TYPE,
     CONTEXT_SWITCH_DEFS__ACTION_TYPE_SWITCH_LCU_BATCH,
+    CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_BOUNDARY_INPUT_BATCH,
+    CONTEXT_SWITCH_DEFS__ACTION_TYPE_PAUSE_VDMA_CHANNEL,
+    CONTEXT_SWITCH_DEFS__ACTION_TYPE_RESUME_VDMA_CHANNEL,
 
     /* Must be last */
     CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT
@@ -214,7 +219,7 @@ typedef struct {
 typedef struct {
     uint8_t packed_vdma_channel_id;
     uint8_t edge_layer_direction;
-    bool is_inter_context;
+    bool check_host_empty_num_available;
     uint8_t host_buffer_type;  // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t
     uint32_t initial_credit_size;
 } CONTEXT_SWITCH_DEFS__deactivate_vdma_channel_action_data_t;
@@ -222,11 +227,21 @@ typedef struct {
 typedef struct {
     uint8_t packed_vdma_channel_id;
     uint8_t edge_layer_direction;
-    bool is_inter_context;
+    bool check_host_empty_num_available;
     uint8_t host_buffer_type;  // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t
     uint32_t initial_credit_size;
 } CONTEXT_SWITCH_DEFS__validate_vdma_channel_action_data_t;
 
+typedef struct {
+    uint8_t packed_vdma_channel_id;
+    uint8_t edge_layer_direction;
+} CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t;
+
+typedef struct {
+    uint8_t packed_vdma_channel_id;
+    uint8_t edge_layer_direction;
+} CONTEXT_SWITCH_DEFS__resume_vdma_channel_action_data_t;
+
 typedef enum {
     CONTEXT_SWITCH_DEFS__CREDIT_TYPE_UNINITIALIZED = 0,
     CONTEXT_SWITCH_DEFS__CREDIT_IN_BYTES,
@@ -239,7 +254,6 @@ typedef struct {
     uint8_t network_index;
     uint32_t frame_periph_size;
     uint8_t credit_type;  // CONTEXT_SWITCH_DEFS__CREDIT_TYPE_t
-    uint16_t periph_bytes_per_buffer;
     uint8_t host_buffer_type;  // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t, relevant only for descriptors credit.
 } CONTEXT_SWITCH_DEFS__fetch_data_action_data_t;
 
@@ -264,6 +278,10 @@ typedef struct {
 
 typedef struct {
     uint8_t packed_vdma_channel_id;
+    uint8_t stream_index;
+    uint8_t network_index;
+    bool is_inter_context;
+    uint8_t host_buffer_type;  // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t
 } CONTEXT_SWITCH_DEFS__vdma_dataflow_interrupt_data_t;
 
 typedef struct {
@@ -283,6 +301,7 @@ typedef struct {
     uint8_t packed_vdma_channel_id;
     uint8_t stream_index;
     bool is_inter_context;
+    uint8_t host_buffer_type;  // CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t
 } CONTEXT_SWITCH_DEFS__wait_dma_idle_data_t;
 
 typedef struct {
@@ -319,6 +338,7 @@ typedef struct {
 typedef struct {
     uint8_t packed_vdma_channel_id;
     uint8_t stream_index;
+    uint8_t network_index;
     CONTEXT_SWITCH_DEFS__stream_reg_info_t stream_reg_info;
     CONTROL_PROTOCOL__host_buffer_info_t host_buffer_info;
 } CONTEXT_SWITCH_DEFS__activate_boundary_output_data_t;
@@ -342,6 +362,10 @@ typedef struct {
 typedef struct {
     uint8_t packed_vdma_channel_id;
     CONTROL_PROTOCOL__host_buffer_info_t host_buffer_info;
+    uint8_t stream_index;
+    uint8_t network_index;
+    uint16_t periph_bytes_per_buffer;
+    uint32_t frame_periph_size;
 } CONTEXT_SWITCH_DEFS__open_boundary_input_channel_data_t;
 
 typedef struct {
@@ -365,6 +389,7 @@ typedef struct {
     uint8_t network_index;
     uint16_t number_of_classes;
     uint16_t burst_size;
+    uint8_t division_factor;
 } CONTEXT_SWITCH_DEFS__enable_nms_action_t;
 
 typedef enum {
@@ -390,6 +415,10 @@ typedef struct {
     uint32_t kernel_done_count;
 } CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t;
 
+typedef struct {
+    uint8_t packed_vdma_channel_id;
+} CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t;
+
 #pragma pack(pop)
 
 #ifdef __cplusplus
index 73d31d5fde56596ceabcfe0d9f88a3acf18606bc..28ad41c41ef49fc0c300f7f9933ba88b37633a46 100644 (file)
@@ -158,6 +158,7 @@ extern "C" {
     CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_GET_HW_CONSTS,                             false, CPU_ID_CORE_CPU)\
     CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_SET_SLEEP_STATE,                           false, CPU_ID_APP_CPU)\
     CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_CHANGE_HW_INFER_STATUS,                    false, CPU_ID_CORE_CPU)\
+    CONTROL_PROTOCOL__OPCODE_X(HAILO_CONTROL_OPCODE_SIGNAL_DRIVER_DOWN,                        false, CPU_ID_CORE_CPU)\
 
 typedef enum {
 #define CONTROL_PROTOCOL__OPCODE_X(name, is_critical, cpu_id) name,
@@ -344,7 +345,8 @@ typedef enum {
     CONTROL_PROTOCOL__HAILO8_A0 = 0,
     CONTROL_PROTOCOL__HAILO8,
     CONTROL_PROTOCOL__HAILO8L,
-    CONTROL_PROTOCOL__HAILO15,
+    CONTROL_PROTOCOL__HAILO15H,
+    CONTROL_PROTOCOL__PLUTO,
     /* Must be last!! */
     CONTROL_PROTOCOL__DEVICE_ARCHITECTURE_COUNT
 } CONTROL_PROTOCOL__device_architecture_t;
@@ -439,6 +441,7 @@ typedef struct {
     uint16_t feature_padding_payload;
     uint16_t buffer_padding_payload;
     uint16_t buffer_padding;
+    bool is_periph_calculated_in_hailort;
 } CONTROL_PROTOCOL__nn_stream_config_t;
 
 typedef struct {
@@ -1027,8 +1030,6 @@ typedef struct {
     uint16_t dynamic_batch_size;
     uint32_t batch_count_length;
     uint16_t batch_count;
-    uint32_t keep_nn_config_during_reset_length;
-    uint8_t keep_nn_config_during_reset;
 } CONTROL_PROTOCOL__change_context_switch_status_request_t;
 
 typedef struct {
index b9009ef4d886ca4035733e83c132bed8e30cbf1b..1e402d6c6bdf732a910707805010b43b488ca100 100644 (file)
@@ -58,6 +58,7 @@ typedef enum {
     CONTEXT_SWITCH_BREAKPOINT_REACHED,
     HEALTH_MONITOR_CLOCK_CHANGED_EVENT_ID,
     HW_INFER_MANAGER_INFER_DONE,
+    CONTEXT_SWITCH_RUN_TIME_ERROR,
 
     D2H_EVENT_ID_COUNT /* Must be last*/
 } D2H_EVENT_ID_t;
@@ -146,6 +147,16 @@ typedef struct {
 
 #define D2H_EVENT_HW_INFER_MANAGER_INFER_DONE_PARAMETER_COUNT  (1)
 
+typedef struct {
+    uint32_t exit_status;
+    uint8_t application_index;
+    uint16_t batch_index;
+    uint8_t context_index;
+    uint16_t action_index;
+} D2H_EVENT_context_switch_run_time_error_event_message_t;
+
+#define D2H_EVENT_CONTEXT_SWITCH_RUN_TIME_ERROR_EVENT_PARAMETER_COUNT  (5)
+
 /* D2H_EVENT__message_parameters_t should be in the same order as hailo_notification_message_parameters_t */
 typedef union {
    D2H_EVENT_rx_error_event_message_t rx_error_event;
@@ -158,6 +169,7 @@ typedef union {
    D2H_EVENT_context_switch_breakpoint_reached_event_massage_t context_switch_breakpoint_reached_event;
    D2H_EVENT_health_monitor_clock_changed_event_message_t health_monitor_clock_changed_event;
    D2H_EVENT_hw_infer_mamager_infer_done_message_t hw_infer_manager_infer_done_event;
+   D2H_EVENT_context_switch_run_time_error_event_message_t context_switch_run_time_error_event;
 } D2H_EVENT__message_parameters_t;
 
 typedef struct {
index bd0d1c494b159a711502ef7c0e74976554435c7d..baed9a3c9c5034f8e726e8621fa06f2d79136615 100644 (file)
@@ -19,6 +19,8 @@ extern "C" {
 
 #define FIRMWARE_HEADER_MAGIC_HAILO8 (0x1DD89DE0)
 #define FIRMWARE_HEADER_MAGIC_HAILO15 (0xE905DAAB)
+// TODO - HRT-11344 : change fw magic to pluto specific
+#define FIRMWARE_HEADER_MAGIC_PLUTO (0xE905DAAB)
 
 typedef enum {
     FIRMWARE_HEADER_VERSION_INITIAL = 0,
@@ -29,7 +31,8 @@ typedef enum {
 
 typedef enum {
     FIRMWARE_TYPE_HAILO8 = 0,
-    FIRMWARE_TYPE_HAILO15
+    FIRMWARE_TYPE_HAILO15,
+    FIRMWARE_TYPE_PLUTO
 } firmware_type_t;
 
 
@@ -37,6 +40,8 @@ typedef enum {
 #define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_HAILO15)
 #elif defined(HAILO8_B0)
 #define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_HAILO8)
+#elif defined(PLUTO)
+#define COMPILED_FIRMWARE_TYPE (FIRMWARE_TYPE_PLUTO)
 #endif /* MERCURY */
 
 typedef struct {
index f45d9c1b3133ece03dc97ce8f5199eefda7657d7..fe9d8e3a8456b5e536734b053e4902274015ed46 100644 (file)
@@ -111,6 +111,7 @@ Updating rules:
    FIRMWARE_STATUS__X(HAILO_STATUS_DRAM_DMA_SERVICE_INIT_FAILED)\
    FIRMWARE_STATUS__X(HAILO_STATUS_VDMA_SERVICE_INIT_FAILED)\
    FIRMWARE_STATUS__X(HAILO_STATUS_ERROR_HANDLING_STACK_OVERFLOW)\
+   FIRMWARE_STATUS__X(HAILO_STATUS_UNEXPECTED_NULL_ARGUMENT)\
    \
    FIRMWARE_MODULE__X(FIRMWARE_MODULE__DATAFLOW)\
    FIRMWARE_STATUS__X(HAILO_DATAFLOW_STATUS_INVALID_PARAMETER)\
@@ -403,7 +404,7 @@ Updating rules:
    FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_DYNAMIC_BATCH_SIZE_LENGTH)\
    FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_INFER_FEATURES_LENGTH) /* DEPRECATED */\
    FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CONFIG_CHANNEL_INFOS)\
-   FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_IS_BATCH_SIZE_FLOW_LENGTH)\
+   FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_IS_BATCH_SIZE_FLOW_LENGTH) /* DEPRECATED */\
    FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CONTEXT_SWITCH_CONTEXT_TYPE_LENGTH)\
    FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_INVALID_CONTEXT_SWITCH_CONTEXT_NETWORK_GROUP_ID_LENGTH)\
    FIRMWARE_STATUS__X(CONTROL_PROTOCOL_STATUS_SET_SLEEP_STATE_FAILED)\
@@ -552,10 +553,13 @@ Updating rules:
    FIRMWARE_STATUS__X(PCIE_SERVICE__WAIT_UNTIL_CHANNEL_IS_IDLE_REACHED_TIMEOUT)\
    FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_UNSUPPORTED_PERIPH_BYTES_PER_BUFFER)\
    FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_GLUE_LOGIC_CHANNEL_OUT_OF_RANGE)\
-   FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_H2D_CHANNEL_INDEX)\
-   FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_D2H_CHANNEL_INDEX)\
+   FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_H2D_GLUE_LOGIC_CHANNEL_INDEX)\
+   FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_D2H_GLUE_LOGIC_CHANNEL_INDEX)\
    FIRMWARE_STATUS__X(PCIE_SERVICE_INVALID_INITIAL_CREDIT_SIZE)\
    FIRMWARE_STATUS__X(PCIE_SERVICE_ERROR_ADDING_CREDITS_TO_PCIE_CHANNEL)\
+   FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_STREAM_INDEX)\
+   FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_CHANNEL_TYPE)\
+   FIRMWARE_STATUS__X(PCIE_SERVICE_STATUS_INVALID_DESC_PAGE_SIZE)\
    \
    FIRMWARE_MODULE__X(FIRMWARE_MODULE__FIRMWARE_UPDATE)\
    FIRMWARE_STATUS__X(FIRMWARE_UPDATE_STATUS_INVALID_PARAMETERS)\
@@ -703,7 +707,7 @@ Updating rules:
    FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_ADDING_CREDITS_IS_ALLOWED_ONLY_FOR_EDGE_LAYER_DIRECTION_HOST_TO_DEVICE)\
    FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_PCIE_CHANNEL_INDEX_AND_DIRECTION_MISMATCH)\
    FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_INVALID_ACTION_LIST_OFFSET)\
-   FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_CHANGING_APP_IS_ALLOWED_IN_RESET_STATE_ONLY)\
+   FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_CHANGING_APP_IS_NOT_ALLOWED)\
    FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_INVALID_BATCH_SIZE)\
    FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_RECEIVED_CONFIG_BREAKPOINT_BEFORE_INIT_STATE_DONE)\
    FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_RECEIVED_INVALID_APPLICATION_INDEX)\
@@ -758,6 +762,7 @@ Updating rules:
    FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_REACHED_TIMEOUT_WHILE_WAITING_FOR_NETWORK_IDLE)\
    FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_WRITE_DATA_BY_TYPE_ACTION_INVALID_TYPE)\
    FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_WRITE_DATA_BY_TYPE_ACTION_INVALID_MEMORY_SPACE)\
+   FIRMWARE_STATUS__X(CONTEXT_SWITCH_STATUS_REACHED_TIMEOUT_WHILE_WAITING_FOR_BATCH_SWITCH_CONTEXT_TO_END)\
    \
    FIRMWARE_MODULE__X(FIRMWARE_MODULE__D2H_EVENT_MANAGER)\
    FIRMWARE_STATUS__X(HAILO_D2H_EVENT_MANAGER_STATUS_MESSAGE_HIGH_PRIORITY_QUEUE_CREATE_FAILED)\
@@ -865,6 +870,7 @@ Updating rules:
    FIRMWARE_MODULE__X(FIRMWARE_MODULE__GPIO)\
    FIRMWARE_STATUS__X(GPIO_BAD_GPIO_INDEX)\
    FIRMWARE_STATUS__X(GPIO_BAD_PINMUX_GROUP)\
+   FIRMWARE_STATUS__X(GPIO_SETUP_PINMUX_NOT_SUPPORTED)\
    \
    FIRMWARE_MODULE__X(FIRMWARE_MODULE__OVERCURRENT_PROTECTION)\
    FIRMWARE_STATUS__X(OVERCURRENT_PROTECTION_INVALID_ALERT_THRESHOLD_VALUE) /* DEPRECATED */\
@@ -1046,6 +1052,7 @@ Updating rules:
    FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_INVALID_BYTES_IN_PATTERN)\
    FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_INVALID_STREAM_INDEX)\
    FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_INVALID_CHANNEL_INDEX)\
+   FIRMWARE_STATUS__X(DRAM_DMA_SERVICE_STATUS_FAILED_TO_RESET_QM_CREDITS)\
    \
    FIRMWARE_MODULE__X(FIRMWARE_MODULE__NN_CORE_SERVICE)\
    FIRMWARE_STATUS__X(NN_CORE_SERVICE_STATUS_INVALID_ARG_PASSED)\
@@ -1060,6 +1067,8 @@ Updating rules:
    FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_STATUS_INVALID_CREDIT_TYPE)\
    FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_WRAPPER_STATUS_INVALID_HOST_BUFFER_TYPE)\
    FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_STATUS_BATCH_CREDITS_OVERFLOW)\
+   FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_WRAPPER_STATUS_BURST_CREDIT_TASK_MUST_BE_DISABLED_WHILE_CHANGING_BATCH)\
+   FIRMWARE_STATUS__X(DATA_STREAM_MANAGER_WRAPPER_STATUS_UNABLE_TO_RESET_FRAME_COUNTER)\
    \
    FIRMWARE_MODULE__X(FIRMWARE_MODULE__BURST_CREDITS_TASK)\
    FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TRYING_TO_ADD_ACTION_WHILE_NOT_IN_IDLE_STATE)\
@@ -1067,6 +1076,9 @@ Updating rules:
    FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TRYING_TO_CHANGE_STATE_TO_INFER_WHILE_ALREADY_IN_INFER)\
    FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_INFER_REACHED_TIMEOUT)\
    FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TASK_DEACTIVATED)\
+   FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_FAILED_TO_FIND_STREAM_INDEX)\
+   FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TASK_NO_CONFIGURED_ACTIONS)\
+   FIRMWARE_STATUS__X(BURST_CREDITS_TASK_STATUS_TASK_EXPECTED_HIGHER_BATCH)\
    \
    FIRMWARE_MODULE__X(FIRMWARE_MODULE__TASK_SYNC_EVENTS)\
    FIRMWARE_STATUS__X(TASK_SYNC_EVENTS_STATUS_START_TASK_WHILE_IT_IS_RUNNING)\
@@ -1097,6 +1109,7 @@ Updating rules:
    FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_KERNEL_DONE_ADDRESS)\
    FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_RECEIVED_UNEXPECTED_INTERRUPT)\
    FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_NETWORK_INDEX)\
+   FIRMWARE_STATUS__X(CLUSTER_MANAGER_STATUS_INVALID_KERNEL_DONE_COUNT)\
    \
    FIRMWARE_MODULE__X(FIRMWARE_MODULE__HW_INFER_MANAGER)\
    FIRMWARE_STATUS__X(HW_INFER_MANAGER_STATUS_NETWORK_GROUP_NOT_CONFIGURED_BEFORE_INFER_START)\
index a3f162e5caa0a72b57744c291016b0abb4a5e6bb..ed1df4aed27293614f192baf7ed07926c87a9e6e 100644 (file)
@@ -50,6 +50,9 @@ static HAILO_COMMON_STATUS_t firmware_header_utils__validate_fw_header(uintptr_t
     case FIRMWARE_TYPE_HAILO15:
         firmware_magic = FIRMWARE_HEADER_MAGIC_HAILO15;
         break;
+    case FIRMWARE_TYPE_PLUTO:
+        firmware_magic = FIRMWARE_HEADER_MAGIC_PLUTO;
+        break;
     default:
         status = HAILO_STATUS__FIRMWARE_HEADER_UTILS__INVALID_FIRMWARE_TYPE;
         goto exit;
diff --git a/hailort/.gitignore b/hailort/.gitignore
new file mode 100644 (file)
index 0000000..71367b8
--- /dev/null
@@ -0,0 +1,12 @@
+/external/
+cmake/external/benchmark/
+cmake/external/catch2/
+cmake/external/dotwriter/
+cmake/external/json/
+cmake/external/pybind11/
+cmake/external/readerwriterqueue/
+cmake/external/spdlog/
+
+pre_build/external/build/
+pre_build/tools/build_protoc/
+pre_build/install/
index 5f790bf77f7a89b62075af62916027bd8bc82504..4fb18b216aba4193d3edead2e0223d429ae94cad 100644 (file)
@@ -30,7 +30,7 @@ endif()
 
 # Set firmware version
 add_definitions( -DFIRMWARE_VERSION_MAJOR=4 )
-add_definitions( -DFIRMWARE_VERSION_MINOR=14 )
+add_definitions( -DFIRMWARE_VERSION_MINOR=15 )
 add_definitions( -DFIRMWARE_VERSION_REVISION=0 )
 if(HAILO_BUILD_SERVICE)
     add_definitions( -DHAILO_SUPPORT_MULTI_PROCESS )
@@ -39,11 +39,6 @@ endif()
 # The logic of prepare_externals is executed in a sperate module so that it can be run externally (via cmake -P prepare_externals.cmake)
 include(prepare_externals.cmake)
 
-# BENCHMARK_ENABLE_TESTING can be used by other 3rd party projects, therefore we define it
-# before adding projects
-set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "Enable testing of the benchmark library.")
-add_subdirectory(external/benchmark EXCLUDE_FROM_ALL)
-
 # Include host protobuf for protoc (https://stackoverflow.com/questions/53651181/cmake-find-protobuf-package-in-custom-directory)
 if(CMAKE_HOST_UNIX)
     include(${CMAKE_CURRENT_LIST_DIR}/pre_build/install/lib/cmake/protobuf/protobuf-config.cmake)
@@ -53,6 +48,8 @@ else()
     include(${CMAKE_CURRENT_LIST_DIR}/pre_build/install/cmake/protobuf-module.cmake)
 endif()
 
+set(HAILO_EXTERNALS_CMAKE_SCRIPTS ${CMAKE_CURRENT_LIST_DIR}/cmake/external/)
+
 # Add target protobuf directory and exclude its targets from all
 # Disable protobuf tests, protoc and MSVC static runtime unless they are already defined
 # NOTE: we can also force - set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests" FORCE)
@@ -90,16 +87,9 @@ set(COMMON_INC_DIR ${PROJECT_SOURCE_DIR}/common/include)
 set(DRIVER_INC_DIR ${PROJECT_SOURCE_DIR}/hailort/drivers/common)
 set(RPC_DIR ${PROJECT_SOURCE_DIR}/hailort/rpc)
 
-add_subdirectory(external/Catch2 EXCLUDE_FROM_ALL)
 add_subdirectory(external/CLI11 EXCLUDE_FROM_ALL)
-add_subdirectory(external/json EXCLUDE_FROM_ALL)
-add_subdirectory(external/DotWriter EXCLUDE_FROM_ALL)
-add_subdirectory(external/spdlog EXCLUDE_FROM_ALL)
-set_target_properties(spdlog PROPERTIES POSITION_INDEPENDENT_CODE ON)
 if(CMAKE_SYSTEM_NAME STREQUAL QNX)
-    add_library(pevents STATIC EXCLUDE_FROM_ALL external/pevents/src/pevents.cpp)
-    target_include_directories(pevents PUBLIC external/pevents/src)
-    target_compile_definitions(pevents PRIVATE -DWFMO)
+    include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/pevents.cmake)
 endif()
 
 if(HAILO_BUILD_SERVICE)
index 04f6ece3abc0947eb445a404f200a6cfd4e05935..375f5d9511b76f8256c5645d8e4d62485b14ee59 100644 (file)
@@ -1,15 +1,16 @@
-| Package                          | Copyright (c)                     | License            | Version        | Notes                                      | References                                                                    |
-|:---------------------------------|:----------------------------------|:-------------------|:---------------|:-------------------------------------------|:------------------------------------------------------------------------------|
-| CLI11                            | University of Cincinnati          | 3-Clause BSD       | 2.2.0          | Fork                                       | https://github.com/hailo-ai/CLI11                                             |
-| Catch2                           | Catch2 Authors                    | BSL-1.0            | 2.13.7         | Cloned entire package                      | https://github.com/catchorg/Catch2                                            |
-| protobuf                         | Google Inc.                       | BSD                | 3.19.4         | Cloned entire package                      | https://github.com/protocolbuffers/protobuf                                   |
-| pybind11                         | Wenzel Jakob                      | BSD                | 2.10.1         | Cloned entire package                      | https://github.com/pybind/pybind11                                            |
-| spdlog                           | Gabi Melman                       | MIT                | 1.6.1          | Cloned entire package                      | https://github.com/gabime/spdlog                                              |
-| folly                            | Facebook, Inc. and its affiliates | Apache License 2.0 | v2020.08.17.00 | Copied only the file `folly/TokenBucket.h` | https://github.com/facebook/folly                                             |
-| nlohmann_json_cmake_fetchcontent | ArthurSonzogni                    | MIT License        | v3.9.1         | Cloned entire package                      | https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent            |
-| readerwriterqueue                | Cameron Desrochers                | Simplified BSD     | 1.0.3          | Cloned entire package                      | https://github.com/cameron314/readerwriterqueue                               |
-| DotWriter                        | John Vilk                         | MIT License        | master         | Fork                                       | https://github.com/hailo-ai/DotWriter                                         |
-| benchmark                        | Google Inc.                       | Apache License 2.0 | 1.6.0          | Cloned entire package                      | https://github.com/google/benchmark.git                                       |
-| md5                              | Alexander Peslyak                 | cut-down BSD       | -              | Copied code from website                   | http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5 |
-| pevents                          | Mahmoud Al-Qudsi                  | MIT License        | master         | Cloned entire package                      | https://github.com/neosmart/pevents.git                                       |
-| grpc                             | Google Inc.                       | Apache License 2.0 | 1.46.0         | Cloned entire package                      | https://github.com/grpc/grpc                                                  |
+| Package                          | Copyright (c)                     | License            | Version        | Notes                                         | References                                                                    |
+|:---------------------------------|:----------------------------------|:-------------------|:---------------|:----------------------------------------------|:------------------------------------------------------------------------------|
+| CLI11                            | University of Cincinnati          | 3-Clause BSD       | 2.2.0          | Fork                                          | https://github.com/hailo-ai/CLI11                                             |
+| Catch2                           | Catch2 Authors                    | BSL-1.0            | 2.13.7         | Cloned entire package                         | https://github.com/catchorg/Catch2                                            |
+| protobuf                         | Google Inc.                       | BSD                | 3.19.4         | Cloned entire package                         | https://github.com/protocolbuffers/protobuf                                   |
+| pybind11                         | Wenzel Jakob                      | BSD                | 2.10.1         | Cloned entire package                         | https://github.com/pybind/pybind11                                            |
+| spdlog                           | Gabi Melman                       | MIT                | 1.6.1          | Cloned entire package                         | https://github.com/gabime/spdlog                                              |
+| folly                            | Facebook, Inc. and its affiliates | Apache License 2.0 | v2020.08.17.00 | Copied only the file `folly/TokenBucket.h`    | https://github.com/facebook/folly                                             |
+| nlohmann_json_cmake_fetchcontent | ArthurSonzogni                    | MIT License        | v3.9.1         | Cloned entire package                         | https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent            |
+| readerwriterqueue                | Cameron Desrochers                | Simplified BSD     | 1.0.3          | Cloned entire package                         | https://github.com/cameron314/readerwriterqueue                               |
+| DotWriter                        | John Vilk                         | MIT License        | master         | Fork                                          | https://github.com/hailo-ai/DotWriter                                         |
+| benchmark                        | Google Inc.                       | Apache License 2.0 | 1.6.0          | Cloned entire package                         | https://github.com/google/benchmark.git                                       |
+| md5                              | Alexander Peslyak                 | cut-down BSD       | -              | Copied code from website                      | http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5 |
+| pevents                          | Mahmoud Al-Qudsi                  | MIT License        | master         | Cloned entire package                         | https://github.com/neosmart/pevents.git                                       |
+| grpc                             | Google Inc.                       | Apache License 2.0 | 1.46.0         | Cloned entire package                         | https://github.com/grpc/grpc                                                  |
+| stb                              | Sean Barrett                      | MIT License        | 0.97           | Copied only the file `stb/stb_image_resize.h` | https://github.com/nothings/stb                                               |
\ No newline at end of file
index 29601e2693d4321acae1277962482e3d520c6c76..b833f3067777fcadb715c6bc67b9575fb2e641e0 100644 (file)
@@ -8,7 +8,7 @@ function(execute_process_in_clean_env)
     else()
         # TODO: make it clean env for cross compile
         set(cmdline ${execute_process_in_clean_env_UNPARSED_ARGUMENTS})
-        execute_process(COMMAND cmd /C ${cmdline} OUTPUT_QUIET RESULT_VARIABLE result)
+        execute_process(COMMAND ${cmdline} OUTPUT_QUIET RESULT_VARIABLE result)
     endif()
     if(DEFINED ${execute_process_in_clean_env_RESULT_VARIABLE})
         set(${execute_process_in_clean_env_RESULT_VARIABLE} ${result} PARENT_SCOPE)
diff --git a/hailort/cmake/external/benchmark.cmake b/hailort/cmake/external/benchmark.cmake
new file mode 100644 (file)
index 0000000..d012936
--- /dev/null
@@ -0,0 +1,27 @@
+cmake_minimum_required(VERSION 3.11.0)
+
+include(FetchContent)
+
+# BENCHMARK_ENABLE_TESTING can be used by other 3rd party projects, therefore we define it
+# before adding projects
+set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "Enable testing of the benchmark library.")
+
+FetchContent_Declare(
+    benchmark
+    GIT_REPOSITORY https://github.com/google/benchmark.git 
+    GIT_TAG f91b6b42b1b9854772a90ae9501464a161707d1e # Version 1.6.0
+    GIT_SHALLOW TRUE
+    SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/benchmark"
+    BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/benchmark"
+)
+
+if(NOT HAILO_OFFLINE_COMPILATION)
+    # https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
+    FetchContent_GetProperties(benchmark)
+    if(NOT benchmark_POPULATED)
+        FetchContent_Populate(benchmark)
+        add_subdirectory(${benchmark_SOURCE_DIR} ${benchmark_BINARY_DIR} EXCLUDE_FROM_ALL)
+    endif()
+else()
+    add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/benchmark EXCLUDE_FROM_ALL)
+endif()
\ No newline at end of file
diff --git a/hailort/cmake/external/catch2.cmake b/hailort/cmake/external/catch2.cmake
new file mode 100644 (file)
index 0000000..f8f5c2a
--- /dev/null
@@ -0,0 +1,23 @@
+cmake_minimum_required(VERSION 3.11.0)
+
+include(FetchContent)
+
+FetchContent_Declare(
+    catch2
+    GIT_REPOSITORY https://github.com/catchorg/Catch2.git 
+    GIT_TAG c4e3767e265808590986d5db6ca1b5532a7f3d13 # Version 2.13.7
+    GIT_SHALLOW TRUE
+    SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/catch2"
+    BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/catch2"
+)
+
+if(NOT HAILO_OFFLINE_COMPILATION)
+    # https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
+    FetchContent_GetProperties(catch2)
+    if(NOT catch2_POPULATED)
+        FetchContent_Populate(catch2)
+        add_subdirectory(${catch2_SOURCE_DIR} ${catch2_BINARY_DIR} EXCLUDE_FROM_ALL)
+    endif()
+else()
+    add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/catch2 EXCLUDE_FROM_ALL)
+endif()
\ No newline at end of file
diff --git a/hailort/cmake/external/dotwriter.cmake b/hailort/cmake/external/dotwriter.cmake
new file mode 100644 (file)
index 0000000..ef7d799
--- /dev/null
@@ -0,0 +1,23 @@
+cmake_minimum_required(VERSION 3.11.0)
+
+include(FetchContent)
+
+FetchContent_Declare(
+    dotwriter
+    GIT_REPOSITORY https://github.com/hailo-ai/DotWriter
+    GIT_TAG e5fa8f281adca10dd342b1d32e981499b8681daf # Version master
+    GIT_SHALLOW TRUE
+    SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/dotwriter"
+    BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/dotwriter"
+)
+
+if(NOT HAILO_OFFLINE_COMPILATION)
+    # https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
+    FetchContent_GetProperties(dotwriter)
+    if(NOT dotwriter_POPULATED)
+        FetchContent_Populate(dotwriter)
+        add_subdirectory(${dotwriter_SOURCE_DIR} ${dotwriter_BINARY_DIR} EXCLUDE_FROM_ALL)
+    endif()
+else()
+    add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/dotwriter EXCLUDE_FROM_ALL)
+endif()
\ No newline at end of file
diff --git a/hailort/cmake/external/json.cmake b/hailort/cmake/external/json.cmake
new file mode 100644 (file)
index 0000000..b3a30ab
--- /dev/null
@@ -0,0 +1,23 @@
+cmake_minimum_required(VERSION 3.11.0)
+
+include(FetchContent)
+
+FetchContent_Declare(
+    json
+    GIT_REPOSITORY https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent.git
+    GIT_TAG 391786c6c3abdd3eeb993a3154f1f2a4cfe137a0 # Version 3.9.1
+    GIT_SHALLOW TRUE
+    SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/json"
+    BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/json"
+)
+
+if(NOT HAILO_OFFLINE_COMPILATION)
+    # https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
+    FetchContent_GetProperties(json)
+    if(NOT json_POPULATED)
+        FetchContent_Populate(json)
+        add_subdirectory(${json_SOURCE_DIR} ${json_BINARY_DIR} EXCLUDE_FROM_ALL)
+    endif()
+else()
+    add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/json EXCLUDE_FROM_ALL)
+endif()
\ No newline at end of file
diff --git a/hailort/cmake/external/pevents.cmake b/hailort/cmake/external/pevents.cmake
new file mode 100644 (file)
index 0000000..65c3659
--- /dev/null
@@ -0,0 +1,26 @@
+cmake_minimum_required(VERSION 3.11.0)
+
+include(FetchContent)
+
+FetchContent_Declare(
+    pevents
+    GIT_REPOSITORY https://github.com/neosmart/pevents.git
+    GIT_TAG 1209b1fd1bd2e75daab4380cf43d280b90b45366 # Master
+    #GIT_SHALLOW TRUE
+    SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/pevents"
+    BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/pevents"
+)
+
+if(NOT HAILO_OFFLINE_COMPILATION)
+    # https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
+    FetchContent_GetProperties(pevents)
+    if(NOT pevents_POPULATED)
+        FetchContent_Populate(pevents)
+    endif()
+endif()
+
+if(NOT TARGET pevents)
+    add_library(pevents STATIC EXCLUDE_FROM_ALL ${pevents_SOURCE_DIR}/src/pevents.cpp)
+    target_include_directories(pevents PUBLIC ${pevents_SOURCE_DIR}/src)
+    target_compile_definitions(pevents PRIVATE -DWFMO)
+endif()
\ No newline at end of file
diff --git a/hailort/cmake/external/pybind11.cmake b/hailort/cmake/external/pybind11.cmake
new file mode 100644 (file)
index 0000000..0b3df2d
--- /dev/null
@@ -0,0 +1,35 @@
+cmake_minimum_required(VERSION 3.11.0)
+
+include(FetchContent)
+
+if(NOT PYTHON_EXECUTABLE AND PYBIND11_PYTHON_VERSION)
+    # venv version is prioritized (instead of PYBIND11_PYTHON_VERSION) if PYTHON_EXECUTABLE is not set.
+    # See https://pybind11.readthedocs.io/en/stable/changelog.html#v2-6-0-oct-21-2020
+    if((${CMAKE_VERSION} VERSION_LESS "3.22.0") AND (NOT WIN32))
+        find_package(PythonInterp ${PYBIND11_PYTHON_VERSION} REQUIRED)
+        set(PYTHON_EXECUTABLE ${Python_EXECUTABLE})
+    else()
+        find_package(Python3 ${PYBIND11_PYTHON_VERSION} REQUIRED EXACT COMPONENTS Interpreter Development)
+        set(PYTHON_EXECUTABLE ${Python3_EXECUTABLE})
+    endif()
+endif()
+
+FetchContent_Declare(
+    pybind11
+    GIT_REPOSITORY https://github.com/pybind/pybind11.git
+    GIT_TAG 80dc998efced8ceb2be59756668a7e90e8bef917 # Version 2.10.1
+    GIT_SHALLOW TRUE
+    SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/pybind11"
+    BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/pybind11"
+)
+
+if(NOT HAILO_OFFLINE_COMPILATION)
+    # https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
+    FetchContent_GetProperties(pybind11)
+    if(NOT pybind11_POPULATED)
+        FetchContent_Populate(pybind11)
+        add_subdirectory(${pybind11_SOURCE_DIR} ${pybind11_BINARY_DIR} EXCLUDE_FROM_ALL)
+    endif()
+else()
+    add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/pybind11 EXCLUDE_FROM_ALL)
+endif()
\ No newline at end of file
diff --git a/hailort/cmake/external/readerwriterqueue.cmake b/hailort/cmake/external/readerwriterqueue.cmake
new file mode 100644 (file)
index 0000000..ba5b3f4
--- /dev/null
@@ -0,0 +1,26 @@
+cmake_minimum_required(VERSION 3.11.0)
+
+include(FetchContent)
+
+FetchContent_Declare(
+    readerwriterqueue
+    GIT_REPOSITORY https://github.com/cameron314/readerwriterqueue
+    GIT_TAG 435e36540e306cac40fcfeab8cc0a22d48464509 # Version 1.0.3
+    GIT_SHALLOW TRUE
+    SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/readerwriterqueue"
+    BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/readerwriterqueue"
+)
+
+if(NOT HAILO_OFFLINE_COMPILATION)
+    # https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
+    FetchContent_GetProperties(readerwriterqueue)
+    if(NOT readerwriterqueue_POPULATED)
+        FetchContent_Populate(readerwriterqueue)
+    endif()
+endif()
+
+if(NOT TARGET readerwriterqueue)
+    # Add readerwriterqueue as a header-only library
+    add_library(readerwriterqueue INTERFACE)
+    target_include_directories(readerwriterqueue INTERFACE ${readerwriterqueue_SOURCE_DIR})
+endif()
\ No newline at end of file
diff --git a/hailort/cmake/external/spdlog.cmake b/hailort/cmake/external/spdlog.cmake
new file mode 100644 (file)
index 0000000..1cf80df
--- /dev/null
@@ -0,0 +1,24 @@
+cmake_minimum_required(VERSION 3.11.0)
+
+include(FetchContent)
+
+FetchContent_Declare(
+    spdlog
+    GIT_REPOSITORY https://github.com/gabime/spdlog
+    GIT_TAG 22a169bc319ac06948e7ee0be6b9b0ac81386604
+    GIT_SHALLOW TRUE
+    SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/spdlog"
+    BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/spdlog"
+)
+
+if(NOT HAILO_OFFLINE_COMPILATION)
+    # https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
+    FetchContent_GetProperties(spdlog)
+    if(NOT spdlog_POPULATED)
+        FetchContent_Populate(spdlog)
+        add_subdirectory(${spdlog_SOURCE_DIR} ${spdlog_BINARY_DIR} EXCLUDE_FROM_ALL)
+    endif()
+else()
+    add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/spdlog EXCLUDE_FROM_ALL)
+endif()
+set_target_properties(spdlog PROPERTIES POSITION_INDEPENDENT_CODE ON)
index b3bed6b22e4d2fefbefa0f5dd58b519a2e2ea1fd..dcaeaf6adb32e1adf5f56e5b641771c065a03f82 100644 (file)
@@ -20,6 +20,7 @@ set(SRC_FILES
     ${CMAKE_CURRENT_SOURCE_DIR}/file_utils.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/string_utils.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/event_internal.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/fork_support.cpp
 
     ${CMAKE_CURRENT_SOURCE_DIR}/device_measurements.cpp
 )
index f8348f6fc1394c88a23d172a835cafaf10a43e80..e13c9cbffdd975f29767cced490e455e7039a73d 100644 (file)
@@ -36,7 +36,7 @@ typedef struct {
 #define _CB_FETCH(x) (InterlockedOr((LONG volatile*)(&x), (LONG)0))
 #define _CB_SET(x, value) (InterlockedExchange((LONG volatile*)(&x), (LONG)(value)))
 #else
-#define _CB_FETCH(x) (__sync_fetch_and_or(&(x), 0))
+#define _CB_FETCH(x) (__sync_fetch_and_or(const_cast<volatile int*>(&(x)), 0))
 #define _CB_SET(x, value) ((void)__sync_lock_test_and_set(&(x), value))
 #endif
 
@@ -155,22 +155,22 @@ public:
         }
     }
 
-    bool empty()
+    bool empty() const
     {
         return CB_HEAD(m_circ) == CB_TAIL(m_circ);
     }
 
-    bool full()
+    bool full() const
     {
         return 0 == CB_AVAIL(m_circ, CB_HEAD(m_circ), CB_TAIL(m_circ));
     }
 
-    size_t size()
+    size_t size() const
     {
         return CB_PROG(m_circ, CB_HEAD(m_circ), CB_TAIL(m_circ));
     }
 
-    size_t capacity()
+    size_t capacity() const
     {
         return CB_SIZE(m_circ) - 1;
     }
index e6993791f2c3555ac898b1c581d6dd163f0b53ea..50d025998d9d28620ed94c43a6cb31522146dd5f 100644 (file)
@@ -61,6 +61,11 @@ hailo_status WaitOrShutdown::signal()
     return m_waitable->signal();
 }
 
+hailo_status WaitOrShutdown::shutdown()
+{
+    return m_shutdown_event->signal();
+}
+
 WaitableGroup WaitOrShutdown::create_waitable_group(WaitablePtr waitable, EventPtr shutdown_event)
 {
     // Note the order - consistent with SHUTDOWN_INDEX, WAITABLE_INDEX.
index 295d4a875797cc97fa448a444cca027ca91a5048..ff0b47aca80ad568c5028a8602bf02ea35b10b98 100644 (file)
@@ -93,6 +93,7 @@ public:
     // * On any failure an appropriate status shall be returned
     hailo_status wait(std::chrono::milliseconds timeout);
     hailo_status signal();
+    hailo_status shutdown();
 
 private:
     static WaitableGroup create_waitable_group(WaitablePtr waitable, EventPtr shutdown_event);
diff --git a/hailort/common/fork_support.cpp b/hailort/common/fork_support.cpp
new file mode 100644 (file)
index 0000000..7c3e44d
--- /dev/null
@@ -0,0 +1,193 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file fork_support.cpp
+ **/
+
+#include "fork_support.hpp"
+#include "common/logger_macros.hpp"
+
+
+namespace hailort
+{
+
+#ifdef HAILO_IS_FORK_SUPPORTED
+RecursiveSharedMutex::RecursiveSharedMutex()
+{
+    // Make sharable mutex
+    pthread_mutexattr_t mutex_attrs{};
+    int err = pthread_mutexattr_init(&mutex_attrs);
+    if (0 != err) {
+        LOGGER__CRITICAL("Failed init mutex attr, aborting");
+        std::abort();
+    }
+
+    err = pthread_mutexattr_setpshared(&mutex_attrs, PTHREAD_PROCESS_SHARED);
+    if (0 != err) {
+        LOGGER__CRITICAL("pthread_mutexattr_setpshared failed");
+        std::abort();
+    }
+
+    err = pthread_mutexattr_settype(&mutex_attrs, PTHREAD_MUTEX_RECURSIVE);
+    if (0 != err) {
+        LOGGER__CRITICAL("pthread_mutexattr_settype failed");
+        std::abort();
+    }
+
+    err = pthread_mutex_init(&m_mutex, &mutex_attrs);
+    if (0 != pthread_mutexattr_destroy(&mutex_attrs)) {
+        LOGGER__CRITICAL("Failed destroy mutexattr");
+        // continue
+    }
+    if (0 != err) {
+        LOGGER__CRITICAL("Failed init mutex, aborting");
+        std::abort();
+    }
+}
+
+RecursiveSharedMutex::~RecursiveSharedMutex()
+{
+    int err = pthread_mutex_destroy(&m_mutex);
+    if (0 != err) {
+        LOGGER__ERROR("Failed destroy shared mutex, errno {}", err);
+    }
+}
+
+void RecursiveSharedMutex::lock()
+{
+    int err = pthread_mutex_lock(&m_mutex);
+    if (0 != err) {
+        LOGGER__ERROR("Failed lock shared mutex, errno {}", err);
+        std::abort();
+    }
+}
+
+void RecursiveSharedMutex::unlock()
+{
+    int err = pthread_mutex_unlock(&m_mutex);
+    if (0 != err) {
+        LOGGER__ERROR("Failed unlock shared mutex, errno {}", err);
+        std::abort();
+    }
+}
+
+SharedConditionVariable::SharedConditionVariable()
+{
+    // Make sharable condvar
+    pthread_condattr_t cond_attrs{};
+    int err = pthread_condattr_init(&cond_attrs);
+    if (0 != err) {
+        LOGGER__CRITICAL("Failed init condition variable attr, aborting");
+        std::abort();
+    }
+
+    err = pthread_condattr_setpshared(&cond_attrs, PTHREAD_PROCESS_SHARED);
+    if (0 != err) {
+        LOGGER__CRITICAL("pthread_condattr_setpshared failed");
+        std::abort();
+    }
+
+    err = pthread_condattr_setclock(&cond_attrs, CLOCK_MONOTONIC);
+    if (0 != err) {
+        LOGGER__CRITICAL("pthread_condattr_setclock failed");
+        std::abort();
+    }
+
+    err = pthread_cond_init(&m_cond, &cond_attrs);
+    if (0 != pthread_condattr_destroy(&cond_attrs)) {
+        LOGGER__CRITICAL("Failed destroy condattr");
+        // continue
+    }
+    if (0 != err) {
+        LOGGER__CRITICAL("Failed init condition variable, aborting");
+        std::abort();
+    }
+}
+
+SharedConditionVariable::~SharedConditionVariable()
+{
+    int err = pthread_cond_destroy(&m_cond);
+    if (0 != err) {
+        LOGGER__ERROR("Failed destroy shared condition variable, errno {}", err);
+    }
+}
+
+// Get the absolute time for the given timeout - calculate now() + timeout_ns
+// using system CLOCK_MONOTONIC (Used for pthread condition variable wait)
+static struct timespec get_absolute_time(std::chrono::nanoseconds timeout_ns)
+{
+    // Using chrono with timespec types to avoid casts
+    using ts_seconds = std::chrono::duration<decltype(timespec::tv_sec)>;
+    using ts_nanoseconds = std::chrono::duration<decltype(timespec::tv_nsec), std::nano>;
+
+    struct timespec current_ts{};
+    clock_gettime(CLOCK_MONOTONIC, &current_ts);
+
+    assert((current_ts.tv_sec + std::chrono::duration_cast<ts_seconds>(timeout_ns).count()) <
+        std::numeric_limits<decltype(timespec::tv_sec)>::max());
+    auto absolute_sec = ts_seconds(current_ts.tv_sec) + std::chrono::duration_cast<ts_seconds>(timeout_ns);
+    assert(current_ts.tv_nsec <= std::nano::den);
+    auto absolute_nsec = ts_nanoseconds(current_ts.tv_nsec) +
+        std::chrono::duration_cast<ts_nanoseconds>(timeout_ns % std::chrono::seconds(1));
+
+    // Nanos overflow
+    if (absolute_nsec.count() >= std::nano::den) {
+        absolute_sec += ts_seconds(1);
+        absolute_nsec = absolute_nsec % ts_seconds(1);
+    }
+
+    return timespec {
+        .tv_sec = absolute_sec.count(),
+        .tv_nsec = absolute_nsec.count()
+    };
+}
+
+bool SharedConditionVariable::wait_for(std::unique_lock<RecursiveSharedMutex> &lock, std::chrono::milliseconds timeout, std::function<bool()> condition)
+{
+    if (UINT32_MAX == timeout.count()) {
+        // Infinity wait
+        int err = 0;
+        while (!condition() && err == 0) {
+            err = pthread_cond_wait(&m_cond, lock.mutex()->native_handle());
+        }
+        if (err != 0) {
+            LOGGER__CRITICAL("Error waiting for shared condition variable: {}", err);
+            std::abort();
+        }
+        return true;
+    }
+    else if (0 == timeout.count()) {
+        // Special case for 0 timeout - we don't want to mess with absolute time
+        return condition();
+    } else {
+        // Timed wait
+        auto ts = get_absolute_time(timeout);
+
+        int err = 0;
+        while (!condition() && err == 0) {
+            err = pthread_cond_timedwait(&m_cond, lock.mutex()->native_handle(), &ts);
+        }
+        if ((err != 0) && (err != ETIMEDOUT)) {
+            LOGGER__CRITICAL("Error waiting for shared condition variable: {}", err);
+            std::abort();
+        }
+        return err == 0;
+    }
+}
+
+void SharedConditionVariable::notify_one()
+{
+    pthread_cond_signal(&m_cond);
+}
+
+void SharedConditionVariable::notify_all()
+{
+    pthread_cond_broadcast(&m_cond);
+}
+
+#endif /* HAILO_IS_FORK_SUPPORTED */
+
+
+} /* namespace hailort */
diff --git a/hailort/common/fork_support.hpp b/hailort/common/fork_support.hpp
new file mode 100644 (file)
index 0000000..4e90be9
--- /dev/null
@@ -0,0 +1,219 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file fork_support.hpp
+ * @brief Utilities/classes used to support fork in the process.
+ *        In general, fork SHOULD NOT be supported, but we still have some places that use fork.
+ *        Hopefully this file will be deleted as soon as possible.
+ **/
+
+#ifndef _HAILO_FORK_SUPPORT_HPP_
+#define _HAILO_FORK_SUPPORT_HPP_
+
+#include <mutex>
+#include <functional>
+#include <map>
+#include <assert.h>
+
+#ifndef _MSC_VER
+#include <sys/mman.h>
+#endif
+
+#ifndef _MSC_VER
+// Windows made the right choice - not supporting fork() at all, so we don't support it either.
+#define HAILO_IS_FORK_SUPPORTED
+#endif
+
+
+namespace hailort
+{
+
+
+#ifdef HAILO_IS_FORK_SUPPORTED
+
+// Replacement for std::recursive_mutex
+class RecursiveSharedMutex final {
+public:
+    RecursiveSharedMutex();
+    ~RecursiveSharedMutex();
+
+    RecursiveSharedMutex(const RecursiveSharedMutex &) = delete;
+    RecursiveSharedMutex &operator=(const RecursiveSharedMutex &) = delete;
+    RecursiveSharedMutex(RecursiveSharedMutex &&) = delete;
+    RecursiveSharedMutex &operator=(RecursiveSharedMutex &&) = delete;
+
+    void lock();
+    void unlock();
+
+    pthread_mutex_t *native_handle()
+    {
+        return &m_mutex;
+    }
+
+private:
+    pthread_mutex_t m_mutex;
+};
+
+// Replacement for std::condition_variable, can work only with RecursiveSharedMutex 
+class SharedConditionVariable final {
+public:
+
+    SharedConditionVariable();
+    ~SharedConditionVariable();
+
+    SharedConditionVariable(const SharedConditionVariable &) = delete;
+    SharedConditionVariable &operator=(const SharedConditionVariable &) = delete;
+    SharedConditionVariable(SharedConditionVariable &&) = delete;
+    SharedConditionVariable &operator=(SharedConditionVariable &&) = delete;
+
+    bool wait_for(std::unique_lock<RecursiveSharedMutex> &lock, std::chrono::milliseconds timeout,
+        std::function<bool()> condition);
+    void notify_one();
+    void notify_all();
+
+private:
+    pthread_cond_t m_cond;
+};
+
+
+// Objects that inherit from this class will automatically reside in a memory region shared
+// between forked processes.
+// virtual dtor is not implemented for this class since it shouldn't be used for polymorphism (=
+// delete shouldn't be called on the SharedAllocatedObject).
+class SharedAllocatedObject {
+public:
+    void* operator new(std::size_t size) = delete;
+    void* operator new(std::size_t size, const std::nothrow_t&) throw()
+    {
+        // Map a shared memory region into the virtual memory of the process
+        void* ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+        if (ptr == MAP_FAILED) {
+            return nullptr;
+        }
+        return ptr;
+    }
+
+    // Custom operator delete function that unmaps the shared memory region
+    void operator delete(void* ptr, std::size_t size)
+    {
+        munmap(ptr, size);
+    }
+};
+
+
+// pthread_atfork api has 2 problems:
+//      1. The callbacks don't accept a context.
+//      2. Callbacks cannot be unregistered.
+// In order to solve this issue, the AtForkRegistry singleton exists and manages some registry
+// of atfork callbacks.
+// pthread_atfork is called only once, and on the provided callbacks, the registered user callbacks
+// are called.
+class AtForkRegistry final {
+public:
+
+    static AtForkRegistry &get_instance()
+    {
+        static AtForkRegistry at_fork;
+        return at_fork;
+    }
+
+    AtForkRegistry(const AtForkRegistry &) = delete;
+    AtForkRegistry &operator=(const AtForkRegistry &) = delete;
+
+    // Special key used to identify the registered callbacks. One can use `this` as
+    // a unique identifier.
+    using Key = void*;
+
+    struct AtForkCallbacks {
+        std::function<void()> before_fork;
+        std::function<void()> after_fork_in_parent;
+        std::function<void()> after_fork_in_child;
+    };
+
+    // Init this guard with AtForkCallbacks, and the callbacks will be registered until destructed.
+    struct AtForkGuard {
+        AtForkGuard(Key key, const AtForkCallbacks &callbacks) :
+            m_key(key)
+        {
+            AtForkRegistry::get_instance().register_atfork(key, callbacks);
+        }
+
+        ~AtForkGuard()
+        {
+            AtForkRegistry::get_instance().unregister_atfork(m_key);
+        }
+
+        AtForkGuard(const AtForkGuard&) = delete;
+        AtForkGuard &operator=(const AtForkGuard &) = delete;
+
+        const Key m_key;
+    };
+
+
+private:
+
+    AtForkRegistry()
+    {
+        pthread_atfork(
+            []() { get_instance().before_fork(); },
+            []() { get_instance().after_fork_in_parent(); },
+            []() { get_instance().after_fork_in_child(); }
+        );
+    }
+
+    void register_atfork(Key key, const AtForkCallbacks &callbacks)
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+        assert(m_callbacks.end() == m_callbacks.find(key));
+        m_callbacks[key] = callbacks;
+    }
+
+    void unregister_atfork(Key key)
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+        assert(m_callbacks.end() != m_callbacks.find(key));
+        m_callbacks.erase(key);
+    }
+
+    void before_fork()
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+        for (const auto &callback : m_callbacks) {
+            callback.second.before_fork();
+        }
+    }
+
+    void after_fork_in_parent()
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+        for (const auto &callback : m_callbacks) {
+            callback.second.after_fork_in_parent();
+        }
+    }
+
+    void after_fork_in_child()
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+        for (const auto &callback : m_callbacks) {
+            callback.second.after_fork_in_child();
+        }
+    }
+
+    std::mutex m_mutex;
+    std::map<Key, AtForkCallbacks> m_callbacks;
+};
+
+#else /* HAILO_IS_FORK_SUPPORTED */
+using RecursiveSharedMutex = std::recursive_mutex;
+using SharedConditionVariable = std::condition_variable_any;
+
+
+class SharedAllocatedObject {};
+#endif
+
+
+} /* namespace hailort */
+
+#endif /* _HAILO_FORK_SUPPORT_HPP_ */
index a285651d6c4577c7255e50fbf3356d151352f341..7e23b12600be1a3f8541fb93c1e7af7826388502 100644 (file)
@@ -329,10 +329,10 @@ static uint32_t get_min_value_of_unordered_map(const std::unordered_map<K, V> &m
     return min_count;
 }
 
-static inline bool is_env_variable_on(const char* env_var_name)
+static inline bool is_env_variable_on(const char* env_var_name, const char* required_value, size_t size)
 {
     auto env_var  = std::getenv(env_var_name);
-    return ((nullptr != env_var) && (strnlen(env_var, 2) == 1) && (strncmp(env_var, "1", 1) == 0));
+    return ((nullptr != env_var) && (strncmp(env_var, required_value, size) == 0));
 }
 
 } /* namespace hailort */
index a70d4e0dda4873e05706eb4dd6763712cf49e660..4d7dc6c472481a1b2417df227e974b7631a0cc67 100644 (file)
 
 #define INVALID_DRIVER_HANDLE_VALUE     ((uintptr_t)-1)
 
-// Used by windows and unix driver to raise the right CPU control handle to the FW. The same as in pcie_service FW 
+// Used by windows and unix driver to raise the right CPU control handle to the FW. The same as in pcie_service FW
 #define FW_ACCESS_CORE_CPU_CONTROL_SHIFT (1)
-#define FW_ACCESS_CORE_CPU_CONTROL_MASK  (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT) 
+#define FW_ACCESS_CORE_CPU_CONTROL_MASK  (1 << FW_ACCESS_CORE_CPU_CONTROL_SHIFT)
 #define FW_ACCESS_CONTROL_INTERRUPT_SHIFT (0)
 #define FW_ACCESS_APP_CPU_CONTROL_MASK (1 << FW_ACCESS_CONTROL_INTERRUPT_SHIFT)
+#define FW_ACCESS_DRIVER_SHUTDOWN_SHIFT (2)
+#define FW_ACCESS_DRIVER_SHUTDOWN_MASK (1 << FW_ACCESS_DRIVER_SHUTDOWN_SHIFT)
 
 #define INVALID_VDMA_CHANNEL (0xff)
 
@@ -317,6 +319,7 @@ struct hailo_d2h_notification {
 enum hailo_board_type {
     HAILO_BOARD_TYPE_HAILO8 = 0,
     HAILO_BOARD_TYPE_HAILO15,
+    HAILO_BOARD_TYPE_PLUTO,
     HAILO_BOARD_TYPE_COUNT,
 
     /** Max enum value to maintain ABI Integrity */
index 3755c4e388091eb654b646a4848a3c7edc4d3ac2..c2f0cbed998212282b859be489bb270caaa18068 100644 (file)
@@ -1,5 +1,7 @@
 cmake_minimum_required(VERSION 3.0.0)
 
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/spdlog.cmake)
+
 if(WIN32)
     set(HAILORT_SERVICE_OS_DIR "${CMAKE_CURRENT_SOURCE_DIR}/windows")
 elseif(UNIX)
index d76ed61782471649a78134724c081eb005304e68..b4018958e3c25809eeb1d63acf795d6a9a7e5731 100644 (file)
@@ -32,6 +32,24 @@ HailoRtRpcService::HailoRtRpcService()
     });
 }
 
+hailo_status HailoRtRpcService::flush_input_vstream(uint32_t handle)
+{
+    if (is_input_vstream_aborted(handle)) {
+        return HAILO_SUCCESS;
+    }
+
+    auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
+        return input_vstream->flush();
+    };
+    auto &manager = ServiceResourceManager<InputVStream>::get_instance();
+    auto status = manager.execute<hailo_status>(handle, lambda);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to flush input vstream with status {}", status);
+    }
+    return status;
+}
+
+
 hailo_status HailoRtRpcService::abort_input_vstream(uint32_t handle)
 {
     if (is_input_vstream_aborted(handle)) {
@@ -131,7 +149,6 @@ void HailoRtRpcService::abort_vstreams_by_pids(std::set<uint32_t> &pids)
     }
 }
 
-
 void HailoRtRpcService::remove_disconnected_clients()
 {
     std::this_thread::sleep_for(hailort::HAILO_KEEPALIVE_INTERVAL / 2);
@@ -172,12 +189,17 @@ void HailoRtRpcService::keep_alive()
     }
 }
 
+void HailoRtRpcService::update_client_id_timestamp(uint32_t pid)
+{
+    std::unique_lock<std::mutex> lock(m_mutex);
+    m_clients_pids[pid] = std::chrono::high_resolution_clock::now();
+}
+
 grpc::Status HailoRtRpcService::client_keep_alive(grpc::ServerContext*, const keepalive_Request *request,
     empty*)
 {
     auto client_id = request->pid();
-    std::unique_lock<std::mutex> lock(m_mutex);
-    m_clients_pids[client_id] = std::chrono::high_resolution_clock::now();
+    update_client_id_timestamp(client_id);
     return grpc::Status::OK;
 }
 
@@ -195,15 +217,6 @@ grpc::Status HailoRtRpcService::get_service_version(grpc::ServerContext*, const
     return grpc::Status::OK;
 }
 
-grpc::Status HailoRtRpcService::VDevice_dup_handle(grpc::ServerContext*, const dup_handle_Request *request,
-    dup_handle_Reply* reply)
-{
-    auto &manager = ServiceResourceManager<VDevice>::get_instance();
-    auto handle = manager.dup_handle(request->pid(), request->handle());
-    reply->set_handle(handle);
-    return grpc::Status::OK;
-}
-
 grpc::Status HailoRtRpcService::VDevice_create(grpc::ServerContext *, const VDevice_create_Request *request,
     VDevice_create_Reply *reply)
 {
@@ -230,6 +243,7 @@ grpc::Status HailoRtRpcService::VDevice_create(grpc::ServerContext *, const VDev
     auto vdevice = VDevice::create(params);
     CHECK_EXPECTED_AS_RPC_STATUS(vdevice, reply);
 
+    update_client_id_timestamp(request->pid());
     auto &manager = ServiceResourceManager<VDevice>::get_instance();
     auto handle = manager.register_resource(request->pid(), std::move(vdevice.release()));
     reply->set_handle(handle);
@@ -241,7 +255,7 @@ grpc::Status HailoRtRpcService::VDevice_release(grpc::ServerContext*, const Rele
     Release_Reply *reply)
 {
     auto &manager = ServiceResourceManager<VDevice>::get_instance();
-    manager.release_resource(request->handle(), request->pid());
+    manager.release_resource(request->vdevice_identifier().vdevice_handle(), request->pid());
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
 }
@@ -291,11 +305,13 @@ grpc::Status HailoRtRpcService::VDevice_configure(grpc::ServerContext*, const VD
         configure_params_map.insert({name_configure_params_pair.name(), network_configure_params});
     }
 
+    update_client_id_timestamp(request->pid());
     auto lambda = [](std::shared_ptr<VDevice> vdevice, Hef &hef, NetworkGroupsParamsMap &configure_params_map) {
         return vdevice->configure(hef, configure_params_map);
     };
     auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
-    auto networks = vdevice_manager.execute<Expected<ConfiguredNetworkGroupVector>>(request->handle(), lambda, hef.release(), configure_params_map);
+    auto networks = vdevice_manager.execute<Expected<ConfiguredNetworkGroupVector>>(request->identifier().vdevice_handle(), lambda,
+        hef.release(), configure_params_map);
     CHECK_SUCCESS_AS_RPC_STATUS(networks.status(), reply);
 
     auto &networks_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
@@ -315,7 +331,7 @@ grpc::Status HailoRtRpcService::VDevice_get_physical_devices_ids(grpc::ServerCon
         return vdevice->get_physical_devices_ids();
     };
     auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
-    auto expected_devices_ids = vdevice_manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda);
+    auto expected_devices_ids = vdevice_manager.execute<Expected<std::vector<std::string>>>(request->identifier().vdevice_handle(), lambda);
     CHECK_EXPECTED_AS_RPC_STATUS(expected_devices_ids, reply);
     auto devices_ids = expected_devices_ids.value();
     auto devices_ids_proto = reply->mutable_devices_ids();
@@ -333,17 +349,20 @@ grpc::Status HailoRtRpcService::VDevice_get_default_streams_interface(grpc::Serv
         return vdevice->get_default_streams_interface();
     };
     auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
-    auto stream_interface = vdevice_manager.execute<Expected<hailo_stream_interface_t>>(request->handle(), lambda);
+    auto stream_interface = vdevice_manager.execute<Expected<hailo_stream_interface_t>>(request->identifier().vdevice_handle(), lambda);
     CHECK_EXPECTED_AS_RPC_STATUS(stream_interface, reply);
     reply->set_stream_interface(*stream_interface);
     return grpc::Status::OK;
 }
 
-grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_dup_handle(grpc::ServerContext*, const dup_handle_Request *request,
-    dup_handle_Reply* reply)
+grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_dup_handle(grpc::ServerContext*, const ConfiguredNetworkGroup_dup_handle_Request *request,
+    ConfiguredNetworkGroup_dup_handle_Reply* reply)
 {
-    auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto handle = manager.dup_handle(request->pid(), request->handle());
+    auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
+    vdevice_manager.dup_handle(request->identifier().vdevice_handle(), request->pid());
+
+    auto &ng_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
+    auto handle = ng_manager.dup_handle(request->identifier().network_group_handle(), request->pid());
     reply->set_handle(handle);
     return grpc::Status::OK;
 }
@@ -352,7 +371,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_release(grpc::ServerConte
     Release_Reply *reply)
 {
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    manager.release_resource(request->handle(), request->pid());
+    manager.release_resource(request->network_group_identifier().network_group_handle(), request->pid());
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
 }
@@ -382,7 +401,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_make_input_vstream_params
             return cng->make_input_vstream_params(quantized, format_type, timeout_ms, queue_size, network_name);
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_params = manager.execute<Expected<std::map<std::string, hailo_vstream_params_t>>>(request->handle(), lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()),
+    auto expected_params = manager.execute<Expected<std::map<std::string, hailo_vstream_params_t>>>(
+        request->identifier().network_group_handle(), lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()),
         request->timeout_ms(), request->queue_size(), request->network_name());
     CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply);
     auto params_map = reply->mutable_vstream_params_map();
@@ -404,7 +424,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_make_output_vstream_param
             return cng->make_output_vstream_params(quantized, format_type, timeout_ms, queue_size, network_name);
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_params = manager.execute<Expected<std::map<std::string, hailo_vstream_params_t>>>(request->handle(), 
+    auto expected_params = manager.execute<Expected<std::map<std::string, hailo_vstream_params_t>>>(request->identifier().network_group_handle(), 
         lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()),
         request->timeout_ms(), request->queue_size(), request->network_name());
     CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply);
@@ -427,8 +447,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_make_output_vstream_param
             return cng->make_output_vstream_params_groups(quantized, format_type, timeout_ms, queue_size);
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_params = manager.execute<Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>>>(request->handle(), 
-        lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()),
+    auto expected_params = manager.execute<Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>>>(
+        request->identifier().network_group_handle(), lambda, request->quantized(), static_cast<hailo_format_type_t>(request->format_type()),
         request->timeout_ms(), request->queue_size());
     CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply);
     auto params_map_vector = reply->mutable_vstream_params_groups();
@@ -453,7 +473,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_default_stream_interf
             return cng->get_default_streams_interface();
     };
     auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_stream_interface = net_group_manager.execute<Expected<hailo_stream_interface_t>>(request->handle(), lambda);
+    auto expected_stream_interface = net_group_manager.execute<Expected<hailo_stream_interface_t>>(request->identifier().network_group_handle(), lambda);
     CHECK_EXPECTED_AS_RPC_STATUS(expected_stream_interface, reply);
     reply->set_stream_interface(static_cast<uint32_t>(expected_stream_interface.value()));
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
@@ -468,7 +488,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_output_vstream_groups
             return cng->get_output_vstream_groups();
     };
     auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_output_vstream_groups = net_group_manager.execute<Expected<std::vector<std::vector<std::string>>>>(request->handle(), lambda);
+    auto expected_output_vstream_groups = net_group_manager.execute<Expected<std::vector<std::vector<std::string>>>>(
+        request->identifier().network_group_handle(), lambda);
     CHECK_EXPECTED_AS_RPC_STATUS(expected_output_vstream_groups, reply);
     auto output_vstream_groups = expected_output_vstream_groups.value();
     auto groups_proto = reply->mutable_output_vstream_groups();
@@ -497,6 +518,7 @@ void serialize_vstream_info(const hailo_vstream_info_t &info, ProtoVStreamInfo *
         auto nms_shape_proto = info_proto->mutable_nms_shape();
         nms_shape_proto->set_number_of_classes(info.nms_shape.number_of_classes);
         nms_shape_proto->set_max_bbox_per_class(info.nms_shape.max_bboxes_per_class);
+        nms_shape_proto->set_max_mask_size(info.nms_shape.max_mask_size);
     } else {
         auto shape_proto = info_proto->mutable_shape();
         shape_proto->set_height(info.shape.height);
@@ -529,7 +551,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_input_vstream_infos(g
             return cng->get_input_vstream_infos(network_name);
     };
     auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(request->handle(), lambda, request->network_name());
+    auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(
+        request->identifier().network_group_handle(), lambda, request->network_name());
     CHECK_EXPECTED_AS_RPC_STATUS(expected_vstream_infos, reply);
     serialize_vstream_infos(reply, expected_vstream_infos.value());
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
@@ -544,7 +567,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_output_vstream_infos(
             return cng->get_output_vstream_infos(network_name);
     };
     auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(request->handle(), lambda, request->network_name());
+    auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(
+        request->identifier().network_group_handle(), lambda, request->network_name());
     CHECK_EXPECTED_AS_RPC_STATUS(expected_vstream_infos, reply);
     serialize_vstream_infos(reply, expected_vstream_infos.value());
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
@@ -559,7 +583,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_all_vstream_infos(grp
             return cng->get_all_vstream_infos(network_name);
     };
     auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(request->handle(), lambda, request->network_name());
+    auto expected_vstream_infos = net_group_manager.execute<Expected<std::vector<hailo_vstream_info_t>>>(
+        request->identifier().network_group_handle(), lambda, request->network_name());
     CHECK_EXPECTED_AS_RPC_STATUS(expected_vstream_infos, reply);
     serialize_vstream_infos(reply, expected_vstream_infos.value());
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
@@ -574,7 +599,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_is_scheduled(grpc::Server
         return cng->is_scheduled();
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto is_scheduled = manager.execute<bool>(request->handle(), lambda);
+    auto is_scheduled = manager.execute<bool>(request->identifier().network_group_handle(), lambda);
     reply->set_is_scheduled(static_cast<bool>(is_scheduled));
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
@@ -588,7 +613,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_set_scheduler_timeout(grp
             return cng->set_scheduler_timeout(timeout_ms, network_name);
     };
     auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto status = net_group_manager.execute<hailo_status>(request->handle(), lambda, static_cast<std::chrono::milliseconds>(request->timeout_ms()),
+    auto status = net_group_manager.execute<hailo_status>(request->identifier().network_group_handle(), lambda,
+        static_cast<std::chrono::milliseconds>(request->timeout_ms()),
         request->network_name());
     reply->set_status(status);
     return grpc::Status::OK;
@@ -602,7 +628,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_set_scheduler_threshold(g
             return cng->set_scheduler_threshold(threshold, network_name);
     };
     auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto status = net_group_manager.execute<hailo_status>(request->handle(), lambda, request->threshold(),
+    auto status = net_group_manager.execute<hailo_status>(request->identifier().network_group_handle(), lambda, request->threshold(),
         request->network_name());
     reply->set_status(status);
     return grpc::Status::OK;
@@ -616,7 +642,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_set_scheduler_priority(gr
             return cng->set_scheduler_priority(priority, network_name);
     };
     auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto status = net_group_manager.execute<hailo_status>(request->handle(), lambda, static_cast<uint8_t>(request->priority()),
+    auto status = net_group_manager.execute<hailo_status>(request->identifier().network_group_handle(), lambda, static_cast<uint8_t>(request->priority()),
         request->network_name());
     reply->set_status(status);
     return grpc::Status::OK;
@@ -630,7 +656,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_config_params(grpc::S
             return cng->get_config_params();
     };
     auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_params = net_group_manager.execute<Expected<ConfigureNetworkParams>>(request->handle(), lambda);
+    auto expected_params = net_group_manager.execute<Expected<ConfigureNetworkParams>>(request->identifier().network_group_handle(), lambda);
     CHECK_EXPECTED_AS_RPC_STATUS(expected_params, reply);
     auto net_configure_params = expected_params.value();
     auto proto_network_configure_params = reply->mutable_params();
@@ -678,13 +704,20 @@ grpc::Status HailoRtRpcService::InputVStreams_create(grpc::ServerContext *, cons
         };
         inputs_params.emplace(param_proto.name(), std::move(params));
     }
-    auto network_group_handle = request->net_group();
+    auto network_group_handle = request->identifier().network_group_handle();
     auto client_pid = request->pid();
 
+    update_client_id_timestamp(client_pid);
+    auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
+    vdevice_manager.dup_handle(request->identifier().vdevice_handle(), client_pid);
+
+    auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
+    net_group_manager.dup_handle(network_group_handle, client_pid);
+
+
     auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, const std::map<std::string, hailo_vstream_params_t> &inputs_params) {
             return cng->create_input_vstreams(inputs_params);
     };
-    auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
     auto vstreams_expected = net_group_manager.execute<Expected<std::vector<InputVStream>>>(network_group_handle, lambda, inputs_params);
     CHECK_EXPECTED_AS_RPC_STATUS(vstreams_expected, reply);
     auto vstreams = vstreams_expected.release();
@@ -694,7 +727,7 @@ grpc::Status HailoRtRpcService::InputVStreams_create(grpc::ServerContext *, cons
         auto handle = manager.register_resource(client_pid, make_shared_nothrow<InputVStream>(std::move(vstreams[i])));
         reply->add_handles(handle);
     }
-    net_group_manager.dup_handle(client_pid, network_group_handle);
+
 
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
@@ -703,9 +736,20 @@ grpc::Status HailoRtRpcService::InputVStreams_create(grpc::ServerContext *, cons
 grpc::Status HailoRtRpcService::InputVStream_release(grpc::ServerContext *, const Release_Request *request,
     Release_Reply *reply)
 {
+    auto vstream_handle = request->vstream_identifier().vstream_handle();
+    auto was_aborted = is_input_vstream_aborted(vstream_handle);
+    flush_input_vstream(vstream_handle);
+    abort_input_vstream(vstream_handle);
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    manager.release_resource(request->handle(), request->pid());
-    reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
+    auto resource = manager.release_resource(vstream_handle, request->pid());
+    auto status = HAILO_SUCCESS;
+    if (resource && (!was_aborted)) {
+        status = resource->resume();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__INFO("Failed to resume input vstream {} after destruction", resource->name());
+        }
+    }
+    reply->set_status(static_cast<uint32_t>(status));
     return grpc::Status::OK;
 }
 
@@ -730,13 +774,19 @@ grpc::Status HailoRtRpcService::OutputVStreams_create(grpc::ServerContext *, con
         output_params.emplace(param_proto.name(), std::move(params));
     }
 
-    auto network_group_handle = request->net_group();
+    auto network_group_handle = request->identifier().network_group_handle();
     auto client_pid = request->pid();
 
+    update_client_id_timestamp(client_pid);
+    auto &vdevice_manager = ServiceResourceManager<VDevice>::get_instance();
+    vdevice_manager.dup_handle(request->identifier().vdevice_handle(), client_pid);
+
+    auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
+    net_group_manager.dup_handle(network_group_handle, client_pid);
+
     auto lambda = [](std::shared_ptr<ConfiguredNetworkGroup> cng, const std::map<std::string, hailo_vstream_params_t> &output_params) {
             return cng->create_output_vstreams(output_params);
     };
-    auto &net_group_manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
     auto vstreams_expected = net_group_manager.execute<Expected<std::vector<OutputVStream>>>(network_group_handle, lambda, output_params);
     CHECK_EXPECTED_AS_RPC_STATUS(vstreams_expected, reply);
     auto vstreams = vstreams_expected.release();
@@ -746,7 +796,7 @@ grpc::Status HailoRtRpcService::OutputVStreams_create(grpc::ServerContext *, con
         auto handle = manager.register_resource(client_pid, make_shared_nothrow<OutputVStream>(std::move(vstreams[i])));
         reply->add_handles(handle);
     }
-    net_group_manager.dup_handle(client_pid, network_group_handle);
+
 
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
@@ -755,10 +805,11 @@ grpc::Status HailoRtRpcService::OutputVStreams_create(grpc::ServerContext *, con
 grpc::Status HailoRtRpcService::OutputVStream_release(grpc::ServerContext *, const Release_Request *request,
     Release_Reply *reply)
 {
-    auto was_aborted = is_output_vstream_aborted(request->handle());
-    abort_output_vstream(request->handle());
+    auto vstream_handle = request->vstream_identifier().vstream_handle();
+    auto was_aborted = is_output_vstream_aborted(vstream_handle);
+    abort_output_vstream(vstream_handle);
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto resource = manager.release_resource(request->handle(), request->pid());
+    auto resource = manager.release_resource(vstream_handle, request->pid());
     auto status = HAILO_SUCCESS;
     if (resource && (!was_aborted)) {
         status = resource->resume();
@@ -778,23 +829,35 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_name(grpc::ServerContext*
             return cng->name();
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto network_group_name = manager.execute<std::string>(request->handle(), lambda);
+    auto network_group_name = manager.execute<std::string>(request->identifier().network_group_handle(), lambda);
     reply->set_network_group_name(network_group_name);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
 }
 
+grpc::Status HailoRtRpcService::InputVStream_is_multi_planar(grpc::ServerContext*, const InputVStream_is_multi_planar_Request *request,
+        InputVStream_is_multi_planar_Reply *reply)
+{
+    auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
+            return input_vstream->is_multi_planar();
+    };
+    auto &manager = ServiceResourceManager<InputVStream>::get_instance();
+    auto multi_planar = manager.execute<bool>(request->identifier().vstream_handle(), lambda);
+
+    reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
+    reply->set_is_multi_planar(multi_planar);
+    return grpc::Status::OK;
+}
+
 grpc::Status HailoRtRpcService::InputVStream_write(grpc::ServerContext*, const InputVStream_write_Request *request,
         InputVStream_write_Reply *reply)
 {
-    auto buffer_expected = Buffer::create_shared(request->data().length());
-    CHECK_EXPECTED_AS_RPC_STATUS(buffer_expected, reply);
     std::vector<uint8_t> data(request->data().begin(), request->data().end());
     auto lambda = [](std::shared_ptr<InputVStream> input_vstream, const MemoryView &buffer) {
             return input_vstream->write(std::move(buffer));
     };
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto status = manager.execute<hailo_status>(request->handle(), lambda, MemoryView::create_const(data.data(), data.size()));
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, MemoryView::create_const(data.data(), data.size()));
 
     if (HAILO_STREAM_ABORTED_BY_USER == status) {
         LOGGER__INFO("User aborted VStream write.");
@@ -806,21 +869,33 @@ grpc::Status HailoRtRpcService::InputVStream_write(grpc::ServerContext*, const I
     return grpc::Status::OK;
 }
 
-grpc::Status HailoRtRpcService::InputVStream_dup_handle(grpc::ServerContext*, const dup_handle_Request *request,
-    dup_handle_Reply *reply)
+grpc::Status HailoRtRpcService::InputVStream_write_pix(grpc::ServerContext*, const InputVStream_write_pix_Request *request,
+    InputVStream_write_pix_Reply *reply)
 {
-    auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto handle = manager.dup_handle(request->pid(), request->handle());
-    reply->set_handle(handle);
-    return grpc::Status::OK;
-}
+    hailo_pix_buffer_t pix_buffer = {};
+    pix_buffer.index = request->index();
+    pix_buffer.number_of_planes = request->number_of_planes();
+    std::vector<std::vector<uint8_t>> data_arrays;
+    data_arrays.reserve(pix_buffer.number_of_planes);
+    for (uint32_t i =0; i < pix_buffer.number_of_planes; i++) {
+        data_arrays.push_back(std::vector<uint8_t>(request->planes_data(i).begin(), request->planes_data(i).end()));
+        pix_buffer.planes[i].user_ptr = data_arrays[i].data();
+        pix_buffer.planes[i].bytes_used = static_cast<uint32_t>(data_arrays[i].size());
+    }
 
-grpc::Status HailoRtRpcService::OutputVStream_dup_handle(grpc::ServerContext*, const dup_handle_Request *request,
-    dup_handle_Reply *reply)
-{
-    auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto handle = manager.dup_handle(request->pid(), request->handle());
-    reply->set_handle(handle);
+    auto lambda = [](std::shared_ptr<InputVStream> input_vstream, const hailo_pix_buffer_t &buffer) {
+            return input_vstream->write(std::move(buffer));
+    };
+    auto &manager = ServiceResourceManager<InputVStream>::get_instance();
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, pix_buffer);
+
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("User aborted VStream write.");
+        reply->set_status(static_cast<uint32_t>(HAILO_STREAM_ABORTED_BY_USER));
+        return grpc::Status::OK;
+    }
+    CHECK_SUCCESS_AS_RPC_STATUS(status,  reply, "VStream write failed");
+    reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
 }
 
@@ -832,7 +907,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_network_infos(grpc::S
             return cng->get_network_infos();
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_network_infos = manager.execute<Expected<std::vector<hailo_network_info_t>>>(request->handle(), lambda);
+    auto expected_network_infos = manager.execute<Expected<std::vector<hailo_network_info_t>>>(request->identifier().network_group_handle(), lambda);
     CHECK_EXPECTED_AS_RPC_STATUS(expected_network_infos, reply);
     auto infos_proto = reply->mutable_network_infos();
     for (auto& info : expected_network_infos.value()) {
@@ -850,7 +925,7 @@ grpc::Status HailoRtRpcService::OutputVStream_read(grpc::ServerContext*, const O
             return output_vstream->read(std::move(buffer));
     };
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto status = manager.execute<hailo_status>(request->handle(), lambda, MemoryView(data.data(), data.size()));
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, MemoryView(data.data(), data.size()));
     if (HAILO_STREAM_ABORTED_BY_USER == status) {
         LOGGER__INFO("User aborted VStream read.");
         reply->set_status(static_cast<uint32_t>(HAILO_STREAM_ABORTED_BY_USER));
@@ -870,7 +945,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_all_stream_infos(grpc
             return cng->get_all_stream_infos();
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_stream_infos = manager.execute<Expected<std::vector<hailo_stream_info_t>>>(request->handle(), lambda);
+    auto expected_stream_infos = manager.execute<Expected<std::vector<hailo_stream_info_t>>>(request->identifier().network_group_handle(), lambda);
     CHECK_EXPECTED_AS_RPC_STATUS(expected_stream_infos, reply);
     auto proto_stream_infos = reply->mutable_stream_infos();
     for (auto& stream_info : expected_stream_infos.value()) {
@@ -927,7 +1002,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_latency_measurement(g
             return cng->get_latency_measurement(network_name);
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto expected_latency_result = manager.execute<Expected<LatencyMeasurementResult>>(request->handle(), lambda, request->network_name());
+    auto expected_latency_result = manager.execute<Expected<LatencyMeasurementResult>>(
+        request->identifier().network_group_handle(), lambda, request->network_name());
     if (HAILO_NOT_AVAILABLE == expected_latency_result.status()) {
         reply->set_status(static_cast<uint32_t>(HAILO_NOT_AVAILABLE));
     } else {
@@ -946,7 +1022,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_is_multi_context(grpc::Se
             return cng->is_multi_context();
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto is_multi_context = manager.execute<bool>(request->handle(), lambda);
+    auto is_multi_context = manager.execute<bool>(request->identifier().network_group_handle(), lambda);
     reply->set_is_multi_context(static_cast<bool>(is_multi_context));
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
@@ -960,7 +1036,7 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_sorted_output_names(g
             return cng->get_sorted_output_names();
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto sorted_output_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda);
+    auto sorted_output_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->identifier().network_group_handle(), lambda);
     CHECK_EXPECTED_AS_RPC_STATUS(sorted_output_names_expected, reply);
     auto sorted_output_names_proto = reply->mutable_sorted_output_names();
     for (auto &name : sorted_output_names_expected.value()) {
@@ -978,7 +1054,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_stream_names_from_vst
             return cng->get_stream_names_from_vstream_name(vstream_name);
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto streams_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda, request->vstream_name());
+    auto streams_names_expected = manager.execute<Expected<std::vector<std::string>>>(
+        request->identifier().network_group_handle(), lambda, request->vstream_name());
     CHECK_EXPECTED_AS_RPC_STATUS(streams_names_expected, reply);
     auto streams_names_proto = reply->mutable_streams_names();
     for (auto &name : streams_names_expected.value()) {
@@ -996,7 +1073,8 @@ grpc::Status HailoRtRpcService::ConfiguredNetworkGroup_get_vstream_names_from_st
             return cng->get_vstream_names_from_stream_name(stream_name);
     };
     auto &manager = ServiceResourceManager<ConfiguredNetworkGroup>::get_instance();
-    auto vstreams_names_expected = manager.execute<Expected<std::vector<std::string>>>(request->handle(), lambda, request->stream_name());
+    auto vstreams_names_expected = manager.execute<Expected<std::vector<std::string>>>(
+        request->identifier().network_group_handle(), lambda, request->stream_name());
     CHECK_EXPECTED_AS_RPC_STATUS(vstreams_names_expected, reply);
     auto vstreams_names_proto = reply->mutable_vstreams_names();
     for (auto &name : vstreams_names_expected.value()) {
@@ -1013,7 +1091,7 @@ grpc::Status HailoRtRpcService::InputVStream_get_frame_size(grpc::ServerContext*
             return input_vstream->get_frame_size();
     };
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto frame_size = manager.execute<size_t>(request->handle(), lambda);
+    auto frame_size = manager.execute<size_t>(request->identifier().vstream_handle(), lambda);
     reply->set_frame_size(static_cast<uint32_t>(frame_size));
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
@@ -1026,7 +1104,7 @@ grpc::Status HailoRtRpcService::OutputVStream_get_frame_size(grpc::ServerContext
             return output_vstream->get_frame_size();
     };
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto frame_size = manager.execute<size_t>(request->handle(), lambda);
+    auto frame_size = manager.execute<size_t>(request->identifier().vstream_handle(), lambda);
     reply->set_frame_size(static_cast<uint32_t>(frame_size));
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
@@ -1035,12 +1113,8 @@ grpc::Status HailoRtRpcService::OutputVStream_get_frame_size(grpc::ServerContext
 grpc::Status HailoRtRpcService::InputVStream_flush(grpc::ServerContext*, const InputVStream_flush_Request *request,
     InputVStream_flush_Reply *reply)
 {
-    auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
-            return input_vstream->flush();
-    };
-    auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto flush_status = manager.execute<hailo_status>(request->handle(), lambda);
-    reply->set_status(static_cast<uint32_t>(flush_status));
+    auto status = flush_input_vstream(request->identifier().vstream_handle());
+    reply->set_status(status);
     return grpc::Status::OK;
 }
 
@@ -1051,7 +1125,7 @@ grpc::Status HailoRtRpcService::InputVStream_name(grpc::ServerContext*, const VS
             return input_vstream->name();
     };
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto name = manager.execute<std::string>(request->handle(), lambda);
+    auto name = manager.execute<std::string>(request->identifier().vstream_handle(), lambda);
     reply->set_name(name);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
@@ -1064,7 +1138,7 @@ grpc::Status HailoRtRpcService::OutputVStream_name(grpc::ServerContext*, const V
             return output_vstream->name();
     };
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto name = manager.execute<std::string>(request->handle(), lambda);
+    auto name = manager.execute<std::string>(request->identifier().vstream_handle(), lambda);
     reply->set_name(name);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
@@ -1077,7 +1151,7 @@ grpc::Status HailoRtRpcService::InputVStream_network_name(grpc::ServerContext*,
             return input_vstream->network_name();
     };
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto name = manager.execute<std::string>(request->handle(), lambda);
+    auto name = manager.execute<std::string>(request->identifier().vstream_handle(), lambda);
     reply->set_network_name(name);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
@@ -1090,7 +1164,7 @@ grpc::Status HailoRtRpcService::OutputVStream_network_name(grpc::ServerContext*,
             return output_vstream->network_name();
     };
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto name = manager.execute<std::string>(request->handle(), lambda);
+    auto name = manager.execute<std::string>(request->identifier().vstream_handle(), lambda);
     reply->set_network_name(name);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
@@ -1099,7 +1173,7 @@ grpc::Status HailoRtRpcService::OutputVStream_network_name(grpc::ServerContext*,
 grpc::Status HailoRtRpcService::InputVStream_abort(grpc::ServerContext*, const VStream_abort_Request *request,
     VStream_abort_Reply *reply)
 {
-    auto status = abort_input_vstream(request->handle());
+    auto status = abort_input_vstream(request->identifier().vstream_handle());
     reply->set_status(status);
     return grpc::Status::OK;
 }
@@ -1107,7 +1181,7 @@ grpc::Status HailoRtRpcService::InputVStream_abort(grpc::ServerContext*, const V
 grpc::Status HailoRtRpcService::OutputVStream_abort(grpc::ServerContext*, const VStream_abort_Request *request,
     VStream_abort_Reply *reply)
 {
-    auto status = abort_output_vstream(request->handle());
+    auto status = abort_output_vstream(request->identifier().vstream_handle());
     reply->set_status(status);
     return grpc::Status::OK;
 }
@@ -1119,7 +1193,7 @@ grpc::Status HailoRtRpcService::InputVStream_resume(grpc::ServerContext*, const
             return input_vstream->resume();
     };
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto status = manager.execute<hailo_status>(request->handle(), lambda);
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
     reply->set_status(status);
     return grpc::Status::OK;
 }
@@ -1131,7 +1205,7 @@ grpc::Status HailoRtRpcService::OutputVStream_resume(grpc::ServerContext*, const
             return output_vstream->resume();
     };
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto status = manager.execute<hailo_status>(request->handle(), lambda);
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
     reply->set_status(status);
     return grpc::Status::OK;
 }
@@ -1143,7 +1217,7 @@ grpc::Status HailoRtRpcService::InputVStream_stop_and_clear(grpc::ServerContext*
             return input_vstream->stop_and_clear();
     };
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto status = manager.execute<hailo_status>(request->handle(), lambda);
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
     reply->set_status(status);
     return grpc::Status::OK;
 }
@@ -1155,7 +1229,7 @@ grpc::Status HailoRtRpcService::OutputVStream_stop_and_clear(grpc::ServerContext
             return output_vstream->stop_and_clear();
     };
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto status = manager.execute<hailo_status>(request->handle(), lambda);
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
     reply->set_status(status);
     return grpc::Status::OK;
 }
@@ -1167,7 +1241,7 @@ grpc::Status HailoRtRpcService::InputVStream_start_vstream(grpc::ServerContext*,
             return input_vstream->start_vstream();
     };
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto status = manager.execute<hailo_status>(request->handle(), lambda);
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
     reply->set_status(status);
     return grpc::Status::OK;
 }
@@ -1179,7 +1253,7 @@ grpc::Status HailoRtRpcService::OutputVStream_start_vstream(grpc::ServerContext*
             return output_vstream->start_vstream();
     };
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto status = manager.execute<hailo_status>(request->handle(), lambda);
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda);
     reply->set_status(status);
     return grpc::Status::OK;
 }
@@ -1191,7 +1265,7 @@ grpc::Status HailoRtRpcService::InputVStream_get_user_buffer_format(grpc::Server
             return input_vstream->get_user_buffer_format();
     };
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto format = manager.execute<hailo_format_t>(request->handle(), lambda);
+    auto format = manager.execute<hailo_format_t>(request->identifier().vstream_handle(), lambda);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
 
     auto proto_user_buffer_format = reply->mutable_user_buffer_format();
@@ -1209,7 +1283,7 @@ grpc::Status HailoRtRpcService::OutputVStream_get_user_buffer_format(grpc::Serve
             return output_vstream->get_user_buffer_format();
     };
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto format = manager.execute<hailo_format_t>(request->handle(), lambda);
+    auto format = manager.execute<hailo_format_t>(request->identifier().vstream_handle(), lambda);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
 
     auto proto_user_buffer_format = reply->mutable_user_buffer_format();
@@ -1227,7 +1301,7 @@ grpc::Status HailoRtRpcService::InputVStream_get_info(grpc::ServerContext*, cons
             return input_vstream->get_info();
     };
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto info = manager.execute<hailo_vstream_info_t>(request->handle(), lambda);
+    auto info = manager.execute<hailo_vstream_info_t>(request->identifier().vstream_handle(), lambda);
     auto info_proto = reply->mutable_vstream_info();
     serialize_vstream_info(info, info_proto);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
@@ -1241,38 +1315,77 @@ grpc::Status HailoRtRpcService::OutputVStream_get_info(grpc::ServerContext*, con
             return output_vstream->get_info();
     };
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto info = manager.execute<hailo_vstream_info_t>(request->handle(), lambda);
+    auto info = manager.execute<hailo_vstream_info_t>(request->identifier().vstream_handle(), lambda);
     auto info_proto = reply->mutable_vstream_info();
     serialize_vstream_info(info, info_proto);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
 }
 
-grpc::Status HailoRtRpcService::InputVStream_is_aborted(grpc::ServerContext*, const VStream_is_aborted_Request *request,
+grpc::Status HailoRtRpcService::OutputVStream_is_aborted(grpc::ServerContext*, const VStream_is_aborted_Request *request,
     VStream_is_aborted_Reply *reply)
 {
-    auto lambda = [](std::shared_ptr<OutputVStream> input_vstream) {
-            return input_vstream->is_aborted();
+    auto lambda = [](std::shared_ptr<OutputVStream> output_vstream) {
+            return output_vstream->is_aborted();
     };
     auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
-    auto is_aborted = manager.execute<bool>(request->handle(), lambda);
+    auto is_aborted = manager.execute<bool>(request->identifier().vstream_handle(), lambda);
     reply->set_is_aborted(is_aborted);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
 }
 
-grpc::Status HailoRtRpcService::OutputVStream_is_aborted(grpc::ServerContext*, const VStream_is_aborted_Request *request,
+grpc::Status HailoRtRpcService::InputVStream_is_aborted(grpc::ServerContext*, const VStream_is_aborted_Request *request,
     VStream_is_aborted_Reply *reply)
 {
     auto lambda = [](std::shared_ptr<InputVStream> input_vstream) {
             return input_vstream->is_aborted();
     };
     auto &manager = ServiceResourceManager<InputVStream>::get_instance();
-    auto is_aborted = manager.execute<bool>(request->handle(), lambda);
+    auto is_aborted = manager.execute<bool>(request->identifier().vstream_handle(), lambda);
     reply->set_is_aborted(is_aborted);
     reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
     return grpc::Status::OK;
 }
 
+grpc::Status HailoRtRpcService::OutputVStream_set_nms_score_threshold(grpc::ServerContext*, const VStream_set_nms_score_threshold_Request *request,
+    VStream_set_nms_score_threshold_Reply *reply)
+{
+    auto lambda = [](std::shared_ptr<OutputVStream> output_vstream, float32_t threshold) {
+            return output_vstream->set_nms_score_threshold(threshold);
+    };
+    auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, static_cast<float32_t>(request->threshold()));
+    CHECK_SUCCESS_AS_RPC_STATUS(status,  reply, "set_nms_score_threshold failed");
+    reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
+    return grpc::Status::OK;
+}
+
+grpc::Status HailoRtRpcService::OutputVStream_set_nms_iou_threshold(grpc::ServerContext*, const VStream_set_nms_iou_threshold_Request *request,
+    VStream_set_nms_iou_threshold_Reply *reply)
+{
+    auto lambda = [](std::shared_ptr<OutputVStream> output_vstream, float32_t threshold) {
+            return output_vstream->set_nms_iou_threshold(threshold);
+    };
+    auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, static_cast<float32_t>(request->threshold()));
+    CHECK_SUCCESS_AS_RPC_STATUS(status,  reply, "set_nms_iou_threshold failed");
+    reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
+    return grpc::Status::OK;
+}
+
+grpc::Status HailoRtRpcService::OutputVStream_set_nms_max_proposals_per_class(grpc::ServerContext*, const VStream_set_nms_max_proposals_per_class_Request *request,
+    VStream_set_nms_max_proposals_per_class_Reply *reply)
+{
+    auto lambda = [](std::shared_ptr<OutputVStream> output_vstream, uint32_t max_proposals_per_class) {
+            return output_vstream->set_nms_max_proposals_per_class(max_proposals_per_class);
+    };
+    auto &manager = ServiceResourceManager<OutputVStream>::get_instance();
+    auto status = manager.execute<hailo_status>(request->identifier().vstream_handle(), lambda, static_cast<uint32_t>(request->max_proposals_per_class()));
+    CHECK_SUCCESS_AS_RPC_STATUS(status,  reply, "set_nms_max_proposals_per_class failed");
+    reply->set_status(static_cast<uint32_t>(HAILO_SUCCESS));
+    return grpc::Status::OK;
+}
+
 }
 
index fe1e9c5241f17a7c1619a40c223b4b39b7fbfece..3dddc383b14ac6cf7f12bc47aec26df804fbb8b7 100644 (file)
@@ -39,8 +39,6 @@ public:
         empty*) override;
     virtual grpc::Status get_service_version(grpc::ServerContext *, const get_service_version_Request *request,
         get_service_version_Reply *reply) override;
-    virtual grpc::Status VDevice_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
-        dup_handle_Reply*) override;
 
     virtual grpc::Status VDevice_create(grpc::ServerContext *, const VDevice_create_Request *request,
         VDevice_create_Reply *reply) override;
@@ -61,8 +59,12 @@ public:
          VStreams_create_Reply *reply) override;
     virtual grpc::Status OutputVStream_release(grpc::ServerContext *, const Release_Request *request,
         Release_Reply *reply) override;
+    virtual grpc::Status InputVStream_is_multi_planar(grpc::ServerContext*, const InputVStream_is_multi_planar_Request *request,
+        InputVStream_is_multi_planar_Reply *reply) override;
     virtual grpc::Status InputVStream_write(grpc::ServerContext*, const InputVStream_write_Request *request,
         InputVStream_write_Reply *reply) override;
+    virtual grpc::Status InputVStream_write_pix(grpc::ServerContext*, const InputVStream_write_pix_Request *request,
+        InputVStream_write_pix_Reply *reply) override;
     virtual grpc::Status OutputVStream_read(grpc::ServerContext*, const OutputVStream_read_Request *request,
         OutputVStream_read_Reply *reply) override;
     virtual grpc::Status InputVStream_get_frame_size(grpc::ServerContext*, const VStream_get_frame_size_Request *request,
@@ -95,10 +97,6 @@ public:
         VStream_get_info_Reply *reply) override;
     virtual grpc::Status OutputVStream_get_info(grpc::ServerContext*, const VStream_get_info_Request *request,
         VStream_get_info_Reply *reply) override;
-    virtual grpc::Status InputVStream_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
-        dup_handle_Reply*) override;
-    virtual grpc::Status OutputVStream_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
-        dup_handle_Reply*) override;
     virtual grpc::Status InputVStream_stop_and_clear(grpc::ServerContext *ctx, const VStream_stop_and_clear_Request *request,
         VStream_stop_and_clear_Reply*) override;
     virtual grpc::Status OutputVStream_stop_and_clear(grpc::ServerContext *ctx, const VStream_stop_and_clear_Request *request,
@@ -111,9 +109,15 @@ public:
         VStream_is_aborted_Reply*) override;
     virtual grpc::Status OutputVStream_is_aborted(grpc::ServerContext *ctx, const VStream_is_aborted_Request *request,
         VStream_is_aborted_Reply*) override;
+    virtual grpc::Status OutputVStream_set_nms_score_threshold(grpc::ServerContext *ctx,
+        const VStream_set_nms_score_threshold_Request *request, VStream_set_nms_score_threshold_Reply*) override;
+    virtual grpc::Status OutputVStream_set_nms_iou_threshold(grpc::ServerContext *ctx,
+        const VStream_set_nms_iou_threshold_Request *request, VStream_set_nms_iou_threshold_Reply*) override;
+    virtual grpc::Status OutputVStream_set_nms_max_proposals_per_class(grpc::ServerContext *ctx,
+        const VStream_set_nms_max_proposals_per_class_Request *request, VStream_set_nms_max_proposals_per_class_Reply*) override;
 
-    virtual grpc::Status ConfiguredNetworkGroup_dup_handle(grpc::ServerContext *ctx, const dup_handle_Request *request,
-        dup_handle_Reply*) override;
+    virtual grpc::Status ConfiguredNetworkGroup_dup_handle(grpc::ServerContext *ctx, const ConfiguredNetworkGroup_dup_handle_Request *request,
+        ConfiguredNetworkGroup_dup_handle_Reply*) override;
     virtual grpc::Status ConfiguredNetworkGroup_release(grpc::ServerContext*, const Release_Request* request,
         Release_Reply* reply) override;
     virtual grpc::Status ConfiguredNetworkGroup_make_input_vstream_params(grpc::ServerContext*,
@@ -182,6 +186,7 @@ public:
 
 private:
     void keep_alive();
+    hailo_status flush_input_vstream(uint32_t handle);
     hailo_status abort_input_vstream(uint32_t handle);
     hailo_status abort_output_vstream(uint32_t handle);
     hailo_status resume_input_vstream(uint32_t handle);
@@ -190,6 +195,7 @@ private:
     bool is_output_vstream_aborted(uint32_t handle);
     void abort_vstreams_by_pids(std::set<uint32_t> &pids);
     void remove_disconnected_clients();
+    void update_client_id_timestamp(uint32_t pid);
 
     std::mutex m_mutex;
     std::map<uint32_t, std::chrono::time_point<std::chrono::high_resolution_clock>> m_clients_pids;
index 67ee22b584cffc9068e92cdd3270ecd618f6a1aa..32305f52258df3d68ccad0466608a3b04ccd1da5 100644 (file)
@@ -3,8 +3,10 @@
 # To change an environment variable's value, follow the steps:
 # 1. Change the value of the selected environemt variable in this file
 # 2. Reload systemd unit files by running: `sudo systemctl daemon-reload`
-# 3. Enable and start service by running: `sudo systemctl enable --now hailort.service`
+# 3. Copy this file to /etc/default/hailort_service
+# 4. Enable and start service by running: `sudo systemctl enable --now hailort.service`
 
 [Service]
 HAILORT_LOGGER_PATH="/var/log/hailo"
+HAILORT_LOGGER_FLUSH_EVERY_PRINT=0
 HAILO_MONITOR=0
index 5b5930b05ca7a31b9af5b97021270b58065dde3b..bec2248b9d761caaea5c6f6f8f399c5664cf4600 100644 (file)
@@ -71,7 +71,7 @@ public:
         return index;
     }
 
-    uint32_t dup_handle(uint32_t pid, uint32_t handle)
+    uint32_t dup_handle(uint32_t handle, uint32_t pid)
     {
         std::unique_lock<std::mutex> lock(m_mutex);
         auto resource_expected = resource_lookup(handle);
index 87c0d0cad1eb42c6f623525b6fb050825d9e2ecb..8134303796d61213ca8668a6255e7d8ee84d5942 100644 (file)
@@ -31,7 +31,7 @@
 #include <sys/stat.h>
 
 void RunService() {
-    const std::string server_address = hailort::HAILORT_SERVICE_DEFAULT_ADDR;
+    const std::string server_address = hailort::HAILORT_SERVICE_ADDRESS;
     hailort::HailoRtRpcService service;
 
     grpc::ServerBuilder builder;
index 1a1c223b39ad8859756a037e97878ecddff5c582..9b416964f8ab7e4e121ca7fb32882530231da3b0 100644 (file)
@@ -46,7 +46,7 @@ std::unique_ptr<grpc::Server> g_hailort_rpc_server = nullptr;
 
 void RunService()
 {
-    const std::string server_address = hailort::HAILORT_SERVICE_DEFAULT_ADDR;
+    const std::string server_address = hailort::HAILORT_SERVICE_ADDRESS;
     hailort::HailoRtRpcService service;
 
     grpc::ServerBuilder builder;
index b5c190b5d4daee8031e6d64c8b5c6b147ba1c868..0121401ba794dc5a093b85cb3d9b25ac386eec96 100644 (file)
@@ -1,6 +1,10 @@
 cmake_minimum_required(VERSION 3.0.0)
 
 include(GNUInstallDirs)
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/spdlog.cmake)
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/json.cmake)
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/readerwriterqueue.cmake)
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/dotwriter.cmake)
 
 set(HAILORTCLI_CPP_FILES
     hailortcli.cpp
@@ -31,14 +35,13 @@ set(HAILORTCLI_CPP_FILES
     run2/network_live_track.cpp
     run2/measurement_live_track.cpp
     run2/io_wrappers.cpp
+    download_action_list_command.cpp
     )
 
 if(UNIX)
     # Unix only modules
     set(HAILORTCLI_CPP_FILES ${HAILORTCLI_CPP_FILES}
         udp_rate_limiter_command.cpp
-        # TODO: We dont compile download_action_list_command on windows, as it uses packed enums (HRT-5919)
-        download_action_list_command.cpp
         measure_nnc_performance_command.cpp
         )
 endif()
@@ -69,11 +72,13 @@ target_link_libraries(hailortcli
     spdlog::spdlog
     readerwriterqueue
     DotWriter
-    scheduler_mon_proto)
+    scheduler_mon_proto
+    profiler_proto)
 
 if(WIN32)
     target_link_libraries(hailortcli Ws2_32 Iphlpapi Shlwapi winmm.lib)
 elseif(CMAKE_SYSTEM_NAME STREQUAL QNX)
+    include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/pevents.cmake)
     target_link_libraries(hailortcli pevents)
 endif()
 target_include_directories(hailortcli
index d4a4cc952b965106eec4a7e88306401db89d070c..fdd978e70384303d62e918bc78fd1b6ec2883255 100644 (file)
@@ -35,46 +35,73 @@ DownloadActionListCommand::DownloadActionListCommand(CLI::App &parent_app) :
 
 hailo_status DownloadActionListCommand::execute(Device &device, const std::string &output_file_path,
     const ConfiguredNetworkGroupVector &network_groups, const std::string &hef_file_path)
+{
+    auto expected_action_list_json = init_json_object(device, hef_file_path);
+    CHECK_EXPECTED_AS_STATUS(expected_action_list_json);
+    auto action_list_json = expected_action_list_json.value();
+
+    auto network_groups_list_json = parse_network_groups(device, network_groups);
+    CHECK_EXPECTED_AS_STATUS(network_groups_list_json);
+    action_list_json["network_groups"] = network_groups_list_json.release();
+
+    return write_to_json(action_list_json, output_file_path);
+}
+
+hailo_status DownloadActionListCommand::execute(Device &device, std::shared_ptr<ConfiguredNetworkGroup> network_group,
+    uint16_t batch_size, ordered_json &action_list_json_param, double fps, uint32_t network_group_index)
+{
+    auto expected_network_groups_list_json = parse_network_group(device, network_group, network_group_index);
+    CHECK_EXPECTED_AS_STATUS(expected_network_groups_list_json);
+    auto network_groups_list_json = expected_network_groups_list_json.release();
+    network_groups_list_json[0]["batch_size"] = batch_size;
+    network_groups_list_json[0]["fps"] = fps;
+    action_list_json_param["runs"] += network_groups_list_json[0];
+    return HAILO_SUCCESS;
+}
+
+hailo_status DownloadActionListCommand::write_to_json(ordered_json &action_list_json_param, const std::string &output_file_path)
 {
     std::cout << "> Writing action list to '" << output_file_path << "'... ";
 
+    CHECK_SUCCESS(write_json(action_list_json_param, output_file_path));
+
+    std::cout << "done." << std::endl;
+
+    return HAILO_SUCCESS;
+}
+
+Expected<ordered_json> DownloadActionListCommand::init_json_object(Device &device, const std::string &hef_file_path)
+{
+    ordered_json action_list_json = {};
     auto curr_time = CliCommon::current_time_to_string();
-    CHECK_EXPECTED_AS_STATUS(curr_time);
+    CHECK_EXPECTED(curr_time);
 
     auto chip_arch = device.get_architecture();
-    CHECK_EXPECTED_AS_STATUS(chip_arch);
+    CHECK_EXPECTED(chip_arch);
     unsigned int clock_cycle = 0;
     // TODO - HRT-8046 Implement extended device info for hailo15
-    if (HAILO_ARCH_HAILO15 == chip_arch.value()) {
+    if (HAILO_ARCH_HAILO15H == chip_arch.value()) {
         clock_cycle = HAILO15_VPU_CORE_CPU_DEFAULT_FREQ_MHZ;
     } else {
         auto extended_info = device.get_extended_device_information();
-        CHECK_EXPECTED_AS_STATUS(extended_info);
+        CHECK_EXPECTED(extended_info);
         clock_cycle = (extended_info->neural_network_core_clock_rate / NN_CORE_TO_TIMER_FREQ_FACTOR) / MHz;
     }
 
-    ordered_json action_list_json = {
-        {"version", ACTION_LIST_FORMAT_VERSION()},
-        {"creation_time", curr_time.release()},
-        {"clock_cycle_MHz", clock_cycle},
-        {"hef", json({})}
-    };
+    action_list_json["version"] = ACTION_LIST_FORMAT_VERSION();
+    action_list_json["creation_time"] = curr_time.release();
+    action_list_json["clock_cycle_MHz"] = clock_cycle;
+    action_list_json["hef"] = json({});
 
     if (!hef_file_path.empty()) {
         auto hef_info = parse_hef_metadata(hef_file_path);
-        CHECK_EXPECTED_AS_STATUS(hef_info);
+        CHECK_EXPECTED(hef_info);
         action_list_json["hef"] = hef_info.release();
     }
 
-    auto network_groups_list_json = parse_network_groups(device, network_groups);
-    CHECK_EXPECTED_AS_STATUS(network_groups_list_json);
-    action_list_json["network_groups"] = network_groups_list_json.release();
-
-    CHECK_SUCCESS(write_json(action_list_json, output_file_path));
+    action_list_json["runs"] = ordered_json::array();
 
-    std::cout << "done." << std::endl;
-
-    return HAILO_SUCCESS;
+    return action_list_json;
 }
 
 hailo_status DownloadActionListCommand::set_batch_to_measure(Device &device, uint16_t batch_to_measure)
@@ -148,6 +175,11 @@ hailo_status DownloadActionListCommand::write_json(const ordered_json &json_obj,
 #pragma GCC diagnostic push
 #pragma GCC diagnostic error "-Wswitch-enum"
 #endif
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(error: 4061)
+#endif
+
 Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t base_address, uint8_t *action,
     uint32_t current_buffer_offset, uint32_t *action_length, CONTEXT_SWITCH_DEFS__ACTION_TYPE_t action_type,
     uint32_t timestamp, uint8_t sub_action_index, bool sub_action_index_set, bool *is_repeated, uint8_t *num_repeated,
@@ -284,6 +316,10 @@ Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t bas
             data_json = json({});
             action_length_local = 0;
             break;
+        case CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_RESET:
+            data_json = json({});
+            action_length_local = 0;
+            break;
         case CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_CFG_CHANNEL:
             data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__activate_cfg_channel_t *>(action);
             action_length_local = sizeof(CONTEXT_SWITCH_DEFS__activate_cfg_channel_t);
@@ -316,6 +352,18 @@ Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t bas
             data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t *>(action);
             action_length_local = sizeof(CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t);
             break;
+        case CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_BOUNDARY_INPUT_BATCH:
+            data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t *>(action);
+            action_length_local = sizeof(CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t);
+            break;
+        case CONTEXT_SWITCH_DEFS__ACTION_TYPE_PAUSE_VDMA_CHANNEL:
+            data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t *>(action);
+            action_length_local = sizeof(CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t);
+            break;
+        case CONTEXT_SWITCH_DEFS__ACTION_TYPE_RESUME_VDMA_CHANNEL:
+            data_json = *reinterpret_cast<CONTEXT_SWITCH_DEFS__resume_vdma_channel_action_data_t *>(action);
+            action_length_local = sizeof(CONTEXT_SWITCH_DEFS__resume_vdma_channel_action_data_t);
+            break;
         case CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT:
             // Fallthrough
             // Handling CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT is needed because we compile this file with -Wswitch-enum
@@ -330,6 +378,9 @@ Expected<ordered_json> DownloadActionListCommand::parse_action_data(uint32_t bas
 #if defined(__GNUC__)
 #pragma GCC diagnostic pop
 #endif
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
 
 Expected<ordered_json> DownloadActionListCommand::parse_single_repeated_action(uint32_t base_address,
     uint8_t *action, uint32_t current_buffer_offset, uint32_t *action_length,
@@ -362,10 +413,11 @@ Expected<ordered_json> DownloadActionListCommand::parse_single_action(uint32_t b
 Expected<ordered_json> DownloadActionListCommand::parse_context(Device &device, uint32_t network_group_id,
     CONTROL_PROTOCOL__context_switch_context_type_t context_type, uint8_t context_index, const std::string &context_name)
 {
+    uint8_t converted_context_type = static_cast<uint8_t>(context_type);
     uint32_t action_list_base_address = 0;
     uint32_t batch_counter = 0;
 
-    auto action_list = device.download_context_action_list(network_group_id, context_type, context_index,
+    auto action_list = device.download_context_action_list(network_group_id, converted_context_type, context_index,
         &action_list_base_address, &batch_counter);
     CHECK_EXPECTED(action_list);
     // Needs to fit in 2 bytes due to firmware limitation of action list size
@@ -424,56 +476,67 @@ Expected<ordered_json> DownloadActionListCommand::parse_network_groups(Device &d
     const auto number_of_dynamic_contexts_per_network_group = device.get_number_of_dynamic_contexts_per_network_group();
     CHECK_EXPECTED(number_of_dynamic_contexts_per_network_group);
 
+    auto number_of_network_groups = (uint32_t)number_of_dynamic_contexts_per_network_group->size();
     ordered_json network_group_list_json;
-    for (uint32_t network_group_index = 0; network_group_index < number_of_dynamic_contexts_per_network_group->size(); network_group_index++) {
-        // TODO: HRT-8147 use the real network_group_id instead of network_group_index
-        const uint32_t network_group_id = network_group_index;
-
-        // TODO: network_group_name via Hef::get_network_groups_names (HRT-5997)
-        ordered_json network_group_json = {
-            {"mean_activation_time_ms", INVALID_NUMERIC_VALUE},
-            {"mean_deactivation_time_ms", INVALID_NUMERIC_VALUE},
-            {"network_group_id", network_group_id},
-            {"contexts", json::array()}
-        };
-        // We assume the the order of the network_groups in the ConfiguredNetworkGroupVector and in the action_list
-        // downloaded from the fw is the same. If the received ConfiguredNetworkGroupVector is empty, we leave the 
-        // mean_de/activation_time_ms with their default values (INVALID_NUMERIC_VALUE).
-        if (network_groups.size() > network_group_index) {
-            network_group_json["mean_activation_time_ms"] = get_accumulator_mean_value(
-                network_groups[network_group_index]->get_activation_time_accumulator());
-            network_group_json["mean_deactivation_time_ms"] = get_accumulator_mean_value(
-                network_groups[network_group_index]->get_deactivation_time_accumulator());
-        }
+    for (uint32_t network_group_index = 0; network_group_index < number_of_network_groups; network_group_index++) {
+        auto &network_group = (network_group_index < network_groups.size()) ? network_groups[network_group_index] : nullptr;
+        auto expected_json_file = parse_network_group(device, network_group, network_group_index);
+        CHECK_EXPECTED(expected_json_file);
+        network_group_list_json.emplace_back(expected_json_file.value());
+    }
+    return network_group_list_json;
+}
 
-        auto activation_context_json = parse_context(device, network_group_id,
-            CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_ACTIVATION, 0, "activation");
-        CHECK_EXPECTED(activation_context_json);
-        network_group_json["contexts"].emplace_back(activation_context_json.release());
+Expected<ordered_json> DownloadActionListCommand::parse_network_group(Device &device, const std::shared_ptr<ConfiguredNetworkGroup> network_group, uint32_t network_group_id)
+{
+    const auto number_of_dynamic_contexts_per_network_group = device.get_number_of_dynamic_contexts_per_network_group();
+    CHECK_EXPECTED(number_of_dynamic_contexts_per_network_group);
 
-        auto preliminary_context_json = parse_context(device, network_group_id,
-            CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_PRELIMINARY, 0, "preliminary");
-        CHECK_EXPECTED(preliminary_context_json);
-        network_group_json["contexts"].emplace_back(preliminary_context_json.release());
+    ordered_json network_group_list_json;
+    // TODO: network_group_name via Hef::get_network_groups_names (HRT-5997)
+    ordered_json network_group_json = {
+        {"batch_size", INVALID_NUMERIC_VALUE},
+        {"mean_activation_time_ms", INVALID_NUMERIC_VALUE},
+        {"mean_deactivation_time_ms", INVALID_NUMERIC_VALUE},
+        {"network_group_id", network_group_id},
+        {"fps", INVALID_NUMERIC_VALUE},
+        {"contexts", json::array()}
+    };
 
-        const auto dynamic_contexts_count = number_of_dynamic_contexts_per_network_group.value()[network_group_index];
-        for (uint8_t context_index = 0; context_index < dynamic_contexts_count; context_index++) {
-            auto context_json = parse_context(device, network_group_id,
-                CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_DYNAMIC, context_index,
-                fmt::format("dynamic_{}", context_index));
-            CHECK_EXPECTED(context_json);
+    if(network_group != nullptr) {
+        network_group_json["mean_activation_time_ms"] = get_accumulator_mean_value(
+            network_group->get_activation_time_accumulator());
+        network_group_json["mean_deactivation_time_ms"] = get_accumulator_mean_value(
+            network_group->get_deactivation_time_accumulator());
+    }
 
-            network_group_json["contexts"].emplace_back(context_json.release());
-        }
+    auto activation_context_json = parse_context(device, network_group_id,
+        CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_ACTIVATION, 0, "activation");
+    CHECK_EXPECTED(activation_context_json);
+    network_group_json["contexts"].emplace_back(activation_context_json.release());
 
-        auto batch_switching_context_json = parse_context(device, network_group_id,
-            CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_BATCH_SWITCHING, 0, "batch_switching");
-        CHECK_EXPECTED(batch_switching_context_json);
-        network_group_json["contexts"].emplace_back(batch_switching_context_json.release());
+    auto preliminary_context_json = parse_context(device, network_group_id,
+        CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_PRELIMINARY, 0, "preliminary");
+    CHECK_EXPECTED(preliminary_context_json);
+    network_group_json["contexts"].emplace_back(preliminary_context_json.release());
 
-        network_group_list_json.emplace_back(network_group_json);
+    const auto dynamic_contexts_count = number_of_dynamic_contexts_per_network_group.value()[network_group_id];
+    for (uint8_t context_index = 0; context_index < dynamic_contexts_count; context_index++) {
+        auto context_json = parse_context(device, network_group_id,
+            CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_DYNAMIC, context_index,
+            fmt::format("dynamic_{}", context_index));
+        CHECK_EXPECTED(context_json);
+
+        network_group_json["contexts"].emplace_back(context_json.release());
     }
 
+    auto batch_switching_context_json = parse_context(device, network_group_id,
+        CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_BATCH_SWITCHING, 0, "batch_switching");
+    CHECK_EXPECTED(batch_switching_context_json);
+    network_group_json["contexts"].emplace_back(batch_switching_context_json.release());
+
+    network_group_list_json.emplace_back(network_group_json);
+
     return network_group_list_json;
 }
 
@@ -639,3 +702,18 @@ void to_json(json& j, const CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t&
     j = json{{"cluster_index", cluster_index}, {"lcu_index", lcu_index}, {"network_index", network_index},
         {"kernel_done_count", kernel_done_count}};
 }
+
+void to_json(json &j, const CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t &data)
+{
+    j = unpack_vdma_channel_id(data);
+}
+
+void to_json(json &j, const CONTEXT_SWITCH_DEFS__resume_vdma_channel_action_data_t &data)
+{
+    j = unpack_vdma_channel_id(data);
+}
+
+void to_json(json &j, const CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t &data)
+{
+    j = unpack_vdma_channel_id(data);
+}
index 4a4bb6f4c83dc118111d085d5306fdcb0a69bb62..ede0ac658663914dc080d37dfea9cb0f6b6c9011 100644 (file)
@@ -29,6 +29,10 @@ public:
     // To be used from external commands
     static hailo_status execute(Device &device, const std::string &output_file_path,
         const ConfiguredNetworkGroupVector &network_groups={}, const std::string &hef_file_path="");
+    static hailo_status execute(Device &device, std::shared_ptr<ConfiguredNetworkGroup> network_group,
+        uint16_t batch_size, ordered_json &action_list_json, double fps, uint32_t network_group_index);
+    static hailo_status write_to_json(ordered_json &action_list_json_param, const std::string &output_file_path);
+    static Expected<ordered_json> init_json_object(Device &device, const std::string &hef_file_path);
     static hailo_status set_batch_to_measure(Device &device, uint16_t batch_to_measure);
 
 protected:
@@ -38,7 +42,7 @@ private:
     std::string m_output_file_path;
     static constexpr int DEFAULT_JSON_TAB_WIDTH = 4;
     static constexpr int INVALID_NUMERIC_VALUE = -1;
-    static std::string ACTION_LIST_FORMAT_VERSION() { return "1.0"; }
+    static std::string ACTION_LIST_FORMAT_VERSION() { return "2.0"; }
 
     static Expected<ordered_json> parse_hef_metadata(const std::string &hef_file_path);
     static bool is_valid_hef(const std::string &hef_file_path);
@@ -61,6 +65,8 @@ private:
         const std::string &context_name);
     static double get_accumulator_mean_value(const AccumulatorPtr &accumulator, double default_value = INVALID_NUMERIC_VALUE);
     static Expected<ordered_json> parse_network_groups(Device &device, const ConfiguredNetworkGroupVector &network_groups);
+    static Expected<ordered_json> parse_network_group(Device &device,
+        const std::shared_ptr<ConfiguredNetworkGroup> network_group, uint32_t network_group_id);
 };
 
 // JSON serialization
@@ -84,6 +90,7 @@ static std::pair<CONTEXT_SWITCH_DEFS__ACTION_TYPE_t, std::string> mapping[] = {
     {CONTEXT_SWITCH_DEFS__ACTION_TYPE_ADD_DDR_PAIR_INFO, "add_ddr_pair_info"},
     {CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_START, "ddr_buffering_start"},
     {CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_START, "burst_credits_task_start"},
+    {CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_RESET, "burst_credits_task_reset"},
     {CONTEXT_SWITCH_DEFS__ACTION_TYPE_LCU_INTERRUPT, "lcu_interrupt"},
     {CONTEXT_SWITCH_DEFS__ACTION_TYPE_SEQUENCER_DONE_INTERRUPT, "sequencer_done_interrupt"},
     {CONTEXT_SWITCH_DEFS__ACTION_TYPE_INPUT_CHANNEL_TRANSFER_DONE_INTERRUPT, "input_channel_transfer_done_interrupt"},
@@ -102,6 +109,9 @@ static std::pair<CONTEXT_SWITCH_DEFS__ACTION_TYPE_t, std::string> mapping[] = {
     {CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS, "enable_nms"},
     {CONTEXT_SWITCH_DEFS__ACTION_TYPE_WRITE_DATA_BY_TYPE, "write_data_by_type"},
     {CONTEXT_SWITCH_DEFS__ACTION_TYPE_SWITCH_LCU_BATCH, "switch_lcu_batch"},
+    {CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_BOUNDARY_INPUT_BATCH, "change boundary input batch"},
+    {CONTEXT_SWITCH_DEFS__ACTION_TYPE_PAUSE_VDMA_CHANNEL, "pause vdma channel"},
+    {CONTEXT_SWITCH_DEFS__ACTION_TYPE_RESUME_VDMA_CHANNEL, "resume vdma channel"},
 };
 static_assert(ARRAY_ENTRIES(mapping) == CONTEXT_SWITCH_DEFS__ACTION_TYPE_COUNT,
     "Missing a mapping from a CONTEXT_SWITCH_DEFS__ACTION_TYPE_t to it's string value");
@@ -142,5 +152,8 @@ void to_json(json &j, const CONTEXT_SWITCH_DEFS__add_ddr_pair_info_action_data_t
 void to_json(json &j, const CONTEXT_SWITCH_DEFS__open_boundary_input_channel_data_t &data);
 void to_json(json &j, const CONTEXT_SWITCH_DEFS__open_boundary_output_channel_data_t &data);
 void to_json(json &j, const CONTEXT_SWITCH_DEFS__switch_lcu_batch_action_data_t &data);
+void to_json(json &j, const CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t &data);
+void to_json(json &j, const CONTEXT_SWITCH_DEFS__resume_vdma_channel_action_data_t &data);
+void to_json(json &j, const CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t &data);
 
 #endif /* _HAILO_DOWNLOAD_ACTION_LIST_COMMAND_HPP_ */
index e4b595fb31787331ac137dc876d7bc1e6c7159b8..f249238f2ff414a8568fc7373ed92b341ad082de 100644 (file)
@@ -138,8 +138,10 @@ static std::string identity_arch_string(const hailo_device_identity_t &identity)
         return "HAILO8";\r
     case HAILO_ARCH_HAILO8L:\r
         return "HAILO8L";\r
-    case HAILO_ARCH_HAILO15:\r
-        return "HAILO15";\r
+    case HAILO_ARCH_HAILO15H:\r
+        return "HAILO15H";\r
+    case HAILO_ARCH_PLUTO:\r
+        return "PLUTO";\r
     default:\r
         return "Unknown";\r
     }\r
@@ -230,8 +232,5 @@ FwControlCommand::FwControlCommand(CLI::App &parent_app) :
     add_subcommand<FwControlIdentifyCommand>();\r
     add_subcommand<FwControlResetCommand>();\r
     add_subcommand<FwControlTestMemoriesCommand>();\r
-    // TODO: Support on windows (HRT-5919)\r
-    #if defined(__GNUC__)\r
     add_subcommand<DownloadActionListCommand>();\r
-    #endif\r
 }\r
index d475bd3d18cde457fa18b2434a6cfae2f501c19e..67df9682819d3addda2c11263ede5e219c3225db 100644 (file)
 \r
 #include "hailortcli.hpp"\r
 #include "command.hpp"\r
-#if defined(__GNUC__)\r
-// TODO: Support on windows (HRT-5919)\r
 #include "download_action_list_command.hpp"\r
-#endif\r
 \r
 class FwControlIdentifyCommand : public DeviceCommand {\r
 public:\r
index 8e0ef96876adccfb4f764a39939fc59fad18d425..b9325fa1e1a686c1e3781ebc46636c9462072e34 100644 (file)
@@ -215,7 +215,7 @@ private:
 
 inline void hailo_deprecate_options(CLI::App *app, const std::vector<DeprecationActionPtr> &actions, bool set_footer = true)
 {
-    // std::set and not std::vector in case two actions have the smae deprection string
+    // std::set and not std::vector in case two actions have the same deprecation string
     std::set<std::string> deprecation_messages;
     for (const auto& deprecation_action : actions) {
         deprecation_messages.insert(deprecation_action->deprecate(!set_footer));
index ef9b43225974eefc864b3ee91fbfb64912932471..ee7d6d052fb216660ee2c40371f342f589115d20 100644 (file)
@@ -28,13 +28,15 @@ Expected<std::shared_ptr<InferProgress>> InferProgress::create(const inference_r
 InferProgress::InferProgress(const inference_runner_params &params,
     std::chrono::milliseconds print_interval, hailo_status &status) :
       m_params(params), m_print_interval(print_interval), m_networks_progress(),
-      m_stop_event(Event::create_shared(Event::State::not_signalled)), m_finished(false)
+      m_stop_event(), m_finished(false)
 {
-    if (nullptr == m_stop_event) {
+    auto event_exp = Event::create_shared(Event::State::not_signalled);
+    if (!event_exp) {
         LOGGER__ERROR("Failed to create event for progress bar");
-        status = HAILO_OUT_OF_HOST_MEMORY;
+        status = event_exp.status();
         return;
     }
+    m_stop_event = event_exp.release(); 
     status = HAILO_SUCCESS;
 }
 
index 84dce254a6743f79a8379b78cf4dddeb46327a93..cb75fa62f69e3f8a4ebf95239193f7b2947d0748 100644 (file)
@@ -30,6 +30,7 @@ constexpr size_t DEVICE_ID_WIDTH = STRING_WIDTH;
 constexpr size_t STREAM_NAME_WIDTH = STRING_WIDTH;
 constexpr size_t UTILIZATION_WIDTH = 25;
 constexpr size_t NUMBER_WIDTH = 15;
+constexpr size_t FRAME_VALUE_WIDTH = 8;
 constexpr size_t TERMINAL_DEFAULT_WIDTH = 80;
 constexpr size_t LINE_LENGTH = NETWORK_GROUP_NAME_WIDTH + STREAM_NAME_WIDTH + UTILIZATION_WIDTH + NUMBER_WIDTH;
 constexpr std::chrono::milliseconds EPSILON_TIME(500);
@@ -115,15 +116,23 @@ void MonCommand::print_networks_info_table(const ProtoMon &mon_message)
 
 void MonCommand::print_frames_header()
 {
-    std::cout << 
+    std::cout <<
         std::setw(STRING_WIDTH) << std::left << "Model" <<
         std::setw(STRING_WIDTH) << std::left << "Stream" <<
-        std::setw(NUMBER_WIDTH) << std::left << "Direction" << 
-        std::setw(NUMBER_WIDTH) << std::left << "Frames" << 
-        "\n" << std::left << std::string(LINE_LENGTH, '-') << "\n";
+        std::setw(NUMBER_WIDTH) << std::left << "Direction" <<
+        std::setw(3 * FRAME_VALUE_WIDTH - 2) << std::internal << "Frames Queue" <<
+        "\n" <<
+        std::setw(STRING_WIDTH) << std::left << "" <<
+        std::setw(STRING_WIDTH) << std::left << "" <<
+        std::setw(NUMBER_WIDTH) << std::left << "" <<
+        std::setw(FRAME_VALUE_WIDTH) << "Avg" <<
+        std::setw(FRAME_VALUE_WIDTH) << "Max" <<
+        std::setw(FRAME_VALUE_WIDTH) << "Min" <<
+        std::setw(FRAME_VALUE_WIDTH) << "Capacity" <<
+        "\n" << std::left << std::string(LINE_LENGTH + NUMBER_WIDTH, '-') << "\n";
 }
 
-void MonCommand::print_frames_table(const ProtoMon &mon_message)
+hailo_status MonCommand::print_frames_table(const ProtoMon &mon_message)
 {
     for (const auto &net_info : mon_message.net_frames_infos()) {
         auto &original_net_name = net_info.network_name();
@@ -133,20 +142,40 @@ void MonCommand::print_frames_table(const ProtoMon &mon_message)
             auto stream_name = truncate_str(stream_name_original, STREAM_NAME_WIDTH);
             auto stream_direction = (streams_frames.stream_direction() == PROTO__STREAM_DIRECTION__HOST_TO_DEVICE) ? "H2D" : "D2H";
 
-            std::string frames;
+            std::string max_frames, min_frames, queue_size;
+            double avg_frames;
             if (SCHEDULER_MON_NAN_VAL == streams_frames.buffer_frames_size() || SCHEDULER_MON_NAN_VAL == streams_frames.pending_frames_count()) {
-                frames = "NaN";
+                avg_frames = -1;
+                max_frames = "NaN";
+                min_frames = "NaN";
+                queue_size = "NaN";
+            } else {
+                avg_frames = streams_frames.avg_pending_frames_count();
+                max_frames = std::to_string(streams_frames.max_pending_frames_count());
+                min_frames = std::to_string(streams_frames.min_pending_frames_count());
+                queue_size = std::to_string(streams_frames.buffer_frames_size());
+            }
+
+            std::string avg_frames_str;
+            if (avg_frames == -1) {
+                avg_frames_str = "NaN";
             } else {
-                frames = std::to_string(streams_frames.pending_frames_count()) + "/" + std::to_string(streams_frames.buffer_frames_size());
+                std::stringstream ss;
+                ss << std::fixed << std::setprecision(2) << avg_frames;
+                avg_frames_str = ss.str();
             }
-            
-            std::cout << 
+
+            std::cout <<
                 std::setw(STRING_WIDTH) << std::left << net_name <<
                 std::setw(STRING_WIDTH) << std::left << stream_name <<
-                std::setw(NUMBER_WIDTH) << std::left << stream_direction << 
-                std::setw(NUMBER_WIDTH) << std::left << frames << "\n";
+                std::setw(NUMBER_WIDTH) << std::left << stream_direction <<
+                std::setw(FRAME_VALUE_WIDTH) << std::left << avg_frames_str <<
+                std::setw(FRAME_VALUE_WIDTH) << std::left << max_frames <<
+                std::setw(FRAME_VALUE_WIDTH) << std::left << min_frames <<
+                std::setw(FRAME_VALUE_WIDTH) << std::left << queue_size << "\n";
         }
     }
+    return HAILO_SUCCESS;
 }
 
 #if defined(__GNUC__)
@@ -163,7 +192,7 @@ Expected<uint16_t> get_terminal_line_width()
     return terminal_line_width;
 }
 
-void MonCommand::print_tables(const std::vector<ProtoMon> &mon_messages, uint32_t terminal_line_width)
+hailo_status MonCommand::print_tables(const std::vector<ProtoMon> &mon_messages, uint32_t terminal_line_width)
 {
     print_devices_info_header();
     for (const auto &mon_message : mon_messages) {
@@ -184,8 +213,9 @@ void MonCommand::print_tables(const std::vector<ProtoMon> &mon_messages, uint32_
 
     print_frames_header();
     for (const auto &mon_message : mon_messages) {
-        print_frames_table(mon_message);
+        CHECK_SUCCESS(print_frames_table(mon_message));
     }
+    return HAILO_SUCCESS;
 }
 
 static volatile bool keep_running = true;
@@ -235,7 +265,7 @@ hailo_status MonCommand::run_monitor()
             }
         }
 
-        print_tables(mon_messages, terminal_line_width);
+        CHECK_SUCCESS(print_tables(mon_messages, terminal_line_width));
         if (print_warning_msg) {
             std::cout << FORMAT_GREEN_PRINT << "Monitor did not retrieve any files. This occurs when there is no application currently running.\n"
             << "If this is not the case, verify that environment variable '" << SCHEDULER_MON_ENV_VAR << "' is set to 1.\n" << FORMAT_NORMAL_PRINT;
index 653076a3e09b77449dd2abcc3cbca5846f66c01c..b5a0cf9242476abd63056e04c77bce50004733ab 100644 (file)
@@ -15,6 +15,7 @@
 #include "hailortcli.hpp"
 #include "command.hpp"
 #include "utils/profiler/monitor_handler.hpp"
+#include "common/runtime_statistics_internal.hpp"
 
 #include "CLI/CLI.hpp"
 
@@ -30,13 +31,13 @@ public:
 
 private:
     hailo_status run_monitor();
-    void print_tables(const std::vector<ProtoMon> &mon_messages, uint32_t terminal_line_width);
+    hailo_status print_tables(const std::vector<ProtoMon> &mon_messages, uint32_t terminal_line_width);
     void print_devices_info_header();
     void print_networks_info_header();
     void print_frames_header();
     void print_devices_info_table(const ProtoMon &mon_message);
     void print_networks_info_table(const ProtoMon &mon_message);
-    void print_frames_table(const ProtoMon &mon_message);
+    hailo_status print_frames_table(const ProtoMon &mon_message);
     hailo_status run_in_alternative_terminal();
 };
 
index d43767524464e702dace13beb23a379c74bcf4ab..7a0f1da8e8759f9d45adfd87b04231df094f2bc9 100644 (file)
@@ -44,8 +44,10 @@ private:
 };
 
 // Wrapper for InputStream or InputVStream objects.
+// We use std::enable_from_this because on async api the callback is using `this`. We want to increase the reference
+// count until the callback is over.
 template<typename Writer>
-class WriterWrapper final
+class WriterWrapper final : public std::enable_shared_from_this<WriterWrapper<Writer>>
 {
 public:
     template<typename WriterParams>
@@ -85,8 +87,12 @@ public:
     hailo_status write_async(typename Writer::TransferDoneCallback callback)
     {
         before_write_start();
-        // We can use the same buffer for multiple writes simultaneously. That is OK since we don't modify the buffers.
-        auto status = get().write_async(MemoryView(*next_buffer()), callback);
+        auto self = std::enable_shared_from_this<WriterWrapper<Writer>>::shared_from_this();
+        auto status = get().write_async(MemoryView(*next_buffer()),
+            [self, original=callback](const typename Writer::CompletionInfo &completion_info) {
+                (void)self; // Keeping self here so the buffer won't be deleted until the callback is called.
+                original(completion_info);
+            });
         if (HAILO_SUCCESS != status) {
             return status;
         }
@@ -150,9 +156,6 @@ private:
             "Input file ({}) size {} must be a multiple of the frame size {}",
             file_path, buffer->size(), frame_size);
 
-        auto buffer_ptr = make_shared_nothrow<Buffer>(buffer.release());
-        CHECK_NOT_NULL_AS_EXPECTED(buffer_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
         std::vector<BufferPtr> dataset;
         const size_t frames_count = buffer->size() / frame_size;
         dataset.reserve(frames_count);
index fe2f98c7d5f7857d3a8d07c9daa74b2ed5875138..b68799fcb2e3709bf28c05a7919828a927662d95 100644 (file)
@@ -17,6 +17,8 @@
 
 using namespace hailort;
 
+const uint8_t NETWORK_STATS_LEVEL = 1;
+
 hailo_status LiveStats::Track::start()
 {
     CHECK_SUCCESS(start_impl());
@@ -40,15 +42,25 @@ void LiveStats::Track::push_json(nlohmann::ordered_json &json)
     push_json_impl(json);
 }
 
+Expected<double> LiveStats::Track::get_last_measured_fps()
+{
+    // This virtual getter is supported only for the derived class NetworkLiveTrack
+    return make_unexpected(HAILO_NOT_AVAILABLE);
+}
+
+
 LiveStats::LiveStats(std::chrono::milliseconds interval) :
     m_running(false),
     m_interval(interval),
-    m_stop_event(Event::create_shared(Event::State::not_signalled)),
+    m_stop_event(),
     m_tracks(),
     m_mutex(),
     m_prev_count(0),
     m_enable_ansi_escape_sequences(CursorAdjustment())
 {
+    auto event_exp = Event::create_shared(Event::State::not_signalled);
+    assert(event_exp);
+    m_stop_event = event_exp.release();
 }
 
 LiveStats::~LiveStats()
@@ -114,6 +126,20 @@ hailo_status LiveStats::dump_stats(const std::string &json_path, const std::stri
     return HAILO_SUCCESS;
 }
 
+Expected<std::vector<double>> LiveStats::get_last_measured_fps_per_network_group()
+{
+    std::vector<double> last_measured_fpss;
+    CHECK_AS_EXPECTED(contains(m_tracks, NETWORK_STATS_LEVEL), HAILO_NOT_AVAILABLE);
+
+    for (size_t network_stats_track_index = 0; network_stats_track_index < m_tracks[NETWORK_STATS_LEVEL].size(); network_stats_track_index++) {
+        auto expected_fps = m_tracks[NETWORK_STATS_LEVEL][network_stats_track_index]->get_last_measured_fps();
+        CHECK_EXPECTED(expected_fps);
+        last_measured_fpss.emplace_back(expected_fps.release());
+    }
+
+    return last_measured_fpss;
+}
+
 hailo_status LiveStats::start()
 {
     // In order to re-start LiveStats, we should add m_stop_event->reset() here
index 6c1b4eac295da08bed69ceac8ea49e5c3afc1b4d..7d28905c6ad6da83f7acddbf1ce5cc33b48dd30b 100644 (file)
@@ -12,6 +12,8 @@
 
 #include "common/os_utils.hpp"
 #include "hailo/event.hpp"
+#include "hailo/expected.hpp"
+
 #include <nlohmann/json.hpp>
 #include <stdint.h>
 #include <chrono>
@@ -32,6 +34,7 @@ public:
         hailo_status start();
         uint32_t push_text(std::stringstream &ss);
         void push_json(nlohmann::ordered_json &json);
+        virtual hailort::Expected<double> get_last_measured_fps();
 
     protected:
         virtual hailo_status start_impl() = 0;
@@ -48,6 +51,7 @@ public:
     hailo_status dump_stats(const std::string &json_path, const std::string &inference_mode);
     hailo_status start();
     void stop();
+    hailort::Expected<std::vector<double>> get_last_measured_fps_per_network_group();
 
 private:
     bool m_running;
index ae5901803f9371a4df1faf9ec8672dad7e17a8b0..bfbd4a2a0efa823b6875abdeaa10b2764d64e78f 100644 (file)
@@ -24,7 +24,8 @@ NetworkLiveTrack::NetworkLiveTrack(const std::string &name, std::shared_ptr<Conf
     m_cng(cng),
     m_overall_latency_meter(overall_latency_meter),
     m_measure_fps(measure_fps),
-    m_hef_path(hef_path)
+    m_hef_path(hef_path),
+    m_last_measured_fps(0)
 {
     std::lock_guard<std::mutex> lock(mutex);
     max_ng_name = std::max(m_name.size(), max_ng_name);
@@ -43,9 +44,15 @@ double NetworkLiveTrack::get_fps()
     auto elapsed_time = std::chrono::steady_clock::now() - m_last_get_time;
     auto count = m_count.load();
     auto fps = count / std::chrono::duration<double>(elapsed_time).count();
+    m_last_measured_fps = fps;
     return fps;
 }
 
+Expected<double> NetworkLiveTrack::get_last_measured_fps()
+{
+    return Expected<double>(m_last_measured_fps);
+}
+
 uint32_t NetworkLiveTrack::push_text_impl(std::stringstream &ss)
 {
     ss << fmt::format("{}:", m_name);
index ba3138cd5931dd6a72b545d8e293d013fc5acdb5..69516907c6316e4c9101336b312f95a0b4233d3b 100644 (file)
@@ -32,6 +32,8 @@ public:
 
     void progress();
 
+    virtual hailort::Expected<double> get_last_measured_fps() override;
+
 private:
     double get_fps();
 
@@ -45,6 +47,8 @@ private:
     hailort::LatencyMeterPtr m_overall_latency_meter;
     const bool m_measure_fps;
     const std::string &m_hef_path;
+
+    double m_last_measured_fps;
 };
 
 #endif /* _HAILO_HAILORTCLI_RUN2_NETWORK_LIVE_TRACK_HPP_ */
\ No newline at end of file
index f095b1defb29a9b909dbd63f42f666a514be842e..96d2913f00556c7e6cf06c800bcd97065177f1b1 100644 (file)
@@ -100,7 +100,8 @@ NetworkRunner::NetworkRunner(const NetworkParams &params, const std::string &nam
     m_name(name),
     m_cng(cng),
     m_overall_latency_meter(nullptr),
-    m_latency_barrier(nullptr)
+    m_latency_barrier(nullptr),
+    m_last_measured_fps(0)
 {
 }
 
@@ -134,7 +135,7 @@ Expected<std::shared_ptr<NetworkRunner>> NetworkRunner::create_shared(VDevice &v
         for (auto &stream_name_params_pair : cfg_params->stream_params_by_name) {
             stream_name_params_pair.second.flags = HAILO_STREAM_FLAGS_ASYNC;
         }
-    }
+    }
     auto cfgr_net_groups = vdevice.configure(hef.value(), {{net_group_name, cfg_params.value()}});
     CHECK_EXPECTED(cfgr_net_groups);
     assert(1 == cfgr_net_groups->size());
@@ -262,6 +263,32 @@ void NetworkRunner::set_latency_barrier(BarrierPtr latency_barrier)
     m_latency_barrier = latency_barrier;
 }
 
+std::shared_ptr<ConfiguredNetworkGroup> NetworkRunner::get_configured_network_group()
+{
+    return m_cng;
+}
+
+void NetworkRunner::set_last_measured_fps(double fps)
+{
+    m_last_measured_fps = fps;
+}
+
+double NetworkRunner::get_last_measured_fps()
+{
+    return m_last_measured_fps;
+}
+
+hailo_vstream_params_t update_quantize_flag_in_vstream_param(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &old_vstream_params)
+{
+    hailo_vstream_params_t res = old_vstream_params;
+    if ((HAILO_FORMAT_TYPE_FLOAT32 == old_vstream_params.user_buffer_format.type) || (HailoRTCommon::is_nms(vstream_info))) {
+        res.user_buffer_format.flags &= (~HAILO_FORMAT_FLAGS_QUANTIZED);
+    } else {
+        res.user_buffer_format.flags |= (HAILO_FORMAT_FLAGS_QUANTIZED);
+    }
+    return res;
+}
+
 Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> NetworkRunner::create_vstreams(
     ConfiguredNetworkGroup &net_group, const std::map<std::string, hailo_vstream_params_t> &params)
 {//TODO: support network name
@@ -273,10 +300,12 @@ Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> Netwo
     for (auto &input_vstream_info : input_vstreams_info.value()) {
         auto elem_it = params.find(input_vstream_info.name);
         if (elem_it != params.end()) {
-            input_vstreams_params.emplace(input_vstream_info.name, elem_it->second);
+            auto vstream_param = update_quantize_flag_in_vstream_param(input_vstream_info, elem_it->second);
+            input_vstreams_params.emplace(input_vstream_info.name, vstream_param);
             match_count++;
         } else {
-            input_vstreams_params.emplace(input_vstream_info.name, HailoRTDefaults::get_vstreams_params());
+            auto vstream_param = update_quantize_flag_in_vstream_param(input_vstream_info, HailoRTDefaults::get_vstreams_params());
+            input_vstreams_params.emplace(input_vstream_info.name, vstream_param);
         }
     }
 
@@ -286,11 +315,13 @@ Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> Netwo
     for (auto &output_vstream_info : output_vstreams_info.value()) {
         auto elem_it = params.find(output_vstream_info.name);
         if (elem_it != params.end()) {
-            output_vstreams_params.emplace(output_vstream_info.name, elem_it->second);
+            auto vstream_param = update_quantize_flag_in_vstream_param(output_vstream_info, elem_it->second);
+            output_vstreams_params.emplace(output_vstream_info.name, vstream_param);
             match_count++;
         }
         else {
-            output_vstreams_params.emplace(output_vstream_info.name, HailoRTDefaults::get_vstreams_params());
+            auto vstream_param = update_quantize_flag_in_vstream_param(output_vstream_info, HailoRTDefaults::get_vstreams_params());
+            output_vstreams_params.emplace(output_vstream_info.name, vstream_param);
         }
     }
 
index 5eafec0948d3d46a29c5b9fed7f8783de0182cf2..ffe338572313606723757372d3e1d66a7ea42bea 100644 (file)
@@ -128,6 +128,9 @@ public:
     // Must be called prior to run
     void set_overall_latency_meter(LatencyMeterPtr latency_meter);
     void set_latency_barrier(BarrierPtr latency_barrier);
+    std::shared_ptr<ConfiguredNetworkGroup> get_configured_network_group();
+    void set_last_measured_fps(double fps);
+    double get_last_measured_fps();
 
 protected:
     static bool inference_succeeded(hailo_status status);
@@ -177,8 +180,9 @@ protected:
         // sync_event will be used to send one frame at a time
         EventPtr sync_event = nullptr;
         if (m_params.measure_hw_latency || m_params.measure_overall_latency) {
-            sync_event = Event::create_shared(Event::State::not_signalled);
-            CHECK_NOT_NULL(sync_event, HAILO_OUT_OF_HOST_MEMORY);
+            auto sync_event_exp = Event::create_shared(Event::State::not_signalled);
+            CHECK_EXPECTED_AS_STATUS(sync_event_exp);
+            sync_event = sync_event_exp.release();
         }
 
         while (true) {
@@ -253,8 +257,9 @@ protected:
         // sync_event will be used to send one frame at a time
         EventPtr sync_event = nullptr;
         if (m_params.measure_hw_latency || m_params.measure_overall_latency) {
-            sync_event = Event::create_shared(Event::State::not_signalled);
-            CHECK_NOT_NULL(sync_event, HAILO_OUT_OF_HOST_MEMORY);
+            auto sync_event_exp = Event::create_shared(Event::State::not_signalled);
+            CHECK_EXPECTED_AS_STATUS(sync_event_exp);
+            sync_event = sync_event_exp.release();
         }
 
         while (true) {
@@ -301,6 +306,7 @@ protected:
     std::shared_ptr<ConfiguredNetworkGroup> m_cng;
     LatencyMeterPtr m_overall_latency_meter;
     BarrierPtr m_latency_barrier;
+    double m_last_measured_fps;
 
 private:
     static const std::vector<hailo_status> ALLOWED_INFERENCE_RETURN_VALUES;
index 3d8cf98c1eec8cadf01f5b35d042992887b732c0..53ec37fbf2d06512b429dd4da69373a8294e0fd3 100644 (file)
 #include "../common.hpp"
 #include "hailo/vdevice.hpp"
 #include "hailo/hef.hpp"
+#include "../download_action_list_command.hpp"
 
 #include <memory>
 #include <vector>
+#include <regex>
 
 using namespace hailort;
 
 constexpr uint32_t DEFAULT_TIME_TO_RUN_SECONDS = 5;
 
+static const char *JSON_SUFFIX = ".json";
+static const char *RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER = "<hef>";
+static const std::vector<uint16_t> DEFAULT_BATCH_SIZES = {1, 2, 4, 8, 16};
+static const uint16_t RUNTIME_DATA_BATCH_INDEX_TO_MEASURE_DEFAULT = 2;
+
+using json = nlohmann::json;
+using ordered_json = nlohmann::ordered_json;
+
 /** VStreamNameValidator */
 class VStreamNameValidator : public CLI::Validator {
   public:
@@ -199,13 +209,11 @@ VStreamApp::VStreamApp(const std::string &description, const std::string &name,
         }))
         ->default_val("auto");
 
-    add_flag_callback(format_opt_group, "-q,--quantized,!--no-quantized", "Whether or not data is quantized",
-        [this](bool result){
-            m_vstream_params.params.user_buffer_format.flags = result ?
-                static_cast<hailo_format_flags_t>(m_vstream_params.params.user_buffer_format.flags | HAILO_FORMAT_FLAGS_QUANTIZED) :
-                static_cast<hailo_format_flags_t>(m_vstream_params.params.user_buffer_format.flags & (~HAILO_FORMAT_FLAGS_QUANTIZED));})
-        ->run_callback_for_default()
+    auto quantized_option = format_opt_group->add_flag("-q,--quantized,!--no-quantized",
+        "Whether or not data is quantized. This flag is ignored - whether the data requires quantization is determined by the src-data and dst-data types.")
+        ->default_val(true); // value is unused; the flag is kept only for backwards compatibility
+
+    hailo_deprecate_options(format_opt_group, { std::make_shared<OptionDeprecation>(quantized_option) }, false);
 }
 
 CLI::Option* VStreamApp::add_flag_callback(CLI::App *app, const std::string &name, const std::string &description,
@@ -307,6 +315,9 @@ class Run2 : public CLI::App
 public:
     Run2();
 
+    Expected<std::unique_ptr<VDevice>> create_vdevice();
+    Expected<std::vector<std::shared_ptr<NetworkRunner>>> init_and_run_net_runners(VDevice *vdevice);
+
     const std::vector<NetworkParams>& get_network_params();
     std::chrono::seconds get_time_to_run();
     std::vector<hailo_device_id_t> get_dev_ids();
@@ -317,6 +328,8 @@ public:
     bool get_measure_hw_latency();
     bool get_measure_overall_latency();
     bool get_multi_process_service();
+    bool get_measure_fw_actions();
+    std::string get_measure_fw_actions_output_path();
     const std::string &get_group_id();
     InferenceMode get_mode() const;
     const std::string &get_output_json_path();
@@ -324,8 +337,10 @@ public:
     void set_scheduling_algorithm(hailo_scheduling_algorithm_t scheduling_algorithm);
     void set_inference_mode();
     void set_measure_latency();
+    void set_batch_size(uint16_t batch_size);
 
 private:
+    void add_measure_fw_actions_subcom();
     void add_net_app_subcom();
     std::vector<NetworkParams> m_network_params;
     uint32_t m_time_to_run;
@@ -342,11 +357,15 @@ private:
     bool m_measure_power;
     bool m_measure_current;
     bool m_measure_temp;
+
+    bool m_measure_fw_actions;
+    std::string m_measure_fw_actions_output_path;
 };
 
 
-Run2::Run2() : CLI::App("Run networks (preview)", "run2")
+Run2::Run2() : CLI::App("Run networks", "run2")
 {
+    add_measure_fw_actions_subcom();
     add_net_app_subcom();
     add_option("-t,--time-to-run", m_time_to_run, "Time to run (seconds)")
         ->default_val(DEFAULT_TIME_TO_RUN_SECONDS)
@@ -358,7 +377,6 @@ Run2::Run2() : CLI::App("Run networks (preview)", "run2")
             { "raw_async", InferenceMode::RAW_ASYNC },
             { "raw_async_single_thread", InferenceMode::RAW_ASYNC_SINGLE_THREAD, OptionVisibility::HIDDEN }
         }))->default_val("full");
-    static const char *JSON_SUFFIX = ".json";
     add_option("-j,--json", m_stats_json_path, "If set save statistics as json to the specified path")
     ->default_val("")
     ->check(FileSuffixValidator(JSON_SUFFIX));
@@ -373,9 +391,6 @@ Run2::Run2() : CLI::App("Run networks (preview)", "run2")
         ->check(CLI::PositiveNumber)
         ->excludes(dev_id_opt);
 
-    vdevice_options_group->add_flag("--multi-process-service", m_multi_process_service, "VDevice multi process service")
-        ->default_val(false);
-
     vdevice_options_group->add_option("--group-id", m_group_id, "VDevice group id")
         ->default_val(HAILO_DEFAULT_VDEVICE_GROUP_ID);
 
@@ -384,7 +399,7 @@ Run2::Run2() : CLI::App("Run networks (preview)", "run2")
     auto measure_power_opt = measurement_options_group->add_flag("--measure-power", m_measure_power, "Measure power consumption")
         ->default_val(false);
 
-    measurement_options_group->add_flag("--measure-current", m_measure_current, "Measure current")->excludes(measure_power_opt)
+    auto measure_current_opt = measurement_options_group->add_flag("--measure-current", m_measure_current, "Measure current")->excludes(measure_power_opt)
         ->default_val(false);
 
     measurement_options_group->add_flag("--measure-latency", m_measure_hw_latency, "Measure network latency on the NN core")
@@ -393,8 +408,41 @@ Run2::Run2() : CLI::App("Run networks (preview)", "run2")
     measurement_options_group->add_flag("--measure-overall-latency", m_measure_overall_latency, "Measure overall latency measurement")
         ->default_val(false);
 
-    measurement_options_group->add_flag("--measure-temp", m_measure_temp, "Measure chip temperature")
+    auto measure_temp_opt = measurement_options_group->add_flag("--measure-temp", m_measure_temp, "Measure chip temperature")
+        ->default_val(false);
+
+    auto multi_process_flag = vdevice_options_group->add_flag("--multi-process-service", m_multi_process_service, "VDevice multi process service")
         ->default_val(false);
+
+    if (VDevice::service_over_ip_mode()) {
+        multi_process_flag
+        ->excludes(measure_power_opt)
+        ->excludes(measure_current_opt)
+        ->excludes(measure_temp_opt);
+        // When working with service over ip - client doesn't have access to physical devices
+    } else {
+        (void)measure_power_opt;
+        (void)measure_current_opt;
+        (void)measure_temp_opt;
+        (void)multi_process_flag;
+    }
+}
+
+void Run2::add_measure_fw_actions_subcom()
+{
+    m_measure_fw_actions = false;
+    auto measure_fw_actions_subcommand = std::make_shared<NetworkApp>("Collect runtime data to be used by the Profiler", "measure-fw-actions");
+    measure_fw_actions_subcommand->parse_complete_callback([this]() {
+        m_measure_fw_actions = true;
+    });
+    measure_fw_actions_subcommand->add_option("--output-path", m_measure_fw_actions_output_path,
+        fmt::format("Runtime data output file path\n'{}' will be replaced with the current running hef", RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER))
+        ->default_val(fmt::format("runtime_data_{}.json", RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER))
+        ->check(FileSuffixValidator(JSON_SUFFIX));
+
+    measure_fw_actions_subcommand->alias("collect-runtime-data");
+
+    add_subcommand(measure_fw_actions_subcommand);
 }
 
 void Run2::add_net_app_subcom()
@@ -499,11 +547,28 @@ void Run2::set_measure_latency()
     }
 }
 
+void Run2::set_batch_size(uint16_t batch_size)
+{
+    for (auto &params: m_network_params) {
+        params.batch_size = batch_size;
+    }
+}
+
 bool Run2::get_multi_process_service()
 {
     return m_multi_process_service;
 }
 
+bool Run2::get_measure_fw_actions()
+{
+    return m_measure_fw_actions;
+}
+
+std::string Run2::get_measure_fw_actions_output_path()
+{
+    return m_measure_fw_actions_output_path;
+}
+
 const std::string &Run2::get_group_id()
 {
     return m_group_id;
@@ -560,69 +625,83 @@ std::string get_str_infer_mode(const InferenceMode& infer_mode)
     return "<Unknown>";
 }
 
-hailo_status Run2Command::execute()
+// We assume that hef_place_holder_regex is valid
+std::string format_measure_fw_actions_output_path(const std::string &base_output_path, const std::string &hef_path,
+    const std::string &hef_place_holder_regex = RUNTIME_DATA_OUTPUT_PATH_HEF_PLACE_HOLDER,
+    const std::string &hef_suffix = ".hef")
 {
-    Run2 *app = reinterpret_cast<Run2*>(m_app);
+    const auto hef_basename = Filesystem::basename(hef_path);
+    const auto hef_no_suffix = Filesystem::remove_suffix(hef_basename, hef_suffix);
+    return std::regex_replace(base_output_path, std::regex(hef_place_holder_regex), hef_no_suffix);
+}
 
-    app->set_inference_mode();
-    app->set_measure_latency();
+Expected<std::reference_wrapper<Device>> get_single_physical_device(VDevice &vdevice)
+{
+    auto expected_physical_devices = vdevice.get_physical_devices();
+    CHECK_EXPECTED(expected_physical_devices);
+    CHECK_AS_EXPECTED(1 == expected_physical_devices->size(), HAILO_INVALID_OPERATION, "Operation not allowed for multi-device");
+    auto &res = expected_physical_devices->at(0);
+    return std::move(res);
+}
 
-    if (0 == app->get_network_params().size()) {
-        LOGGER__ERROR("Nothing to run");
-        return HAILO_INVALID_OPERATION;
-    }
-    if (1 == app->get_network_params().size()) {
-        LOGGER__WARN("\"hailortcli run2\" is in preview. It is recommended to use \"hailortcli run\" command for a single network group");
-    }
-    if (app->get_measure_hw_latency() || app->get_measure_overall_latency()) {
-        CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "When latency measurement is enabled, only one model is allowed");
-        LOGGER__WARN("Measuring latency; frames are sent one at a time and FPS will not be measured");
-    }
+Expected<std::unique_ptr<VDevice>> Run2::create_vdevice()
+{
+    // hailo_vdevice_params_t is a c-structure that have pointers of device_ids, we must keep reference to the devices
+    // object alive until vdevice_params is destructed.
+    auto dev_ids = get_dev_ids();
 
     hailo_vdevice_params_t vdevice_params{};
-    CHECK_SUCCESS(hailo_init_vdevice_params(&vdevice_params));
-    auto dev_ids = app->get_dev_ids();
+    CHECK_SUCCESS_AS_EXPECTED(hailo_init_vdevice_params(&vdevice_params));
     if (!dev_ids.empty()) {
         vdevice_params.device_count = static_cast<uint32_t>(dev_ids.size());
         vdevice_params.device_ids = dev_ids.data();
-
         // Disable scheduler for eth VDevice
         if ((1 == dev_ids.size()) && (is_valid_ip(dev_ids[0].id))) {
             vdevice_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE;
-            CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "On Ethernet inference only one model is allowed");
-            app->set_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_NONE);
+            CHECK_AS_EXPECTED(1 == get_network_params().size(), HAILO_INVALID_OPERATION, "On Ethernet inference only one model is allowed");
+            set_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_NONE);
         }
     } else {
-        vdevice_params.device_count = app->get_device_count();
+        vdevice_params.device_count = get_device_count();
     }
-    // TODO: Async stream support for scheduler (HRT-9878)
-    if ((app->get_mode() == InferenceMode::RAW_ASYNC) || (app->get_mode() == InferenceMode::RAW_ASYNC_SINGLE_THREAD)) {
+
+    if (get_measure_fw_actions()) {
+        CHECK_AS_EXPECTED(1 == get_network_params().size(), HAILO_INVALID_OPERATION, "Only one model is allowed when collecting runtime data");
+        CHECK_AS_EXPECTED(!get_multi_process_service(), HAILO_INVALID_OPERATION, "Collecting runtime data is not supported with multi process service");
+        CHECK_AS_EXPECTED(get_device_count() == 1, HAILO_INVALID_OPERATION, "Collecting runtime data is not supported with multi device");
+        CHECK_AS_EXPECTED(!(get_measure_hw_latency() || get_measure_overall_latency()), HAILO_INVALID_OPERATION, "Latency measurement is not allowed when collecting runtime data");
+        CHECK_AS_EXPECTED((get_mode() == InferenceMode::RAW) || (get_mode() == InferenceMode::RAW_ASYNC), HAILO_INVALID_OPERATION,
+            "'measure-fw-actions' is only supported with '--mode=raw'. Received mode: '{}'", get_str_infer_mode(get_mode()));
+
         vdevice_params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE;
-        CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "Only one model is allowed with aw async inference mode");
-        app->set_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_NONE);
+        set_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_NONE);
     }
 
-    vdevice_params.group_id = app->get_group_id().c_str();
-    vdevice_params.multi_process_service = app->get_multi_process_service();
+    vdevice_params.group_id = get_group_id().c_str();
+    vdevice_params.multi_process_service = get_multi_process_service();
 
-    auto vdevice = VDevice::create(vdevice_params);
-    CHECK_EXPECTED_AS_STATUS(vdevice);
+    return VDevice::create(vdevice_params);
+}
 
-    // create network runners
+Expected<std::vector<std::shared_ptr<NetworkRunner>>> Run2::init_and_run_net_runners(VDevice *vdevice)
+{
     std::vector<std::shared_ptr<NetworkRunner>> net_runners;
-    for (auto &net_params : app->get_network_params()) {
-        auto net_runner = NetworkRunner::create_shared(*vdevice->get(), net_params);
-        CHECK_EXPECTED_AS_STATUS(net_runner);
 
-        net_runners.emplace_back(net_runner.release());
+    auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED(shutdown_event_exp);
+    auto shutdown_event = shutdown_event_exp.release();
+
+    // create network runners
+    for (auto &net_params : get_network_params()) {
+        auto expected_net_runner = NetworkRunner::create_shared(*vdevice, net_params);
+        CHECK_EXPECTED(expected_net_runner);
+        auto net_runner = expected_net_runner.release();
+        net_runners.emplace_back(net_runner);
     }
 
     auto live_stats = std::make_unique<LiveStats>(std::chrono::seconds(1));
 
-    live_stats->add(std::make_shared<TimerLiveTrack>(app->get_time_to_run()), 0);
-
-    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
-    CHECK_NOT_NULL(shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+    live_stats->add(std::make_shared<TimerLiveTrack>(get_time_to_run()), 0);
 
     std::vector<AsyncThreadPtr<hailo_status>> threads;
     Barrier activation_barrier(net_runners.size() + 1); // We wait for all nets to finish activation + this thread to start sampling
@@ -635,32 +714,115 @@ hailo_status Run2Command::execute()
 
     auto signal_event_scope_guard = SignalEventScopeGuard(*shutdown_event);
 
-    auto physical_devices = vdevice.value()->get_physical_devices();
-    CHECK_EXPECTED_AS_STATUS(physical_devices);
+    if (get_measure_power() || get_measure_current() || get_measure_temp()) {
+        auto physical_devices = vdevice->get_physical_devices();
+        CHECK_EXPECTED(physical_devices);
 
-    for (auto &device : physical_devices.value()) {
-        auto measurement_live_track = MeasurementLiveTrack::create_shared(device.get(), app->get_measure_power(),
-            app->get_measure_current(), app->get_measure_temp());
-        if (HAILO_SUCCESS != measurement_live_track.status()) {
-            activation_barrier.terminate();
-        }
-        CHECK_EXPECTED_AS_STATUS(measurement_live_track);
+        for (auto &device : physical_devices.value()) {
+            auto measurement_live_track = MeasurementLiveTrack::create_shared(device.get(), get_measure_power(),
+                get_measure_current(), get_measure_temp());
+            if (HAILO_SUCCESS != measurement_live_track.status()) {
+                activation_barrier.terminate();
+            }
+            CHECK_EXPECTED(measurement_live_track);
 
-        live_stats->add(measurement_live_track.release(), 2);
+            live_stats->add(measurement_live_track.release(), 2);
+        }
     }
 
     // TODO: wait for all nets before starting timer. start() should update TimerLiveTrack to start. or maybe append here but first in vector...
     activation_barrier.arrive_and_wait();
-    CHECK_SUCCESS(live_stats->start());
-    auto status = shutdown_event->wait(app->get_time_to_run());
+    CHECK_SUCCESS_AS_EXPECTED(live_stats->start());
+    auto status = shutdown_event->wait(get_time_to_run());
     if (HAILO_TIMEOUT != status) {
         // if shutdown_event is signaled its because one of the send/recv threads failed
         LOGGER__ERROR("Encountered error during inference. See log for more information.");
     }
-    if (!app->get_output_json_path().empty()){
-        live_stats->dump_stats(app->get_output_json_path(), get_str_infer_mode(app->get_mode()));
+    if (!get_output_json_path().empty()){
+        live_stats->dump_stats(get_output_json_path(), get_str_infer_mode(get_mode()));
+    }
+    auto expected_fps_per_network = live_stats->get_last_measured_fps_per_network_group();
+    CHECK_EXPECTED(expected_fps_per_network);
+    auto fps_per_network = expected_fps_per_network.release();
+    for (size_t network_runner_index = 0; network_runner_index < fps_per_network.size(); network_runner_index++) {
+        net_runners[network_runner_index]->set_last_measured_fps(fps_per_network[network_runner_index]);
     }
     live_stats.reset(); // Ensures that the final print will include real values and not with values of when streams are already aborted.
     shutdown_event->signal();
-    return wait_for_threads(threads);
+    wait_for_threads(threads);
+    return net_runners;
+}
+
+hailo_status Run2Command::execute()
+{
+    Run2 *app = reinterpret_cast<Run2*>(m_app);
+
+    app->set_inference_mode();
+    app->set_measure_latency();
+
+    CHECK(0 < app->get_network_params().size(), HAILO_INVALID_OPERATION, "Nothing to run");
+
+    if (app->get_measure_hw_latency() || app->get_measure_overall_latency()) {
+        CHECK(1 == app->get_network_params().size(), HAILO_INVALID_OPERATION, "When latency measurement is enabled, only one model is allowed");
+        LOGGER__WARNING("Measuring latency; frames are sent one at a time and FPS will not be measured");
+    }
+
+    if (1 == app->get_network_params().size()) {
+        LOGGER__WARNING("\"hailortcli run2\" is not optimized for single model usage. It is recommended to use \"hailortcli run\" command for a single model");
+    }
+
+    auto expected_vdevice = app->create_vdevice();
+    CHECK_EXPECTED_AS_STATUS(expected_vdevice);
+    auto vdevice = expected_vdevice.release();
+
+    std::vector<uint16_t> batch_sizes_to_run = { app->get_network_params()[0].batch_size };
+    if(app->get_measure_fw_actions() && app->get_network_params()[0].batch_size == HAILO_DEFAULT_BATCH_SIZE) {
+        // In case measure-fw-actions is enabled and no batch size was provided - we want to run with batch sizes 1,2,4,8,16
+        batch_sizes_to_run = DEFAULT_BATCH_SIZES;
+    }
+
+    std::string runtime_data_output_path;
+    ordered_json action_list_json;
+
+    if (app->get_measure_fw_actions()) {
+        auto device = get_single_physical_device(*vdevice);
+        CHECK_EXPECTED_AS_STATUS(device);
+
+        auto expected_action_list_json = DownloadActionListCommand::init_json_object(device.release(), app->get_network_params()[0].hef_path);
+        CHECK_EXPECTED_AS_STATUS(expected_action_list_json);
+        action_list_json = expected_action_list_json.release();
+        runtime_data_output_path = format_measure_fw_actions_output_path(
+            app->get_measure_fw_actions_output_path(), app->get_network_params()[0].hef_path);
+    }
+
+    uint32_t network_group_index = 0;
+    for (auto batch_size : batch_sizes_to_run) {
+        if(app->get_measure_fw_actions()) {
+            app->set_batch_size(batch_size);
+
+            auto device = get_single_physical_device(*vdevice);
+            CHECK_EXPECTED_AS_STATUS(device);
+
+            auto status = DownloadActionListCommand::set_batch_to_measure(device.release(), RUNTIME_DATA_BATCH_INDEX_TO_MEASURE_DEFAULT);
+            CHECK_SUCCESS(status);
+        }
+
+        auto expected_net_runners = app->init_and_run_net_runners(vdevice.get());
+        CHECK_EXPECTED_AS_STATUS(expected_net_runners);
+        auto net_runners = expected_net_runners.release();
+
+        if(app->get_measure_fw_actions()) { // Collecting runtime data
+            auto device = get_single_physical_device(*vdevice);
+            CHECK_EXPECTED_AS_STATUS(device);
+
+            auto status = DownloadActionListCommand::execute(device.release(), net_runners[0]->get_configured_network_group(), batch_size, action_list_json, net_runners[0]->get_last_measured_fps(), network_group_index);
+            CHECK_SUCCESS(status);
+
+            network_group_index++;
+        }
+    }
+    if(app->get_measure_fw_actions()) { // In case measure-fw-actions is enabled - write data to JSON file
+        CHECK_SUCCESS(DownloadActionListCommand::write_to_json(action_list_json, runtime_data_output_path));
+    }
+    return HAILO_SUCCESS;
 }
\ No newline at end of file
index 33670823f24ec2078d347b10fba54f9d629f77a4..65c241e262f165dd72f1e5de70238fea5e35bddf 100644 (file)
@@ -41,4 +41,9 @@ void TimerLiveTrack::push_json_impl(nlohmann::ordered_json &json)
     std::stringstream time_to_run;
     time_to_run << std::fixed << std::setprecision(2) << std::round(std::chrono::duration<double>(m_duration).count()) << " seconds";
     json["time_to_run"] = time_to_run.str();
+}
+
+Expected<double> TimerLiveTrack::get_last_measured_fps()
+{
+    return make_unexpected(HAILO_NOT_AVAILABLE);
 }
\ No newline at end of file
index 836b692a91056f38429d5cf2687236a2f08939eb..c00f5c39d600b3216c5f4b2663f1fd94dd6f2c63 100644 (file)
@@ -20,6 +20,7 @@ public:
     virtual hailo_status start_impl() override;
     virtual uint32_t push_text_impl(std::stringstream &ss) override;
     virtual void push_json_impl(nlohmann::ordered_json &json) override;
+    virtual hailort::Expected<double> get_last_measured_fps();
 
 private:
     std::chrono::milliseconds m_duration;
index 05be13c9c3693776eca41fcef73f8b841afc2de4..56535f24430c5bca34384d61c49d62c71e34e0f5 100644 (file)
 #include "inference_progress.hpp"
 #include "infer_stats_printer.hpp"
 #include "graph_printer.hpp"
-#if defined(__GNUC__)
-// TODO: Support on windows (HRT-5919)
 #include "download_action_list_command.hpp"
-#endif
 #include "common.hpp"
 
 #include "common/string_utils.hpp"
@@ -183,9 +180,9 @@ static void add_run_command_params(CLI::App *run_subcommand, inference_runner_pa
         ->default_val("true");
 
     auto transformation_group = run_subcommand->add_option_group("Transformations");
-    transformation_group->add_option("--quantized", params.transform.quantized,
+    auto quantized_option = transformation_group->add_option("--quantized", params.transform.quantized,
         "true means the tool assumes that the data is already quantized,\n"
-        "false means it is the tool's responsability to quantize (scale) the data.")
+        "false means it is the tool's responsibility to quantize (scale) the data.")
         ->default_val("true");
     transformation_group->add_option("--user-format-type", params.transform.format_type,
         "The host data type")
@@ -221,8 +218,6 @@ static void add_run_command_params(CLI::App *run_subcommand, inference_runner_pa
             "No measurement flags provided; Run 'hailortcli run measure-stats --help' for options");
     });
 
-    // TODO: Support on windows (HRT-5919)
-    #if defined(__GNUC__)
     auto *collect_runtime_data_subcommand = run_subcommand->add_subcommand("collect-runtime-data",
         "Collect runtime data to be used by the Profiler");
     static const char *JSON_SUFFIX = ".json";
@@ -240,7 +235,6 @@ static void add_run_command_params(CLI::App *run_subcommand, inference_runner_pa
         // If this subcommand was parsed, then we need to download runtime_data
         params.runtime_data.collect_runtime_data = true;
     });
-    #endif
 
     auto measure_power_group = run_subcommand->add_option_group("Measure Power/Current");
     CLI::Option *power_sampling_period = measure_power_group->add_option("--sampling-period",
@@ -263,13 +257,13 @@ static void add_run_command_params(CLI::App *run_subcommand, inference_runner_pa
                         ->excludes(elem_latency_option)
                         ->excludes(elem_queue_size_option);
 
+    hailo_deprecate_options(run_subcommand, { std::make_shared<OptionDeprecation>(quantized_option) }, false);
+
     run_subcommand->parse_complete_callback([&params, hef_new, power_sampling_period,
             power_averaging_factor, measure_power_opt, measure_current_opt]() {
         PARSE_CHECK(!hef_new->empty(), "Single HEF file/directory is required");
         bool is_hw_only = InferMode::HW_ONLY == params.mode;
         params.transform.transform = (!is_hw_only || (params.inputs_name_and_file_path.size() > 0));
-        PARSE_CHECK((!params.transform.quantized || (HAILO_FORMAT_TYPE_AUTO == params.transform.format_type)),
-            "User data type must be auto when quantized is set");
         bool has_oneof_measure_flags = (!measure_power_opt->empty() || !measure_current_opt->empty());
         PARSE_CHECK(power_sampling_period->empty() || has_oneof_measure_flags,
             "--sampling-period requires --measure-power or --measure-current");
@@ -302,6 +296,10 @@ static void add_run_command_params(CLI::App *run_subcommand, inference_runner_pa
                     params.runtime_data.batch_to_measure);
             }
         }
+
+        PARSE_CHECK((params.dot_output.empty() || !is_hw_only),
+            "Generating .dot file for pipeline graph is impossible when running in 'hw-only' mode");
+
     });
 }
 
@@ -476,7 +474,8 @@ Expected<std::map<std::string, std::vector<InputVStream>>> create_input_vstreams
     auto network_infos = configured_net_group.get_network_infos();
     CHECK_EXPECTED(network_infos);
     for (auto &network_info : network_infos.value()) {
-        auto input_vstreams_params = configured_net_group.make_input_vstream_params(params.transform.quantized,
+        auto quantized = (params.transform.format_type != HAILO_FORMAT_TYPE_FLOAT32);
+        auto input_vstreams_params = configured_net_group.make_input_vstream_params(quantized,
             params.transform.format_type, HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_info.name);
         CHECK_EXPECTED(input_vstreams_params);
 
@@ -498,7 +497,15 @@ Expected<std::map<std::string, std::vector<OutputVStream>>> create_output_vstrea
     auto network_infos = configured_net_group.get_network_infos();
     CHECK_EXPECTED(network_infos);
     for (auto &network_info : network_infos.value()) {
-        auto output_vstreams_params = configured_net_group.make_output_vstream_params(params.transform.quantized,
+        // Data is not quantized if format_type is explicitly float32, or if an output is NMS (which also enforces float32 output)
+        // We don't cover a case of multiple outputs where only some of them are NMS (no such model currently), and anyway it is handled in run2
+        auto vstream_infos = configured_net_group.get_output_vstream_infos();
+        CHECK_EXPECTED(vstream_infos);
+        auto nms_output = std::any_of(vstream_infos->begin(), vstream_infos->end(), [] (const hailo_vstream_info_t &output_info) {
+            return HailoRTCommon::is_nms(output_info);
+        });
+        auto quantized = ((params.transform.format_type != HAILO_FORMAT_TYPE_FLOAT32) && !nms_output);
+        auto output_vstreams_params = configured_net_group.make_output_vstream_params(quantized,
             params.transform.format_type, HAILORTCLI_DEFAULT_VSTREAM_TIMEOUT_MS, HAILO_DEFAULT_VSTREAM_QUEUE_SIZE, network_info.name);
         CHECK_EXPECTED(output_vstreams_params);
 
@@ -958,43 +965,59 @@ static Expected<std::unique_ptr<ActivatedNetworkGroup>> activate_network_group(C
 }
 
 static Expected<std::map<std::string, BufferPtr>> create_constant_dataset(
-    const std::vector<hailo_stream_info_t> &input_streams_infos, const hailo_transform_params_t &trans_params)
+    const std::pair<std::vector<hailo_stream_info_t>, std::vector<hailo_vstream_info_t>> &input_infos, const hailo_transform_params_t &trans_params,
+    InferMode mode)
 {
     const uint8_t const_byte = 0xAB;
     std::map<std::string, BufferPtr> dataset;
-    for (const auto &input_stream_info : input_streams_infos) {
-        const auto frame_size = hailo_get_host_frame_size(&input_stream_info, &trans_params);
-        auto constant_buffer = Buffer::create_shared(frame_size, const_byte);
-        if (!constant_buffer) {
-            std::cerr << "Out of memory, tried to allocate " << frame_size << std::endl;
-            return make_unexpected(constant_buffer.status());
-        }
 
-        dataset.emplace(std::string(input_stream_info.name), constant_buffer.release());
+    if (InferMode::HW_ONLY == mode) {
+        for (const auto &input_stream_info : input_infos.first) {
+            const auto frame_size = input_stream_info.hw_frame_size;
+            auto constant_buffer = Buffer::create_shared(frame_size, const_byte);
+            if (!constant_buffer) {
+                std::cerr << "Out of memory, tried to allocate " << frame_size << std::endl;
+                return make_unexpected(constant_buffer.status());
+            }
+            dataset.emplace(std::string(input_stream_info.name), constant_buffer.release());
+        }
+    } else {
+        for (const auto &input_vstream_info : input_infos.second) {
+            const auto frame_size = HailoRTCommon::get_frame_size(input_vstream_info, trans_params.user_buffer_format);
+            auto constant_buffer = Buffer::create_shared(frame_size, const_byte);
+            if (!constant_buffer) {
+                std::cerr << "Out of memory, tried to allocate " << frame_size << std::endl;
+                return make_unexpected(constant_buffer.status());
+            }
+            dataset.emplace(std::string(input_vstream_info.name), constant_buffer.release());
+        }
     }
 
     return dataset;
 }
 
 static Expected<std::map<std::string, BufferPtr>> create_dataset_from_files(
-    const std::vector<hailo_stream_info_t> &input_streams_infos, const std::vector<std::string> &input_files,
+    const std::pair<std::vector<hailo_stream_info_t>, std::vector<hailo_vstream_info_t>> &input_infos, const std::vector<std::string> &input_files,
     const hailo_transform_params_t &trans_params, InferMode mode)
 {
-    CHECK_AS_EXPECTED(input_streams_infos.size() == input_files.size(), HAILO_INVALID_ARGUMENT, "Number of input files ({}) must be equal to the number of inputs ({})", input_files.size(), input_streams_infos.size());
+    // When creating dataset from files we always care about the logic-inputs (e.g. vstreams)
+    CHECK_AS_EXPECTED(input_infos.second.size() == input_files.size(),
+        HAILO_INVALID_ARGUMENT, "Number of input files ({}) must be equal to the number of inputs ({})", input_files.size(), input_infos.second.size());
 
     std::map<std::string, std::string> file_paths;
-    if ((input_streams_infos.size() == 1) && (input_files[0].find("=") == std::string::npos)) { // Legacy single input format
-        file_paths.emplace(std::string(input_streams_infos[0].name), input_files[0]);
+    if ((input_infos.second.size() == 1) && (input_files[0].find("=") == std::string::npos)) { // Legacy single input format
+        file_paths.emplace(std::string(input_infos.second.begin()->name), input_files[0]);
     }
     else {
         file_paths = format_strings_to_key_value_pairs(input_files);
     }
 
     std::map<std::string, BufferPtr> dataset;
-    for (const auto &input_stream_info : input_streams_infos) {
-        const auto host_frame_size = hailo_get_host_frame_size(&input_stream_info, &trans_params);
-        const auto stream_name = std::string(input_stream_info.name);
-        CHECK_AS_EXPECTED(stream_name.find("=") == std::string::npos, HAILO_INVALID_ARGUMENT, "stream inputs must not contain '=' characters: {}", stream_name);
+    for (const auto &input_vstream_info : input_infos.second) {
+        const auto host_frame_size = HailoRTCommon::get_frame_size(input_vstream_info, trans_params.user_buffer_format);
+        const auto stream_name = std::string(input_vstream_info.name);
+        CHECK_AS_EXPECTED(stream_name.find("=") == std::string::npos,
+            HAILO_INVALID_ARGUMENT, "stream inputs must not contain '=' characters: {}", stream_name);
 
         const auto file_path_it = file_paths.find(stream_name);
         CHECK_AS_EXPECTED(file_paths.end() != file_path_it, HAILO_INVALID_ARGUMENT, "Missing input file for input: {}", stream_name);
@@ -1005,13 +1028,17 @@ static Expected<std::map<std::string, BufferPtr>> create_dataset_from_files(
             "Input file ({}) size {} must be a multiple of the frame size {} ({})", file_path_it->second, host_buffer->size(), host_frame_size, stream_name);
 
         if (InferMode::HW_ONLY == mode) {
+            auto matching_stream_info = std::find_if(input_infos.first.begin(), input_infos.first.end(), [&stream_name] (const auto &stream_info) {
+                return std::string(stream_info.name) == stream_name;
+            });
+            CHECK_AS_EXPECTED(matching_stream_info != input_infos.first.end(), HAILO_INVALID_OPERATION, "Failed to find raw-stream with name {}.", stream_name);
             const size_t frames_count = (host_buffer->size() / host_frame_size);
-            const size_t hw_frame_size = input_stream_info.hw_frame_size;
+            const size_t hw_frame_size = matching_stream_info->hw_frame_size;
             const size_t hw_buffer_size = frames_count * hw_frame_size;
             auto hw_buffer = Buffer::create_shared(hw_buffer_size);
             CHECK_EXPECTED(hw_buffer);
 
-            auto transform_context = InputTransformContext::create(input_stream_info, trans_params);
+            auto transform_context = InputTransformContext::create(*matching_stream_info, trans_params);
             CHECK_EXPECTED(transform_context);
             
             for (size_t i = 0; i < frames_count; i++) {
@@ -1022,8 +1049,7 @@ static Expected<std::map<std::string, BufferPtr>> create_dataset_from_files(
                 CHECK_SUCCESS_AS_EXPECTED(status);
             }
             dataset[stream_name] = hw_buffer.release();
-        }
-        else {
+        } else {
             auto host_buffer_shared = make_shared_nothrow<Buffer>(host_buffer.release());
             CHECK_NOT_NULL_AS_EXPECTED(host_buffer_shared, HAILO_OUT_OF_HOST_MEMORY);
             dataset[stream_name] = host_buffer_shared;
@@ -1044,17 +1070,22 @@ static Expected<std::vector<std::map<std::string, BufferPtr>>> create_dataset(
     trans_params.user_buffer_format.order = HAILO_FORMAT_ORDER_AUTO;
     trans_params.user_buffer_format.flags = (params.transform.quantized ? HAILO_FORMAT_FLAGS_QUANTIZED : HAILO_FORMAT_FLAGS_NONE);
     trans_params.user_buffer_format.type = params.transform.format_type;
-    std::vector<std::vector<hailo_stream_info_t>> input_infos;
+
+    // Vector of len(ng.count), each element is a pair of all input_stream_infos, and all input_vstream_infos
+    std::vector<std::pair<std::vector<hailo_stream_info_t>, std::vector<hailo_vstream_info_t>>> input_infos;
     for (auto &network_group : network_groups) {
         auto expected_all_streams_infos = network_group->get_all_stream_infos();
         CHECK_EXPECTED(expected_all_streams_infos);
-        auto &all_infos = expected_all_streams_infos.value();
-        std::vector<hailo_stream_info_t> group_input_infos;
-        std::copy_if(all_infos.begin(), all_infos.end(), std::back_inserter(group_input_infos), [](auto &info) {
+        auto &all_stream_infos = expected_all_streams_infos.value();
+        std::vector<hailo_stream_info_t> group_input_stream_infos;
+        std::copy_if(all_stream_infos.begin(), all_stream_infos.end(), std::back_inserter(group_input_stream_infos), [](const auto &info) {
             return info.direction == HAILO_H2D_STREAM;
         });
-        input_infos.push_back(group_input_infos);
+        auto expected_input_vstreams_infos = network_group->get_input_vstream_infos();
+        CHECK_EXPECTED(expected_input_vstreams_infos);
+        input_infos.push_back({group_input_stream_infos, expected_input_vstreams_infos.release()});
     }
+
     if (!params.inputs_name_and_file_path.empty()) {
         for (auto &group_input_infos : input_infos) {
             auto network_group_dataset = create_dataset_from_files(group_input_infos, params.inputs_name_and_file_path,
@@ -1062,10 +1093,9 @@ static Expected<std::vector<std::map<std::string, BufferPtr>>> create_dataset(
             CHECK_EXPECTED(network_group_dataset);
             results.emplace_back(network_group_dataset.release());
         }
-    }
-    else {
+    } else {
         for (auto &group_input_infos : input_infos) {
-            auto network_group_dataset = create_constant_dataset(group_input_infos, trans_params);
+            auto network_group_dataset = create_constant_dataset(group_input_infos, trans_params, params.mode);
             CHECK_EXPECTED(network_group_dataset);
             results.emplace_back(network_group_dataset.release());
         }
@@ -1179,18 +1209,13 @@ Expected<InferResult> run_command_hef_single_device(const inference_runner_param
     auto network_group_list = device->configure(hef.value(), configure_params.value());
     CHECK_EXPECTED(network_group_list, "Failed configure device from hef");
 
-#if defined(__GNUC__)
-    // TODO: Support on windows (HRT-5919)
     if (use_batch_to_measure_opt(params)) {
         auto status = DownloadActionListCommand::set_batch_to_measure(*device, params.runtime_data.batch_to_measure);
         CHECK_SUCCESS_AS_EXPECTED(status);
     }
-#endif
 
     auto inference_result = activate_and_run_single_device(*device, network_group_list.value(), params);
 
-#if defined(__GNUC__)
-    // TODO: Support on windows (HRT-5919)
     if (use_batch_to_measure_opt(params) && (0 == params.frames_count) && inference_result) {
         auto min_frames_count = get_min_inferred_frames_count(inference_result.value());
         CHECK_EXPECTED(min_frames_count);
@@ -1208,7 +1233,6 @@ Expected<InferResult> run_command_hef_single_device(const inference_runner_param
             params.hef_path);
     }
 
-#endif
     CHECK_EXPECTED(inference_result);
     return inference_result;
 }
@@ -1356,22 +1380,17 @@ Expected<InferResult> run_command_hef_vdevice(const inference_runner_params &par
     auto network_group_list = vdevice.value()->configure(hef.value(), configure_params.value());
     CHECK_EXPECTED(network_group_list, "Failed configure vdevice from hef");
 
-#if defined(__GNUC__)
     for (auto &device : physical_devices) {
-        // TODO: Support on windows (HRT-5919)
         if (use_batch_to_measure_opt(params)) {
             status = DownloadActionListCommand::set_batch_to_measure(device.get(), params.runtime_data.batch_to_measure);
             CHECK_SUCCESS_AS_EXPECTED(status);
         }
     }
-#endif
 
     auto infer_result = activate_and_run_vdevice(physical_devices, scheduler_is_used, network_group_list.value(), params);
     CHECK_EXPECTED(infer_result, "Error failed running inference");
 
-#if defined(__GNUC__)
     for (auto &device : physical_devices) {
-        // TODO: Support on windows (HRT-5919)
         if (use_batch_to_measure_opt(params) && (0 == params.frames_count) && infer_result) {
             auto min_frames_count = get_min_inferred_frames_count(infer_result.value());
             CHECK_EXPECTED(min_frames_count);
@@ -1390,7 +1409,6 @@ Expected<InferResult> run_command_hef_vdevice(const inference_runner_params &par
                 params.hef_path);
         }
     }
-#endif
 
     return infer_result;
 }
index 504348b491e9db552ed252dba799a35e7109d31b..c333356cd613a2fa8a4a466ab6a88018950f34ff 100644 (file)
@@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.0.0)
 # set(CMAKE_C_CLANG_TIDY "clang-tidy;-checks=*")
 
 set(HAILORT_MAJOR_VERSION    4)
-set(HAILORT_MINOR_VERSION    14)
+set(HAILORT_MINOR_VERSION    15)
 set(HAILORT_REVISION_VERSION 0)
 
 # Add the cmake folder so the modules there are found
@@ -43,9 +43,19 @@ target_include_directories(scheduler_mon_proto
     $<BUILD_INTERFACE: ${Protobuf_INCLUDE_DIRS}>
 )
 
-# Add readerwriterqueue as a header-only library
-add_library(readerwriterqueue INTERFACE)
-target_include_directories(readerwriterqueue INTERFACE ${HAILO_EXTERNAL_DIR}/readerwriterqueue)
+protobuf_generate_cpp(PROTO_PROFILER_SRC PROTO_PROFILER_HEADR tracer_profiler.proto)
+add_library(profiler_proto ${PROTO_PROFILER_SRC} ${PROTO_PROFILER_HEADR})
+target_link_libraries(profiler_proto libprotobuf-lite)
+set_target_properties(profiler_proto PROPERTIES CXX_STANDARD 14 GENERATED TRUE POSITION_INDEPENDENT_CODE ON)
+if(CMAKE_HOST_WIN32)
+    target_compile_options(profiler_proto PRIVATE /wd4244)
+endif()
+get_filename_component(PROTO_PROFILER_HEADER_DIRECTORY ${PROTO_PROFILER_HEADR} DIRECTORY)
+target_include_directories(profiler_proto
+    PUBLIC
+    $<BUILD_INTERFACE: ${PROTO_PROFILER_HEADER_DIRECTORY}>
+    $<BUILD_INTERFACE: ${Protobuf_INCLUDE_DIRS}>
+)
 
 add_subdirectory(src)
 set(NET_FLOW_INFRA_DIR "${CMAKE_CURRENT_SOURCE_DIR}/tests/infra/net_flow")
index dde420370386ec08a62bfe5f3b785a8aea6e8ae3..64b4b37bae473345bed355da870e23a38974cfcc 100644 (file)
@@ -8,7 +8,7 @@ if(NOT CMAKE_HOST_UNIX)
     message(FATAL_ERROR "Only unix hosts are supported, stopping build")
 endif()
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 # GST_PLUGIN_DEFINE needs PACKAGE to be defined
 set(GST_HAILO_PACKAGE_NAME "hailo")
@@ -54,6 +54,7 @@ target_link_libraries(gsthailo HailoRT::libhailort ${GSTREAMER_VIDEO_LDFLAGS})
 
 install(TARGETS gsthailo
     LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+    ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
     # TODO: get gstreamer-1.0 in an automated way
     PUBLIC_HEADER DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/gstreamer-1.0/gst/hailo/"
     CONFIGURATIONS Release)
index 77ce6bdfc137c77361175af6754360108908e152..c5aed4575b0192129e0dff7c6b4aedf61c00f1a5 100644 (file)
@@ -23,6 +23,7 @@
 #include "hailo_events/hailo_events.hpp"
 #include "metadata/hailo_buffer_flag_meta.hpp"
 #include "hailo/hailort_common.hpp"
+#include "hailo/hailort_defaults.hpp"
 
 #include <sstream>
 #include <algorithm>
@@ -108,6 +109,9 @@ enum
     PROP_OUTPUT_QUANTIZED,
     PROP_INPUT_FORMAT_TYPE,
     PROP_OUTPUT_FORMAT_TYPE,
+    PROP_NMS_SCORE_THRESHOLD,
+    PROP_NMS_IOU_THRESHOLD,
+    PROP_NMS_MAX_PROPOSALS_PER_CLASS,
 };
 
 G_DEFINE_TYPE(GstHailoNet, gst_hailonet, GST_TYPE_BIN);
@@ -197,12 +201,12 @@ static void gst_hailonet_class_init(GstHailoNetClass *klass)
             "To use this property, the service should be active and scheduling-algorithm should be set. Defaults to false.",
             HAILO_DEFAULT_MULTI_PROCESS_SERVICE, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
     g_object_class_install_property(gobject_class, PROP_INPUT_QUANTIZED,
-        g_param_spec_boolean("input-quantized", "Is the input quantized or not", "Passing `true` under the argument means that the input data sent to the stream is quantized to begin with."
-            "This will result in an input stream that doesn't quantize the input data. Passing `false` under the argument, will lead to input data being quantized.",
+        g_param_spec_boolean("input-quantized", "Is the input quantized or not", "Deprecated parameter that will be ignored. "
+        "Determine whether to quantize (scale) the data will be decided by the src-data and dst-data types.",
             true, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
     g_object_class_install_property(gobject_class, PROP_OUTPUT_QUANTIZED,
-        g_param_spec_boolean("output-quantized", "Should the output be quantized or de-quantized","Passing `true` under the argument means that the output data received from the stream is to remain quantized" 
-            "(such as it is upon exiting the device). This will result in an output stream that doesn't de-quantize the output data. Passing `false` under the argument will lead to output data being de-quantized.",
+        g_param_spec_boolean("output-quantized", "Should the output be quantized or de-quantized","Deprecated parameter that will be ignored. "
+        "Determine whether to de-quantize (rescale) the data will be decided by the src-data and dst-data types.",
             true, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
     g_object_class_install_property(gobject_class, PROP_INPUT_FORMAT_TYPE,
         g_param_spec_enum("input-format-type", "Input format type", "Input format type(auto, float32, uint16, uint8). Default value is auto."
@@ -214,6 +218,16 @@ static void gst_hailonet_class_init(GstHailoNetClass *klass)
             "Gets values from the enum GstHailoFormatType. ",
             GST_TYPE_HAILO_FORMAT_TYPE, HAILO_FORMAT_TYPE_AUTO,
         (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+    g_object_class_install_property(gobject_class, PROP_NMS_SCORE_THRESHOLD,
+        g_param_spec_float("nms-score-threshold", "NMS score threshold", "Threshold used for filtering out candidates. Any box with score<TH is suppressed.",
+            0, 1, 0, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+    g_object_class_install_property(gobject_class, PROP_NMS_IOU_THRESHOLD,
+        g_param_spec_float("nms-iou-threshold", "NMS IoU threshold", "Intersection over union overlap Threshold, used in the NMS iterative elimination process where potential duplicates of detected items are suppressed.",
+            0, 1, 0, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+    g_object_class_install_property(gobject_class, PROP_NMS_MAX_PROPOSALS_PER_CLASS,
+        g_param_spec_uint("nms-max-proposals-per-class", "NMS max proposals per class", "Set a limit for the maximum number of boxes per class.",
+            0, std::numeric_limits<uint32_t>::max(), 0, (GParamFlags)(G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
+
     // See information about the "flush" signal in the element description
     g_signal_new(
         "flush",
@@ -271,9 +285,9 @@ Expected<std::unique_ptr<HailoNetImpl>> HailoNetImpl::create(GstHailoNet *elemen
     g_signal_connect(element, "flush", G_CALLBACK(gst_hailonet_flush_callback), nullptr);
 
     auto was_flushed_event = Event::create_shared(Event::State::not_signalled);
-    GST_CHECK(nullptr != was_flushed_event, make_unexpected(HAILO_OUT_OF_HOST_MEMORY), element, RESOURCE, "Failed allocating memory for event!");
+    GST_CHECK_EXPECTED(was_flushed_event, element, RESOURCE, "Failed allocating memory for event!");
 
-    auto ptr = make_unique_nothrow<HailoNetImpl>(element, hailosend, queue, hailorecv, was_flushed_event);
+    auto ptr = make_unique_nothrow<HailoNetImpl>(element, hailosend, queue, hailorecv, was_flushed_event.release());
     if (nullptr == ptr) {
         return make_unexpected(HAILO_OUT_OF_HOST_MEMORY);
     }
@@ -518,6 +532,7 @@ void HailoNetImpl::set_property(GObject *object, guint property_id, const GValue
         m_props.m_multi_process_service = g_value_get_boolean(value);
         break;
     case PROP_INPUT_QUANTIZED:
+        g_warning("'input-quantized' is a deprecated parameter that will be ignored.");
         if (m_was_configured) {
             g_warning("The network was already configured so changing the quantized flag will not take place!");
             break;
@@ -525,6 +540,7 @@ void HailoNetImpl::set_property(GObject *object, guint property_id, const GValue
         m_props.m_input_quantized = g_value_get_boolean(value);
         break;
     case PROP_OUTPUT_QUANTIZED:
+        g_warning("'output-quantized' is a deprecated parameter that will be ignored.");
         if (m_was_configured) {
             g_warning("The network was already configured so changing the quantized flag will not take place!");
             break;
@@ -545,6 +561,27 @@ void HailoNetImpl::set_property(GObject *object, guint property_id, const GValue
         }
         m_props.m_output_format_type = static_cast<hailo_format_type_t>(g_value_get_enum(value));
         break;
+    case PROP_NMS_SCORE_THRESHOLD:
+        if (m_was_configured) {
+            g_warning("The network was already configured so changing the score threshold will not take place!");
+            break;
+        }
+        m_props.m_nms_score_threshold = static_cast<gfloat>(g_value_get_float(value));
+        break;
+    case PROP_NMS_IOU_THRESHOLD:
+        if (m_was_configured) {
+            g_warning("The network was already configured so changing the IoU threshold will not take place!");
+            break;
+        }
+        m_props.m_nms_iou_threshold = static_cast<gfloat>(g_value_get_float(value));
+        break;
+    case PROP_NMS_MAX_PROPOSALS_PER_CLASS:
+        if (m_was_configured) {
+            g_warning("The network was already configured so changing the max proposals per class will not take place!");
+            break;
+        }
+        m_props.m_nms_max_proposals_per_class = static_cast<guint32>(g_value_get_uint(value));
+        break;
     default:
         G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
         break;
@@ -630,6 +667,15 @@ void HailoNetImpl::get_property(GObject *object, guint property_id, GValue *valu
     case PROP_OUTPUT_FORMAT_TYPE:
         g_value_set_enum(value, m_props.m_output_format_type.get());
         break;
+    case PROP_NMS_SCORE_THRESHOLD:
+        g_value_set_float(value, m_props.m_nms_score_threshold.get());
+        break;
+    case PROP_NMS_IOU_THRESHOLD:
+        g_value_set_float(value, m_props.m_nms_iou_threshold.get());
+        break;
+    case PROP_NMS_MAX_PROPOSALS_PER_CLASS:
+        g_value_set_uint(value, m_props.m_nms_max_proposals_per_class.get());
+        break;
     default:
         G_OBJECT_WARN_INVALID_PROPERTY_ID(object, property_id, pspec);
         break;
@@ -657,11 +703,15 @@ hailo_status HailoNetImpl::set_hef()
         // TODO: HRT-4957
         GST_CHECK(m_net_group_handle->hef()->get_network_groups_names().size() == 1, HAILO_INVALID_ARGUMENT, m_element, RESOURCE,
             "Network group has to be specified when there are more than one network groups in the HEF!");
-        auto networks_infos = m_net_group_handle->hef()->get_network_infos(m_net_group_handle->hef()->get_network_groups_names()[0].c_str());
+        auto network_group_name = m_net_group_handle->hef()->get_network_groups_names()[0];
+
+        auto networks_infos = m_net_group_handle->hef()->get_network_infos(network_group_name.c_str());
         GST_CHECK_EXPECTED_AS_STATUS(networks_infos, m_element, RESOURCE, "Getting network infos from network group name was failed, status %d", networks_infos.status());
         GST_CHECK(networks_infos.value().size() == 1, HAILO_INVALID_ARGUMENT, m_element, RESOURCE,
             "Network has to be specified when there are more than one network in the network group!");
-        m_props.m_network_name = g_strdup(networks_infos.release()[0].name);
+
+        std::string default_ng_name = HailoRTDefaults::get_network_name(network_group_name);
+        m_props.m_network_name = g_strdup(default_ng_name.c_str());
     }
 
     auto input_vstream_infos = m_net_group_handle->hef()->get_input_vstream_infos(m_props.m_network_name.get());
@@ -720,12 +770,48 @@ hailo_status HailoNetImpl::configure_network_group()
         GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting scheduler priority failed, status = %d", status);
     }
 
-    auto vstreams = m_net_group_handle->create_vstreams(m_props.m_network_name.get(), m_props.m_scheduling_algorithm.get(), m_output_formats, static_cast<bool>(m_props.m_input_quantized.get()), 
-        static_cast<bool>(m_props.m_output_quantized.get()), m_props.m_input_format_type.get(), m_props.m_output_format_type.get());
+    auto input_quantized = (m_props.m_input_quantized.was_changed()) ? static_cast<bool>(m_props.m_input_quantized.get()) :
+        (m_props.m_input_format_type.get() != HAILO_FORMAT_TYPE_FLOAT32);
+
+    auto output_quantized = (m_props.m_output_quantized.was_changed()) ? static_cast<bool>(m_props.m_output_quantized.get()) :
+        (m_props.m_output_format_type.get() != HAILO_FORMAT_TYPE_FLOAT32);
+
+    auto vstreams = m_net_group_handle->create_vstreams(m_props.m_network_name.get(), m_props.m_scheduling_algorithm.get(), m_output_formats,
+        input_quantized, output_quantized, m_props.m_input_format_type.get(), m_props.m_output_format_type.get());
     GST_CHECK_EXPECTED_AS_STATUS(vstreams, m_element, RESOURCE, "Creating vstreams failed, status = %d", status);
 
     GST_HAILOSEND(m_hailosend)->impl->set_input_vstreams(std::move(vstreams->first));
 
+    // Check that if any of the NMS params was changed, we have NMS outputs in the model
+    auto has_nms_output = std::any_of(vstreams->second.begin(), vstreams->second.end(), [](const auto &vs)
+    {
+        return HailoRTCommon::is_nms(vs.get_info());
+    });
+
+    for (auto &out_vs : vstreams->second) {
+        if (m_props.m_nms_score_threshold.was_changed()) {
+            GST_CHECK(has_nms_output, HAILO_INVALID_OPERATION, m_element, RESOURCE, "NMS score threshold is set, but there is no NMS output in this model.");
+            if (HailoRTCommon::is_nms(out_vs.get_info())) {
+                status = out_vs.set_nms_score_threshold(m_props.m_nms_score_threshold.get());
+                GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting NMS score threshold failed, status = %d", status);
+            }
+        }
+        if (m_props.m_nms_iou_threshold.was_changed()) {
+            GST_CHECK(has_nms_output, HAILO_INVALID_OPERATION, m_element, RESOURCE, "NMS IoU threshold is set, but there is no NMS output in this model.");
+            if (HailoRTCommon::is_nms(out_vs.get_info())) {
+                status = out_vs.set_nms_iou_threshold(m_props.m_nms_iou_threshold.get());
+                GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting NMS IoU threshold failed, status = %d", status);
+            }
+        }
+        if (m_props.m_nms_max_proposals_per_class.was_changed()) {
+            GST_CHECK(has_nms_output, HAILO_INVALID_OPERATION, m_element, RESOURCE, "NMS max proposals per class is set, but there is no NMS output in this model.");
+            if (HailoRTCommon::is_nms(out_vs.get_info())) {
+                status = out_vs.set_nms_max_proposals_per_class(m_props.m_nms_max_proposals_per_class.get());
+                GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting NMS max proposals per class failed, status = %d", status);
+            }
+        }
+    }
+
     status = GST_HAILORECV(m_hailorecv)->impl->set_output_vstreams(std::move(vstreams->second), m_props.m_batch_size.get());
     GST_CHECK_SUCCESS(status, m_element, RESOURCE, "Setting output vstreams failed, status = %d", status);
 
@@ -760,7 +846,7 @@ Expected<std::string> HailoNetImpl::get_network_group_name(const std::string &ne
 {
     for (const auto &network_group_name : m_net_group_handle->hef()->get_network_groups_names()) {
         // Look for network_group with the given name
-        if (network_name == network_group_name) {
+        if ((network_name == network_group_name) || (network_name == HailoRTDefaults::get_network_name(network_group_name))) {
             return std::string(network_group_name);
         }
 
index 2840eb8eb89779877cc10f5cd5f8b31359961009..142c30fad5d10be78685d4406ba26485cd8ab0f3 100644 (file)
@@ -55,7 +55,7 @@ public:
         m_is_active(false), m_device_count(0), m_vdevice_key(DEFAULT_VDEVICE_KEY), m_scheduling_algorithm(HAILO_SCHEDULING_ALGORITHM_ROUND_ROBIN),
         m_scheduler_timeout_ms(HAILO_DEFAULT_SCHEDULER_TIMEOUT_MS), m_scheduler_threshold(HAILO_DEFAULT_SCHEDULER_THRESHOLD), m_scheduler_priority(HAILO_SCHEDULER_PRIORITY_NORMAL),
         m_multi_process_service(HAILO_DEFAULT_MULTI_PROCESS_SERVICE), m_input_quantized(true), m_output_quantized(true), m_input_format_type(HAILO_FORMAT_TYPE_AUTO),
-        m_output_format_type(HAILO_FORMAT_TYPE_AUTO)
+        m_output_format_type(HAILO_FORMAT_TYPE_AUTO), m_nms_score_threshold(0), m_nms_iou_threshold(0), m_nms_max_proposals_per_class(0)
 
     {}
 
@@ -75,6 +75,9 @@ public:
     HailoElemProperty<gboolean> m_output_quantized;
     HailoElemProperty<hailo_format_type_t> m_input_format_type;
     HailoElemProperty<hailo_format_type_t> m_output_format_type;
+    HailoElemProperty<gfloat> m_nms_score_threshold;
+    HailoElemProperty<gfloat> m_nms_iou_threshold;
+    HailoElemProperty<guint32> m_nms_max_proposals_per_class;
 };
 
 class HailoNetImpl final
index c04927b8d6ee1c310c5aec6b84287af50c9d3e35..25ca742c3bec0b09c931691fde5cc388df84191d 100644 (file)
@@ -149,18 +149,26 @@ GstFlowReturn HailoSendImpl::handle_frame(GstVideoFilter */*filter*/, GstVideoFr
         return GST_FLOW_OK;
     }
 
-    guint8 *frame_buffer = reinterpret_cast<guint8*>(GST_VIDEO_FRAME_PLANE_DATA(frame, 0));
+    hailo_pix_buffer_t pix_buffer = {};
+    pix_buffer.index = 0;
+    pix_buffer.number_of_planes = GST_VIDEO_INFO_N_PLANES(&frame->info);
+    for (uint32_t plane_index = 0; plane_index < pix_buffer.number_of_planes; plane_index++) {
+        pix_buffer.planes[plane_index].bytes_used = GST_VIDEO_INFO_PLANE_STRIDE(&frame->info, plane_index) * GST_VIDEO_INFO_COMP_HEIGHT(&frame->info, plane_index);
+        pix_buffer.planes[plane_index].plane_size = GST_VIDEO_INFO_PLANE_STRIDE(&frame->info, plane_index) * GST_VIDEO_INFO_COMP_HEIGHT(&frame->info, plane_index);
+        pix_buffer.planes[plane_index].user_ptr = GST_VIDEO_FRAME_PLANE_DATA(frame, plane_index);
+    }
+
     hailo_status status = HAILO_UNINITIALIZED;
 
     if (m_props.m_debug.get()) {
         std::chrono::duration<double, std::milli> latency;
         std::chrono::time_point<std::chrono::system_clock> start_time;
         start_time = std::chrono::system_clock::now();
-        status = write_to_vstreams(frame_buffer, GST_VIDEO_FRAME_SIZE(frame));
+        status = write_to_vstreams(pix_buffer);
         latency = std::chrono::system_clock::now() - start_time;
         GST_DEBUG("hailosend latency: %f milliseconds", latency.count());
     } else {
-        status = write_to_vstreams(frame_buffer, GST_VIDEO_FRAME_SIZE(frame));
+        status = write_to_vstreams(pix_buffer);
     }
 
     if (HAILO_SUCCESS != status) {
@@ -169,10 +177,13 @@ GstFlowReturn HailoSendImpl::handle_frame(GstVideoFilter */*filter*/, GstVideoFr
     return GST_FLOW_OK;
 }
 
-hailo_status HailoSendImpl::write_to_vstreams(void *buf, size_t size)
+hailo_status HailoSendImpl::write_to_vstreams(const hailo_pix_buffer_t &pix_buffer)
 {
     for (auto &in_vstream : m_input_vstreams) {
-        auto status = in_vstream.write(MemoryView(buf, size));
+        auto status = in_vstream.write(pix_buffer);
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            return status;
+        }
         GST_CHECK_SUCCESS(status, m_element, STREAM, "Failed writing to input vstream %s, status = %d", in_vstream.name().c_str(), status);
     }
     return HAILO_SUCCESS;
index 5ea816b31e105fb658b901ba591b97da702e0ade..33a4d7a3daed48be6861f12c2422341c43e7269d 100644 (file)
@@ -89,7 +89,7 @@ public:
     }
 
 private:
-    hailo_status write_to_vstreams(void *buf, size_t size);
+    hailo_status write_to_vstreams(const hailo_pix_buffer_t &pix_buffer);
     
     GstHailoSend *m_element;
     GstHailoNet *m_hailonet;
index 83f075aeb88b4940d56802fc7afcb42f99a78f21..709c7d302dbf9965d7eccaabbb2b1505f77c3f3a 100644 (file)
@@ -111,7 +111,6 @@ Expected<std::shared_ptr<VDevice>> NetworkGroupHandle::create_vdevice(const void
     }
     auto result = create_unique_vdevice(element, device_count, scheduling_algorithm, multi_process_service);
     GST_CHECK_EXPECTED(result, element, RESOURCE, "Failed creating vdevice, status = %d", result.status());
-    m_vdevices.insert(result.value());
     return result;
 }
 
@@ -361,7 +360,7 @@ std::shared_ptr<ConfiguredNetworkGroup> NetworkGroupConfigManager::get_configure
         return nullptr;
     }
 
-    return found->second;
+    return found->second.lock();
 }
 
 std::string NetworkGroupConfigManager::get_configure_string(const std::string &device_id, const std::string &hef_hash,
index 0a4fabf46e5eec275176588f465d65fce6dc02e3..69381ddb20498dd19e5709479569f64c667c671e 100644 (file)
@@ -51,7 +51,7 @@ private:
         const char *net_group_name, uint16_t batch_size);
 
     // TODO: change this map to store only the shared network_groups (used by multiple hailonets with the same vdevices)
-    std::unordered_map<std::string, std::shared_ptr<ConfiguredNetworkGroup>> m_configured_net_groups;
+    std::unordered_map<std::string, std::weak_ptr<ConfiguredNetworkGroup>> m_configured_net_groups;
     std::unordered_map<device_id_t, std::unordered_map<network_name_t, hailonet_name_t>> m_configured_networks;
     std::mutex m_mutex;
 };
index ffdcfc9ad21abf1a5554b8a76f28da5afae0c0cc..88f3b8df49ea0b9e8b4b3d932095506464b300d9 100644 (file)
@@ -1,4 +1,4 @@
 cmake_minimum_required(VERSION 3.11.0)
 
-include(externals/pybind11.cmake)
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/pybind11.cmake)
 add_subdirectory(src)
diff --git a/hailort/libhailort/bindings/python/externals/pybind11.cmake b/hailort/libhailort/bindings/python/externals/pybind11.cmake
deleted file mode 100644 (file)
index db0b705..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-cmake_minimum_required(VERSION 3.11.0)
-
-include(FetchContent)
-
-if(NOT PYTHON_EXECUTABLE AND PYBIND11_PYTHON_VERSION)
-    # venv version is prioritized (instead of PYBIND11_PYTHON_VERSION) if PYTHON_EXECUTABLE is not set.
-    # See https://pybind11.readthedocs.io/en/stable/changelog.html#v2-6-0-oct-21-2020
-    if((${CMAKE_VERSION} VERSION_LESS "3.22.0") AND (NOT WIN32))
-        find_package(PythonInterp ${PYBIND11_PYTHON_VERSION} REQUIRED)
-        set(PYTHON_EXECUTABLE ${Python_EXECUTABLE})
-    else()
-        find_package(Python3 ${PYBIND11_PYTHON_VERSION} REQUIRED EXACT COMPONENTS Interpreter Development)
-        set(PYTHON_EXECUTABLE ${Python3_EXECUTABLE})
-    endif()
-endif()
-
-FetchContent_Declare(
-    pybind11
-    GIT_REPOSITORY https://github.com/pybind/pybind11.git
-    GIT_TAG 80dc998efced8ceb2be59756668a7e90e8bef917 # Version 2.10.1
-    #GIT_SHALLOW TRUE
-    SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/pybind11"
-    BINARY_DIR "${CMAKE_CURRENT_LIST_DIR}/pybind11"
-)
-
-if(NOT HAILO_OFFLINE_COMPILATION)
-    # https://stackoverflow.com/questions/65527126/disable-install-for-fetchcontent
-    FetchContent_GetProperties(pybind11)
-    if(NOT pybind11_POPULATED)
-        FetchContent_Populate(pybind11)
-        add_subdirectory(${pybind11_SOURCE_DIR} ${pybind11_BINARY_DIR} EXCLUDE_FROM_ALL)
-    endif()
-else()
-    add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/pybind11 EXCLUDE_FROM_ALL)
-endif()
\ No newline at end of file
index bf078b79949ad8c9dffbc1c1140735bdf72d328d..1c46e606369fc401dbb34f6b9a6ca379267070d4 100644 (file)
@@ -154,7 +154,7 @@ class HailoHWObject(object):
         if len(self._loaded_network_groups) == 1:
             return self._loaded_network_groups[0].name
         raise HailoHWObjectException(
-            "This function is only supported when there is exactly 1 loaded network group. one should use HEF.get_network_group_names() / ConfiguredNetwork.name / ActivatedNetwork.name")
+            "This function is only supported when there is exactly 1 loaded network group. Use HEF.get_network_group_names() / ConfiguredNetwork.name / ActivatedNetwork.name")
 
     def get_output_shapes(self):
         """Get the model output shapes, as returned to the user (without any hardware padding) (deprecated).
index 6f73059fdd6f3440051713d687d1efa41c8cf08c..5ef89c8c6babe404457babc3a40da55f62c980aa 100644 (file)
@@ -685,30 +685,6 @@ class ConfiguredNetwork(object):
         with ExceptionWrapper():
             return self._configured_network.get_udp_rates_dict(int(fps), int(max_supported_rate_bytes))
 
-    def _before_fork(self):
-        if self._configured_network is not None:
-            self._configured_network.before_fork()
-            for input_vstreams in self._input_vstreams_holders:
-                input_vstreams.before_fork()
-            for output_vstreams in self._output_vstreams_holders:
-                output_vstreams.before_fork()
-
-    def _after_fork_in_parent(self):
-        if self._configured_network is not None:
-            self._configured_network.after_fork_in_parent()
-            for input_vstreams in self._input_vstreams_holders:
-                input_vstreams.after_fork_in_parent()
-            for output_vstreams in self._output_vstreams_holders:
-                output_vstreams.after_fork_in_parent()
-
-    def _after_fork_in_child(self):
-        if self._configured_network is not None:
-            self._configured_network.after_fork_in_child()
-            for input_vstreams in self._input_vstreams_holders:
-                input_vstreams.after_fork_in_child()
-            for output_vstreams in self._output_vstreams_holders:
-                output_vstreams.after_fork_in_child()
-
     def _create_input_vstreams(self, input_vstreams_params):
         input_vstreams_holder = self._configured_network.InputVStreams(input_vstreams_params)
         self._input_vstreams_holders.append(input_vstreams_holder)
@@ -752,7 +728,7 @@ class ConfiguredNetwork(object):
         Args:
             timeout_ms (int): Timeout in milliseconds.
         """
-        name = network_name if network_name is not None else self.name
+        name = network_name if network_name is not None else ""
         return self._configured_network.set_scheduler_timeout(timeout_ms, name)
 
     def set_scheduler_threshold(self, threshold):
@@ -956,6 +932,20 @@ class InferVStreams(object):
             self._hw_time = time.perf_counter() - time_before_infer
 
         for name, result_array in output_buffers.items():
+            # TODO: HRT-11726 - Combine Pyhailort NMS and NMS_WITH_BYTE_MASK decoding function
+            if output_buffers_info[name].output_order == FormatOrder.HAILO_NMS_WITH_BYTE_MASK:
+                nms_shape = output_buffers_info[name].vstream_info.nms_shape
+                output_dtype = output_buffers_info[name].output_dtype
+                input_stream_infos = self._configured_net_group.get_input_stream_infos()
+                if len(input_stream_infos) != 1:
+                    raise Exception("Output format HAILO_NMS_WITH_BYTE_MASK should have 1 input. Number of inputs: {}".format(len(input_stream_infos)))
+                input_height = input_stream_infos[0].shape[0]
+                input_width = input_stream_infos[0].shape[1]
+                output_buffers[name] = HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_format(result_array,
+                    nms_shape.number_of_classes, batch_size, input_height, input_width,
+                    nms_shape.max_bboxes_per_class, output_dtype, self._tf_nms_format)
+                continue
+
             is_nms = output_buffers_info[name].is_nms
             if not is_nms:
                 continue
@@ -969,7 +959,7 @@ class InferVStreams(object):
                     output_dtype, quantized_empty_bbox)
             else:
                 output_buffers[name] = HailoRTTransformUtils.output_raw_buffer_to_nms_format(result_array, nms_shape.number_of_classes)
-        
+
         self._total_time = time.perf_counter() - time_before_infer_calcs
         return output_buffers
 
@@ -1032,11 +1022,90 @@ class InferVStreams(object):
                 input_layer_name))
             input_data[input_layer_name] = numpy.asarray(input_data[input_layer_name], order='C')
 
+    def set_nms_score_threshold(self, threshold):
+        """Set NMS score threshold, used for filtering out candidates. Any box with a score below this threshold is suppressed.
+
+        Args:
+            threshold (float): NMS score threshold to set.
+
+        Note:
+            This function will fail in cases where there is no output with NMS operations on the CPU.
+        """
+        return self._infer_pipeline.set_nms_score_threshold(threshold)
+
+    def set_nms_iou_threshold(self, threshold):
+        """Set NMS intersection over union overlap threshold,
+            used in the NMS iterative elimination process where potential duplicates of detected items are suppressed.
+
+        Args:
+            threshold (float): NMS IoU threshold to set.
+
+        Note:
+            This function will fail in cases where there is no output with NMS operations on the CPU.
+        """
+        return self._infer_pipeline.set_nms_iou_threshold(threshold)
+
+    def set_nms_max_proposals_per_class(self, max_proposals_per_class):
+        """Set a limit for the maximum number of boxes per class.
+
+        Args:
+            max_proposals_per_class (int): NMS max proposals per class to set.
+
+        Note:
+            This function will fail in cases where there is no output with NMS operations on the CPU.
+        """
+        return self._infer_pipeline.set_nms_max_proposals_per_class(max_proposals_per_class)
+
     def __exit__(self, *args):
         self._infer_pipeline.release()
         return False
 
 
+class HailoDetectionBox(object):
+# TODO: HRT-11492 - Add documentation to class and functions
+
+    def __init__(self, bbox, class_id, mask_size, mask):
+        self._bbox = bbox
+        self._mask_size = mask_size
+        self._mask = mask
+        self._class_id = class_id
+
+    @property
+    def bbox(self):
+        return self._bbox
+
+    @property
+    def y_min(self):
+        return self._bbox[0]
+
+    @property
+    def x_min(self):
+        return self._bbox[1]
+
+    @property
+    def y_max(self):
+        return self._bbox[2]
+
+    @property
+    def x_max(self):
+        return self._bbox[3]
+
+    @property
+    def score(self):
+        return self._bbox[4]
+
+    @property
+    def class_id(self):
+        return self._class_id
+
+    @property
+    def mask_size(self):
+        return self._mask_size
+
+    @property
+    def mask(self):
+        return self._mask
+
 class HailoRTTransformUtils(object):
     @staticmethod
     def get_dtype(data_bytes):
@@ -1064,6 +1133,9 @@ class HailoRTTransformUtils(object):
         with ExceptionWrapper():
             src_format_type = HailoRTTransformUtils._get_format_type(src_buffer.dtype)
             dst_format_type = HailoRTTransformUtils._get_format_type(dst_buffer.dtype)
+            if not _pyhailort.is_qp_valid(quant_info):
+                raise HailoRTInvalidOperationException("quant_info is invalid as the model was compiled with multiple quant_infos. "
+                                                       "Please compile again or provide a list of quant_infos.")
             _pyhailort.dequantize_output_buffer(src_buffer, dst_buffer, src_format_type, dst_format_type, elements_count, quant_info)
 
     @staticmethod
@@ -1079,8 +1151,20 @@ class HailoRTTransformUtils(object):
         with ExceptionWrapper():
             src_format_type = HailoRTTransformUtils._get_format_type(raw_buffer.dtype)
             dst_format_type = HailoRTTransformUtils._get_format_type(dst_dtype)
+            if not _pyhailort.is_qp_valid(quant_info):
+                raise HailoRTInvalidOperationException("quant_info is invalid as the model was compiled with multiple quant_infos. "
+                                                       "Please compile again or provide a list of quant_infos.")
             _pyhailort.dequantize_output_buffer_in_place(raw_buffer, src_format_type, dst_format_type, elements_count, quant_info)
 
+    @staticmethod
+    def is_qp_valid(quant_info):
+        """Returns if quant_info is valid.
+
+        Args:
+            quant_info (:class:`~hailo_platform.pyhailort.pyhailort.QuantInfo`): The quantization info.
+        """
+        return _pyhailort.is_qp_valid(quant_info)
+
     @staticmethod
     def quantize_input_buffer(src_buffer, dst_buffer, elements_count, quant_info):
         """Quantize the data in input buffer `src_buffer` and output it to the buffer `dst_buffer`
@@ -1096,6 +1180,9 @@ class HailoRTTransformUtils(object):
         with ExceptionWrapper():
             src_format_type = HailoRTTransformUtils._get_format_type(src_buffer.dtype)
             dst_format_type = HailoRTTransformUtils._get_format_type(dst_buffer.dtype)
+            if not _pyhailort.is_qp_valid(quant_info):
+                raise HailoRTInvalidOperationException("quant_info is invalid as the model was compiled with multiple quant_infos. "
+                                                       "Please compile again or provide a list of quant_infos.")
             _pyhailort.quantize_input_buffer(src_buffer, dst_buffer, src_format_type, dst_format_type, elements_count, quant_info)
 
     @staticmethod
@@ -1142,6 +1229,121 @@ class HailoRTTransformUtils(object):
                 offset += BBOX_PARAMS * class_bboxes_amount
         return converted_output_frame
 
+    @staticmethod
+    def _output_raw_buffer_to_nms_with_byte_mask_format(raw_output_buffer, number_of_classes, batch_size, image_height, image_width,
+            max_bboxes_per_class, output_dtype, is_tf_format=False):
+        if is_tf_format:
+            if os.environ.get('HAILO_TF_FORMAT_INTERNAL'):
+                return HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_tf_format(raw_output_buffer, number_of_classes,
+                    batch_size, image_height, image_width, max_bboxes_per_class, output_dtype)
+            else:
+                raise HailoRTException("TF format is not supported with HAILO_NMS_WITH_BYTE_MASK format order")
+        else:
+            return HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_hailo_format(raw_output_buffer, number_of_classes)
+
+    @staticmethod
+    def _output_raw_buffer_to_nms_with_byte_mask_hailo_format(raw_output_buffer, number_of_classes):
+        converted_output_buffer = []
+        for frame in raw_output_buffer:
+            converted_output_buffer.append(
+                HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_hailo_format_single_frame(frame, number_of_classes))
+        return converted_output_buffer
+
+    @staticmethod
+    def _output_raw_buffer_to_nms_with_byte_mask_hailo_format_single_frame(raw_output_buffer, number_of_classes):
+        offset = 0
+        converted_output_frame = []
+        for class_i in range(number_of_classes):
+            class_bboxes_amount = int(raw_output_buffer[offset])
+            offset += 1
+            classes_boxes = []
+
+            if class_bboxes_amount != 0:
+                for bbox_i in range(class_bboxes_amount):
+                    bbox = raw_output_buffer[offset : offset + BBOX_PARAMS]
+                    offset += BBOX_PARAMS
+
+                    bbox_mask_size_in_bytes = raw_output_buffer[offset]
+                    offset += 1
+                    bbox_mask_size = int(bbox_mask_size_in_bytes / 4)
+
+                    bbox_mask = raw_output_buffer[offset : (offset + bbox_mask_size)]
+                    offset += bbox_mask_size
+
+                    hailo_bbox = HailoDetectionBox(bbox, class_i, bbox_mask_size_in_bytes, bbox_mask)
+                    classes_boxes.append(hailo_bbox)
+
+            converted_output_frame.append(classes_boxes)
+        return converted_output_frame
+
+    @staticmethod
+    def _output_raw_buffer_to_nms_with_byte_mask_tf_format(raw_output_buffer, number_of_classes, batch_size, image_height, image_width,
+            max_bboxes_per_class, output_dtype):
+        offset = 0
+        # The + 1 is for the extra row containing the bbox coordinates, score and class_id
+        output_height = image_height + 1
+
+        # We create the tf_format buffer with reversed max_bboxes_per_class/features for performance optimization
+        converted_output_buffer = numpy.empty([batch_size, max_bboxes_per_class, output_height, image_width], dtype=output_dtype)
+
+        for frame_idx in range(len(raw_output_buffer)):
+            offset = HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_tf_format_single_frame(
+                raw_output_buffer[frame_idx], converted_output_buffer[frame_idx], number_of_classes, max_bboxes_per_class,
+                image_height, image_width, offset)
+        converted_output_buffer = numpy.moveaxis(converted_output_buffer, 1, 3)
+        return converted_output_buffer
+
+    @staticmethod
+    def _output_raw_buffer_to_nms_with_byte_mask_tf_format_single_frame(raw_output_buffer, converted_output_frame, number_of_classes,
+        max_boxes, image_height, image_width, offset):
+
+        detections = []
+        for class_i in range(number_of_classes):
+            class_bboxes_amount = int(raw_output_buffer[offset])
+            offset += 1
+
+            if class_bboxes_amount != 0:
+                for bbox_i in range(class_bboxes_amount):
+                    bbox = raw_output_buffer[offset : offset + BBOX_PARAMS]
+                    offset += BBOX_PARAMS
+
+                    bbox_mask_size_in_bytes = raw_output_buffer[offset]
+                    offset += 1
+                    bbox_mask_size = int(bbox_mask_size_in_bytes // 4)
+
+                    bbox_mask = raw_output_buffer[offset : (offset + bbox_mask_size)]
+                    offset += bbox_mask_size
+
+                    y_min = bbox[0] * image_height
+                    x_min = bbox[1] * image_width
+                    bbox_width = round((bbox[3] - bbox[1]) * image_width)
+                    resized_mask = numpy.empty([image_height, image_width])
+
+                    for i in range(bbox_mask_size):
+                        if (bbox_mask[i] == 1):
+                            x = int(x_min + (i % bbox_width))
+                            y = int(y_min + (i // bbox_width))
+                            if (x >= image_width):
+                                x = image_width - 1
+                            if ( y >= image_height):
+                                y = image_height - 1
+                            resized_mask[y][x] = 1
+
+                    padding = image_width - len(bbox)
+                    bbox_padded = numpy.pad(bbox, pad_width=(0, padding), mode='constant')
+                    bbox_padded[len(bbox)] = class_i
+
+                    converted_detection = numpy.append(resized_mask ,[bbox_padded], axis=0)
+                    detections.append((bbox[4], converted_detection))
+
+        detections.sort(key=lambda tup: tup[0], reverse=True)
+        for detection_idx in range(len(detections)):
+            if (detection_idx >= max_boxes):
+                return offset
+            converted_output_frame[detection_idx] = detections[detection_idx][1]
+
+        return offset
+
     @staticmethod
     def _get_format_type(dtype):
         if dtype == numpy.uint8:
@@ -1313,7 +1515,7 @@ class HailoFormatFlags(_pyhailort.FormatFlags):
 
 SUPPORTED_PROTOCOL_VERSION = 2
 SUPPORTED_FW_MAJOR = 4
-SUPPORTED_FW_MINOR = 14
+SUPPORTED_FW_MINOR = 15
 SUPPORTED_FW_REVISION = 0
 
 MEGA_MULTIPLIER = 1000.0 * 1000.0
@@ -1323,7 +1525,8 @@ class DeviceArchitectureTypes(IntEnum):
     HAILO8_A0 = 0
     HAILO8 = 1
     HAILO8L = 2
-    HAILO15 = 3
+    HAILO15H = 3
+    PLUTO = 4
 
     def __str__(self):
         return self.name
@@ -1379,7 +1582,7 @@ class BoardInformation(object):
         if ((device_arch == DeviceArchitectureTypes.HAILO8) or
             (device_arch == DeviceArchitectureTypes.HAILO8L)):
             return 'hailo8'
-        elif device_arch == DeviceArchitectureTypes.HAILO15:
+        elif device_arch == DeviceArchitectureTypes.HAILO15H:
             return 'hailo15'
         else:
             raise HailoRTException("Unsupported device architecture.")
@@ -2415,31 +2618,9 @@ class VDevice(object):
 
         self._open_vdevice()
 
-    def _before_fork(self):
-        if self._vdevice is not None:
-            self._vdevice.before_fork()
-            for configured_network in self._loaded_network_groups:
-                configured_network._before_fork()
-
-    def _after_fork_in_parent(self):
-        if self._vdevice is not None:
-            self._vdevice.after_fork_in_parent()
-            for configured_network in self._loaded_network_groups:
-                configured_network._after_fork_in_parent()
-
-    def _after_fork_in_child(self):
-        if self._vdevice is not None:
-            self._vdevice.after_fork_in_child()
-            for configured_network in self._loaded_network_groups:
-                configured_network._after_fork_in_child()
-
     def _open_vdevice(self):
         if self._params is None:
             self._params = VDevice.create_params()
-        if  sys.platform != "win32" and self._params.multi_process_service:
-            os.register_at_fork(before=lambda: self._before_fork())
-            os.register_at_fork(after_in_parent=lambda: self._after_fork_in_parent())
-            os.register_at_fork(after_in_child=lambda: self._after_fork_in_child())
         with ExceptionWrapper():
             device_ids = [] if self._device_ids is None else self._device_ids
             self._vdevice = _pyhailort.VDevice.create(self._params, device_ids)
@@ -2518,20 +2699,18 @@ class InputVStreamParams(object):
     """Parameters of an input virtual stream (host to device)."""
 
     @staticmethod
-    def make(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
+    def make(configured_network, quantized=None, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
         """Create input virtual stream params from a configured network group. These params determine the format of the
         data that will be fed into the network group.
 
         Args:
             configured_network (:class:`ConfiguredNetwork`): The configured network group for which
                 the params are created.
-            quantized (bool): Whether the data fed into the chip is already quantized. True means
-                the data is already quantized. False means it's HailoRT's responsibility to quantize
-                (scale) the data. Defaults to True.
+            quantized (bool): Deprecated parameter that will be ignored. Whether to quantize (scale)
+                the data will be decided by the src-data and dst-data types.
             format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
-                default format type of the data for all input virtual streams. If quantized is False,
-                the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.FLOAT32`. Otherwise,
-                the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
+                default format type of the data for all input virtual streams.
+                The default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
                 which means the data is fed in the same format expected by the device (usually
                 uint8).
             timeout_ms (int): The default timeout in milliseconds for all input virtual streams.
@@ -2545,10 +2724,9 @@ class InputVStreamParams(object):
             params.
         """
         if format_type is None:
-            if not quantized:
-                format_type = FormatType.FLOAT32
-            else:
-                format_type = FormatType.AUTO
+            format_type = FormatType.AUTO
+        if quantized is None:
+            quantized = format_type != FormatType.FLOAT32
         if timeout_ms is None:
             timeout_ms = DEFAULT_VSTREAM_TIMEOUT_MS
         if queue_size is None:
@@ -2559,20 +2737,18 @@ class InputVStreamParams(object):
                 format_type, timeout_ms, queue_size)
 
     @staticmethod
-    def make_from_network_group(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
+    def make_from_network_group(configured_network, quantized=None, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
         """Create input virtual stream params from a configured network group. These params determine the format of the
         data that will be fed into the network group.
 
         Args:
             configured_network (:class:`ConfiguredNetwork`): The configured network group for which
                 the params are created.
-            quantized (bool): Whether the data fed into the chip is already quantized. True means
-                the data is already quantized. False means it's HailoRT's responsibility to quantize
-                (scale) the data. Defaults to True.
+            quantized (bool): Deprecated parameter that will be ignored. Whether to quantize (scale)
+                the data will be decided by the src-data and dst-data types.
             format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
-                default format type of the data for all input virtual streams. If quantized is False,
-                the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.FLOAT32`. Otherwise,
-                the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
+                default format type of the data for all input virtual streams.
+                The default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
                 which means the data is fed in the same format expected by the device (usually
                 uint8).
             timeout_ms (int): The default timeout in milliseconds for all input virtual streams.
@@ -2592,20 +2768,18 @@ class OutputVStreamParams(object):
     """Parameters of an output virtual stream (device to host)."""
 
     @staticmethod
-    def make(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
+    def make(configured_network, quantized=None, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
         """Create output virtual stream params from a configured network group. These params determine the format of the
         data that will be returned from the network group.
 
         Args:
             configured_network (:class:`ConfiguredNetwork`): The configured network group for which
                 the params are created.
-            quantized (bool): Whether the data returned from the chip should be quantized. True means
-                the data is still quantized. False means it's HailoRT's responsibility to de-quantize
-                (rescale) the data. Defaults to True.
+            quantized (bool): Deprecated parameter that will be ignored. Whether to de-quantize (rescale)
+                the data will be decided by the src-data and dst-data types.
             format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
-                default format type of the data for all output virtual streams. If quantized is False,
-                the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.FLOAT32`. Otherwise,
-                the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
+                default format type of the data for all output virtual streams.
+                The default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
                 which means the returned data is in the same format returned from the device (usually
                 uint8).
             timeout_ms (int): The default timeout in milliseconds for all output virtual streams.
@@ -2619,10 +2793,9 @@ class OutputVStreamParams(object):
             params.
         """
         if format_type is None:
-            if not quantized:
-                format_type = FormatType.FLOAT32
-            else:
-                format_type = FormatType.AUTO
+            format_type = FormatType.AUTO
+        if quantized is None:
+            quantized = format_type != FormatType.FLOAT32
         if timeout_ms is None:
             timeout_ms = DEFAULT_VSTREAM_TIMEOUT_MS
         if queue_size is None:
@@ -2633,21 +2806,19 @@ class OutputVStreamParams(object):
                 format_type, timeout_ms, queue_size)
 
     @staticmethod
-    def make_from_network_group(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
+    def make_from_network_group(configured_network, quantized=None, format_type=None, timeout_ms=None, queue_size=None, network_name=None):
         """Create output virtual stream params from a configured network group. These params determine the format of the
         data that will be returned from the network group.
 
         Args:
             configured_network (:class:`ConfiguredNetwork`): The configured network group for which
                 the params are created.
-            quantized (bool): Whether the data returned from the chip is already quantized. True means
-                the data is already quantized. False means it's HailoRT's responsibility to quantize
-                (scale) the data. Defaults to True.
+            quantized (bool): Deprecated parameter that will be ignored. Whether to de-quantize (rescale)
+                the data is decided by the src-data and dst-data types.
             format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
-                default format type of the data for all output virtual streams. If quantized is False,
-                the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.FLOAT32`. Otherwise,
-                the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
-                which means the data is fed in the same format expected by the device (usually
+                default format type of the data for all output virtual streams.
+                The default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
+                which means the returned data is in the same format returned from the device (usually
                 uint8).
             timeout_ms (int): The default timeout in milliseconds for all output virtual streams.
                 Defaults to DEFAULT_VSTREAM_TIMEOUT_MS. In case of timeout, :class:`HailoRTTimeout` will be raised.
@@ -2662,21 +2833,19 @@ class OutputVStreamParams(object):
         return OutputVStreamParams.make(configured_network, quantized, format_type, timeout_ms, queue_size, network_name)
 
     @staticmethod
-    def make_groups(configured_network, quantized=True, format_type=None, timeout_ms=None, queue_size=None):
+    def make_groups(configured_network, quantized=None, format_type=None, timeout_ms=None, queue_size=None):
         """Create output virtual stream params from a configured network group. These params determine the format of the
         data that will be returned from the network group. The params groups are splitted with respect to their underlying streams for multi process usges.
 
         Args:
             configured_network (:class:`ConfiguredNetwork`): The configured network group for which
                 the params are created.
-            quantized (bool): Whether the data returned from the chip is already quantized. True means
-                the data is already quantized. False means it's HailoRT's responsibility to quantize
-                (scale) the data. Defaults to True.
+            quantized (bool): Deprecated parameter that will be ignored. Whether to de-quantize (rescale)
+                the data is decided by the src-data and dst-data types.
             format_type (:class:`~hailo_platform.pyhailort.pyhailort.FormatType`): The
-                default format type of the data for all output virtual streams. If quantized is False,
-                the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.FLOAT32`. Otherwise,
-                the default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
-                which means the data is fed in the same format expected by the device (usually
+                default format type of the data for all output virtual streams.
+                The default is :attr:`~hailo_platform.pyhailort.pyhailort.FormatType.AUTO`,
+                which means the returned data is in the same format returned from the device (usually
                 uint8).
             timeout_ms (int): The default timeout in milliseconds for all output virtual streams.
                 Defaults to DEFAULT_VSTREAM_TIMEOUT_MS. In case of timeout, :class:`HailoRTTimeout` will be raised.
@@ -2731,7 +2900,7 @@ class InputVStream(object):
         Args:
             input_data (:obj:`numpy.ndarray`): Data to run inference on.
         """
-        
+
         if input_data.dtype != self._input_dtype:
             input_data = input_data.astype(self._input_dtype)
 
@@ -2758,19 +2927,6 @@ class InputVStream(object):
         with ExceptionWrapper():
             return self._send_object.info
 
-    def _before_fork(self):
-        if self._send_object is not None:
-            self._send_object.before_fork()
-
-    def _after_fork_in_parent(self):
-        if self._send_object is not None:
-            self._send_object.after_fork_in_parent()
-
-    def _after_fork_in_child(self):
-        if self._send_object is not None:
-            self._send_object.after_fork_in_child()
-
-
 class InputVStreams(object):
     """Input vstreams pipelines that allows to send data, to be used as a context manager."""
 
@@ -2816,21 +2972,10 @@ class InputVStreams(object):
     def __exit__(self, *args):
         self._input_vstreams_holder.__exit__(*args)
         return False
-    
+
     def __iter__(self):
         return iter(self._vstreams.values())
 
-    def _before_fork(self):
-        for vstream in self._vstreams.values():
-            vstream._before_fork()
-
-    def _after_fork_in_parent(self):
-        for vstream in self._vstreams.values():
-            vstream._after_fork_in_parent()
-
-    def _after_fork_in_child(self):
-        for vstream in self._vstreams.values():
-            vstream._after_fork_in_child()
 
 
 class OutputLayerUtils(object):
@@ -2856,7 +3001,11 @@ class OutputLayerUtils(object):
     @property
     def output_dtype(self):
         return _pyhailort.get_dtype(self._user_buffer_format.type)
-    
+
+    @property
+    def output_order(self):
+        return self._user_buffer_format.order
+
     @property
     def output_shape(self):
         return self._output_shape
@@ -2864,7 +3013,7 @@ class OutputLayerUtils(object):
     @property
     def vstream_info(self):
         return self._vstream_info
-    
+
     @property
     def output_tensor_info(self):
         return self.output_shape, self.output_dtype
@@ -2887,7 +3036,8 @@ class OutputLayerUtils(object):
 
     @property
     def tf_nms_fomrat_shape(self):
-        if not self.is_nms:
+        # TODO: HRT-11726 - Combine is_nms for HAILO_NMS and NMS_WITH_BYTE_MASK
+        if not self.is_nms and not self.output_order == FormatOrder.HAILO_NMS_WITH_BYTE_MASK:
             raise HailoRTException("Requested NMS info for non-NMS layer")
         nms_shape = self._vstream_info.nms_shape
         return [nms_shape.number_of_classes, BBOX_PARAMS,
@@ -2907,6 +3057,11 @@ class OutputVStream(object):
         if self._is_nms:
             self._quantized_empty_bbox = self._output_layer_utils.quantized_empty_bbox
         self._tf_nms_format = tf_nms_format
+        self._input_stream_infos = configured_network.get_input_stream_infos()
+
+    @property
+    def output_order(self):
+        return self._output_layer_utils.output_order
 
     @property
     def shape(self):
@@ -2936,6 +3091,17 @@ class OutputVStream(object):
         with ExceptionWrapper():
             result_array = self._recv_object.recv()
 
+        if self.output_order == FormatOrder.HAILO_NMS_WITH_BYTE_MASK:
+            nms_shape = self._vstream_info.nms_shape
+            if len(self._input_stream_infos) != 1:
+                raise Exception("Output format HAILO_NMS_WITH_BYTE_MASK should have 1 input. Number of inputs: {}".format(len(self._input_stream_infos)))
+            input_height = self._input_stream_infos[0].shape[0]
+            input_width = self._input_stream_infos[0].shape[1]
+            res = HailoRTTransformUtils._output_raw_buffer_to_nms_with_byte_mask_format(result_array,
+                nms_shape.number_of_classes, 1, input_height, input_width, nms_shape.max_bboxes_per_class,
+                self._output_dtype, self._tf_nms_format)
+            return res
+
         if self._is_nms:
             nms_shape = self._vstream_info.nms_shape
             if self._tf_nms_format:
@@ -2957,17 +3123,39 @@ class OutputVStream(object):
         with ExceptionWrapper():
             return self._recv_object.info
 
-    def _before_fork(self):
-        if self._recv_object is not None:
-            self._recv_object.before_fork()
+    def set_nms_score_threshold(self, threshold):
+        """Set NMS score threshold, used for filtering out candidates. Any box with score<TH is suppressed.
 
-    def _after_fork_in_parent(self):
-        if self._recv_object is not None:
-            self._recv_object.after_fork_in_parent()
+        Args:
+            threshold (float): NMS score threshold to set.
+
+        Note:
+            This function will fail in cases where the output vstream has no NMS operations on the CPU.
+        """
+        return self._recv_object.set_nms_score_threshold(threshold)
+
+    def set_nms_iou_threshold(self, threshold):
+        """Set NMS intersection over union overlap Threshold,
+            used in the NMS iterative elimination process where potential duplicates of detected items are suppressed.
+
+        Args:
+            threshold (float): NMS IoU threshold to set.
+
+        Note:
+            This function will fail in cases where the output vstream has no NMS operations on the CPU.
+        """
+        return self._recv_object.set_nms_iou_threshold(threshold)
+
+    def set_nms_max_proposals_per_class(self, max_proposals_per_class):
+        """Set a limit for the maximum number of boxes per class.
 
-    def _after_fork_in_child(self):
-        if self._recv_object is not None:
-            self._recv_object.after_fork_in_child()
+        Args:
+            max_proposals_per_class (int): NMS max proposals per class to set.
+
+        Note:
+            This function will fail in cases where the output vstream has no NMS operations on the CPU.
+        """
+        return self._recv_object.set_nms_max_proposals_per_class(max_proposals_per_class)
 
 
 class OutputVStreams(object):
@@ -3032,15 +3220,3 @@ class OutputVStreams(object):
 
     def __iter__(self):
         return iter(self._vstreams.values())
-
-    def _before_fork(self):
-        for vstream in self._vstreams.values():
-            vstream._before_fork()
-
-    def _after_fork_in_parent(self):
-        for vstream in self._vstreams.values():
-            vstream._after_fork_in_parent()
-
-    def _after_fork_in_child(self):
-        for vstream in self._vstreams.values():
-            vstream._after_fork_in_child()
index d376d088c526ca85721db1164078e26da617c75c..b6a74278be85c31fa802900b156d21f037f612c9 100644 (file)
     "with network_group.activate(network_group_params):\n",
     "    send_process.join()\n",
     "    recv_process.join()\n",
-    "print('Done')"
+    "print('Done')\n",
+    "\n",
+    "target.release()"
    ]
   }
  ],
index d4edead05f733d1e03bdc318c16132c6d49670fc..73be8adf8e5f74a07256d2f7c67ba47ce09e6c3f 100644 (file)
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "## Single power measurement"
-   ],
-    "cell_type": "markdown",
-    "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
     "    # Get saved power measurement values from the firmware.\n",
     "    measurements = target.control.get_power_measurement(buffer_index=buffer_index, should_clear=should_clear)\n",
     "    print('Average power is {} W. Min power is {} W. Max power is {} W.\\nAverage time between power samples is {} mS\\n'.format(measurements.average_value, measurements.min_value, measurements.max_value, measurements.average_time_value_milliseconds))\n",
-    "    \n",
+    "\n",
     "# Stop performing periodic power measurement\n",
-    "target.control.stop_power_measurement()"
+    "target.control.stop_power_measurement()\n",
+    "\n",
+    "target.release()"
    ]
   }
  ],
index 05fc1bc59970254f542716bcbecdaa236b4877d6..ffd700c150d4bff2a82821103aa32f56e48ef66c 100644 (file)
@@ -11,7 +11,7 @@
     "\n",
     "**Requirements:**\n",
     "\n",
-    "* Run HailoRT Multi-Process Service before running inference. See installation steps in [Multi-Process Service](../../inference/inference.rst)\n",
+    "* Enable HailoRT Multi-Process Service before running inference\n",
     "* Run the notebook inside the Python virtual environment: ```source hailo_virtualenv/bin/activate```\n",
     "\n",
     "It is recommended to use the command ``hailo tutorial`` (when inside the virtualenv) to open a Jupyter server that contains the tutorials."
index 425291b5c2609d1aaf06da79ffb4770022ceb200..4d37b8245d39ed148d0900c25d420db41d0a651c 100644 (file)
@@ -69,6 +69,6 @@ if __name__ == "__main__":
             "linux_aarch64",
         ],
         url="https://hailo.ai/",
-        version="4.14.0",
+        version="4.15.0",
         zip_safe=False,
     )
index 0f170a87848345c55ccfa3a7ce302f7324857c69..c10045a5daba94f08c24798524e4b856dba9496a 100644 (file)
@@ -34,6 +34,7 @@ set(PYHAILORT_DIR ${CMAKE_CURRENT_LIST_DIR})
 pybind11_add_module(_pyhailort
     pyhailort.cpp
     device_api.cpp
+    network_group_api.cpp
     hef_api.cpp
     vstream_api.cpp
     quantization_api.cpp
@@ -48,7 +49,7 @@ set_target_properties(_pyhailort PROPERTIES
     # VISIBILITY_INLINES_HIDDEN YES
 )
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 target_link_libraries(_pyhailort PRIVATE HailoRT::libhailort)
 if(WIN32)
index 6d87df90de05c4849996d2c05efe3e2908b9a5e4..d135e52ca9ee6d3449595c5ab710a411ae0e57ea 100644 (file)
@@ -47,6 +47,9 @@ public:
         {
         case HAILO_FORMAT_ORDER_HAILO_NMS:
             return { HailoRTCommon::get_nms_host_shape_size(vstream_info.nms_shape) };
+        case HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK: {
+            return { HailoRTCommon::get_nms_with_byte_mask_host_shape_size(vstream_info.nms_shape, user_format) };
+        }
         case HAILO_FORMAT_ORDER_NC:
             return {shape.features};
         case HAILO_FORMAT_ORDER_NHW:
index 6315dafc866fdb714f4ff3440afcf113b9c21bb5..529322e5ec5c34dda290180c9e6dc4b1555b1abb 100644 (file)
@@ -9,6 +9,7 @@
  **/
 
 #include "device_api.hpp"
+
 #include <memory>
 
 
@@ -341,8 +342,11 @@ py::list DeviceWrapper::configure(const HefWrapper &hef,
     VALIDATE_EXPECTED(network_groups);
 
     py::list results;
+    m_net_groups.reserve(m_net_groups.size() + network_groups->size());
     for (const auto &network_group : network_groups.value()) {
-        results.append(network_group.get());
+        auto wrapper = ConfiguredNetworkGroupWrapper::create(network_group);
+        results.append(wrapper);
+        m_net_groups.emplace_back(wrapper);
     }
 
     return results;
index d35731677bdf5dadd938ab665fa2e288f0ac2660..5024f28fc4a9b84fc6bbdd217ed3ea822bac4297 100644 (file)
@@ -17,6 +17,7 @@
 
 #include "utils.hpp"
 #include "hef_api.hpp"
+#include "network_group_api.hpp"
 
 #include <pybind11/pybind11.h>
 
@@ -140,6 +141,7 @@ private:
         : m_device(std::move(device)) {}
 
     std::unique_ptr<Device> m_device;
+    std::vector<ConfiguredNetworkGroupWrapperPtr> m_net_groups;
 };
 
 } /* namespace hailort */
index 5644f706d584b31c79aaae46d63429c5b37ffd89..7cb3641e3a559723066a4d8c7a6486a362122ae4 100644 (file)
@@ -12,7 +12,6 @@
 #include "hef_api.hpp"
 #include <memory>
 
-
 namespace hailort
 {
 
@@ -200,35 +199,6 @@ py::list HefWrapper::get_networks_names(const std::string &net_group_name)
     return py::cast(res);
 }
 
-ActivatedAppContextManagerWrapper::ActivatedAppContextManagerWrapper(ConfiguredNetworkGroup &net_group,
-    const hailo_activate_network_group_params_t &network_group_params) :
-        m_net_group(net_group), m_network_group_params(network_group_params)
-    {}
-
-const ActivatedNetworkGroup& ActivatedAppContextManagerWrapper::enter()
-{
-    auto activated = m_net_group.activate(m_network_group_params);
-    if (activated.status() != HAILO_NOT_IMPLEMENTED) {
-        VALIDATE_EXPECTED(activated);
-        m_activated_net_group = activated.release();
-    }
-
-    return std::ref(*m_activated_net_group);
-}
-
-void ActivatedAppContextManagerWrapper::exit()
-{
-    m_activated_net_group.reset();
-}
-
-void ActivatedAppContextManagerWrapper::add_to_python_module(py::module &m)
-{
-    py::class_<ActivatedAppContextManagerWrapper>(m, "ActivatedApp")
-    .def("__enter__", &ActivatedAppContextManagerWrapper::enter, py::return_value_policy::reference)
-    .def("__exit__",  [&](ActivatedAppContextManagerWrapper &self, py::args) { self.exit(); })
-    ;
-}
-
 void HefWrapper::initialize_python_module(py::module &m)
 {
     py::class_<HefWrapper>(m, "Hef")
@@ -255,209 +225,6 @@ void HefWrapper::initialize_python_module(py::module &m)
         .def("get_all_stream_infos", &HefWrapper::get_all_stream_infos)
         .def("get_networks_names", &HefWrapper::get_networks_names)
         ;
-
-    py::class_<ConfiguredNetworkGroup, std::shared_ptr<ConfiguredNetworkGroup>>(m, "ConfiguredNetworkGroup")
-        .def("is_scheduled", [](ConfiguredNetworkGroup& self)
-            {
-                return self.is_scheduled();
-            })
-        .def("get_name", [](ConfiguredNetworkGroup& self)
-            {
-                return self.name();
-            })
-        .def("get_default_streams_interface", [](ConfiguredNetworkGroup& self)
-            {
-                auto result = self.get_default_streams_interface();
-                VALIDATE_EXPECTED(result);
-                return result.value();
-            })
-        .def("activate", [](ConfiguredNetworkGroup& self,
-            const hailo_activate_network_group_params_t &network_group_params)
-            {
-                return ActivatedAppContextManagerWrapper(self, network_group_params);
-            })
-        .def("wait_for_activation", [](ConfiguredNetworkGroup& self, uint32_t timeout_ms)
-            {
-                auto status = self.wait_for_activation(std::chrono::milliseconds(timeout_ms));
-                if (status != HAILO_NOT_IMPLEMENTED) {
-                    VALIDATE_STATUS(status);
-                }
-            })
-        .def("InputVStreams", [](ConfiguredNetworkGroup &self, std::map<std::string, hailo_vstream_params_t> &input_vstreams_params)
-            {
-                return InputVStreamsWrapper::create(self, input_vstreams_params);
-            })
-        .def("OutputVStreams", [](ConfiguredNetworkGroup &self, std::map<std::string, hailo_vstream_params_t> &output_vstreams_params)
-            {
-                return OutputVStreamsWrapper::create(self, output_vstreams_params);
-            })
-        .def("get_udp_rates_dict", [](ConfiguredNetworkGroup& self, uint32_t fps, uint32_t max_supported_rate_bytes)
-        {
-            auto rate_calculator = NetworkUdpRateCalculator::create(self);
-            VALIDATE_EXPECTED(rate_calculator);
-
-            auto udp_input_streams = self.get_input_streams_by_interface(HAILO_STREAM_INTERFACE_ETH);
-            auto results = rate_calculator->get_udp_ports_rates_dict(udp_input_streams,
-                fps, max_supported_rate_bytes);
-            VALIDATE_EXPECTED(results);
-
-            return py::cast(results.value());
-        })
-        .def("before_fork", [](ConfiguredNetworkGroup& self)
-        {
-            auto status = self.before_fork();
-            VALIDATE_STATUS(status);
-        })
-        .def("after_fork_in_parent", [](ConfiguredNetworkGroup& self)
-        {
-            auto status = self.after_fork_in_parent();
-            VALIDATE_STATUS(status);            
-        })
-        .def("after_fork_in_child", [](ConfiguredNetworkGroup& self)
-        {
-            auto status = self.after_fork_in_child();
-            VALIDATE_STATUS(status);
-        })
-        .def("set_scheduler_timeout", [](ConfiguredNetworkGroup& self, int timeout, const std::string &network_name="")
-        {
-            auto timeout_mili = std::chrono::milliseconds(timeout);
-            auto status = self.set_scheduler_timeout(timeout_mili, network_name);
-            VALIDATE_STATUS(status);
-        })
-        .def("set_scheduler_threshold", [](ConfiguredNetworkGroup& self, uint32_t threshold)
-        {
-            auto status = self.set_scheduler_threshold(threshold);
-            VALIDATE_STATUS(status);
-        })
-        .def("set_scheduler_priority", [](ConfiguredNetworkGroup& self, uint8_t priority)
-        {
-            auto status = self.set_scheduler_priority(priority);
-            VALIDATE_STATUS(status);
-        })
-        .def("get_networks_names", [](ConfiguredNetworkGroup& self)
-        {
-            auto network_infos = self.get_network_infos();
-            VALIDATE_EXPECTED(network_infos);
-            std::vector<std::string> result;
-            result.reserve(network_infos->size());
-            for (const auto &info : network_infos.value()) {
-                result.push_back(info.name);
-            }
-            return py::cast(result);
-        })
-        .def("get_sorted_output_names", [](ConfiguredNetworkGroup& self)
-        {
-            auto names_list = self.get_sorted_output_names();
-            VALIDATE_EXPECTED(names_list);
-            return py::cast(names_list.release());
-        })
-        .def("get_input_vstream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
-        {
-            auto result = self.get_input_vstream_infos(name);
-            VALIDATE_EXPECTED(result);
-            return py::cast(result.value());
-        })
-        .def("get_output_vstream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
-        {
-            auto result = self.get_output_vstream_infos(name);
-            VALIDATE_EXPECTED(result);
-            return py::cast(result.value());
-        })
-        .def("get_all_vstream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
-        {
-            auto result = self.get_all_vstream_infos(name);
-            VALIDATE_EXPECTED(result);
-            return py::cast(result.value());
-        })
-        .def("get_all_stream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
-        {
-            auto result = self.get_all_stream_infos(name);
-            VALIDATE_EXPECTED(result);
-            return py::cast(result.value());
-        })
-        .def("get_input_stream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
-        {
-            std::vector<hailo_stream_info_t> input_streams_infos;
-            auto all_streams = self.get_all_stream_infos(name);
-            VALIDATE_EXPECTED(all_streams);
-            for (auto &info : all_streams.value()) {
-                if (HAILO_H2D_STREAM == info.direction) {
-                    input_streams_infos.push_back(std::move(info));
-                }
-            }
-            return py::cast(input_streams_infos);
-        })
-        .def("get_output_stream_infos", [](ConfiguredNetworkGroup& self, const std::string &name)
-        {
-            std::vector<hailo_stream_info_t> output_streams_infos;
-            auto all_streams = self.get_all_stream_infos(name);
-            VALIDATE_EXPECTED(all_streams);
-            for (auto &info : all_streams.value()) {
-                if (HAILO_D2H_STREAM == info.direction) {
-                    output_streams_infos.push_back(std::move(info));
-                }
-            }
-            return py::cast(output_streams_infos);
-        })
-        .def("get_vstream_names_from_stream_name", [](ConfiguredNetworkGroup& self, const std::string &stream_name)
-        {
-            auto result = self.get_vstream_names_from_stream_name(stream_name);
-            VALIDATE_EXPECTED(result);
-            return py::cast(result.release());
-        })
-        .def("get_stream_names_from_vstream_name", [](ConfiguredNetworkGroup& self, const std::string &vstream_name)
-        {
-            auto result = self.get_stream_names_from_vstream_name(vstream_name);
-            VALIDATE_EXPECTED(result);
-            return py::cast(result.release());
-        })
-        .def("make_input_vstream_params", [](ConfiguredNetworkGroup& self, const std::string &name, bool quantized, hailo_format_type_t format_type,
-            uint32_t timeout_ms, uint32_t queue_size)
-        {
-            auto result = self.make_input_vstream_params(quantized, format_type, timeout_ms, queue_size, name);
-            VALIDATE_EXPECTED(result);
-            return py::cast(result.release());
-        })
-        .def("make_output_vstream_params", [](ConfiguredNetworkGroup& self, const std::string &name, bool quantized, hailo_format_type_t format_type,
-            uint32_t timeout_ms, uint32_t queue_size)
-        {
-            auto result = self.make_output_vstream_params(quantized, format_type, timeout_ms, queue_size, name);
-            VALIDATE_EXPECTED(result);
-            return py::cast(result.release());
-        })
-        .def(py::pickle(
-            [](const ConfiguredNetworkGroup &cng) { // __getstate__
-                auto handle = cng.get_client_handle();
-                VALIDATE_EXPECTED(handle);
-                return py::make_tuple(handle.value(), cng.name());
-            },
-            [](py::tuple t) { // __setstate__
-                auto handle = t[0].cast<uint32_t>();
-                auto net_group_name = t[1].cast<std::string>();
-                auto net_group = ConfiguredNetworkGroup::duplicate_network_group_client(handle, net_group_name);
-                VALIDATE_EXPECTED(net_group);
-                
-                return net_group.value();
-            }
-        ))
-        ;
-
-    ActivatedAppContextManagerWrapper::add_to_python_module(m);
-
-    py::class_<ActivatedNetworkGroup>(m, "ActivatedNetworkGroup")
-        .def("get_intermediate_buffer", [](ActivatedNetworkGroup& self, uint8_t src_context_index,
-            uint8_t src_stream_index)
-        {
-            auto buff = self.get_intermediate_buffer(std::make_pair(src_context_index, src_stream_index));
-            VALIDATE_EXPECTED(buff);
-
-            return py::bytes(reinterpret_cast<char*>(buff->data()), buff->size());
-        })
-        .def("get_invalid_frames_count", [](ActivatedNetworkGroup& self)
-        {
-            return self.get_invalid_frames_count();
-        })
-        ;
 }
 
 } /* namespace hailort */
index 87b4069f0db7f029675e8ed1d11add4098cfe36d..b0905cea8cc8239ead6dc0d76ff457be19b4be93 100644 (file)
@@ -5,8 +5,6 @@
 /**
  * @file hef_api.hpp
  * @brief Defines binding to an HEF class, and network_group usage over Python.
- *
- * TODO: doc
  **/
 
 #ifndef HEF_API_HPP_
@@ -72,20 +70,6 @@ private:
     std::unique_ptr<Hef> hef;
 };
 
-class ActivatedAppContextManagerWrapper final
-{
-public:
-    ActivatedAppContextManagerWrapper(ConfiguredNetworkGroup &net_group,
-        const hailo_activate_network_group_params_t &network_group_params);
-    
-    const ActivatedNetworkGroup& enter();
-    void exit();
-    static void add_to_python_module(py::module &m);
-private:
-    std::unique_ptr<ActivatedNetworkGroup> m_activated_net_group;
-    ConfiguredNetworkGroup &m_net_group;
-    hailo_activate_network_group_params_t m_network_group_params;
-};
 
 } /* namespace hailort */
 
index 292a7655c0c762050ec5a7c7dbbb7dd72faf6b19..0e85942c669ebf94912800138fb03981ed3dbf01 100644 (file)
@@ -1,5 +1,8 @@
 cmake_minimum_required(VERSION 3.15.0)
 
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/spdlog.cmake)
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/readerwriterqueue.cmake)
+
 pybind11_add_module(_pyhailort_internal SHARED
     pyhailort_internal.cpp
     control_api.cpp
@@ -27,6 +30,7 @@ target_link_libraries(_pyhailort_internal PRIVATE
     hef_proto
     spdlog::spdlog
     readerwriterqueue
+    profiler_proto
     scheduler_mon_proto)
 if(HAILO_BUILD_SERVICE)
     target_link_libraries(_pyhailort_internal PRIVATE grpc++_unsecure hailort_rpc_grpc_proto)
index fe7515d16a77861b04ccfcf6fc17c9319937a76b..c9d8e728b463282acb06ae9b98832b3069371c54 100644 (file)
@@ -212,10 +212,10 @@ void PyhailortInternal::demux_output_buffer(
 void PyhailortInternal::transform_input_buffer(
     py::array src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape,
     uintptr_t dst, size_t dst_size, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape,
-    const hailo_quant_info_t &dst_quant_info)
+    const std::vector<hailo_quant_info_t> &dst_quant_infos)
 {
     auto transform_context = InputTransformContext::create(src_shape, src_format, dst_shape, dst_format,
-        dst_quant_info);
+        dst_quant_infos);
     VALIDATE_EXPECTED(transform_context);
 
     MemoryView dst_buffer(reinterpret_cast<uint8_t*>(dst), dst_size);
@@ -228,10 +228,10 @@ void PyhailortInternal::transform_input_buffer(
 void PyhailortInternal::transform_output_buffer(
     py::bytes src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape,
     py::array dst, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape,
-    const hailo_quant_info_t &dst_quant_info)
+    const std::vector<hailo_quant_info_t> &dst_quant_infos)
 {
     auto transform_context = OutputTransformContext::create(src_shape, src_format, dst_shape, dst_format,
-        dst_quant_info, {});
+        dst_quant_infos, {});
     VALIDATE_EXPECTED(transform_context);
 
     const auto src_str = static_cast<std::string>(src);
@@ -244,10 +244,10 @@ void PyhailortInternal::transform_output_buffer(
 void PyhailortInternal::transform_output_buffer_nms(
     py::bytes src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape,
     py::array dst, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape,
-    const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info)
+    const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info)
 {
     auto transform_context = OutputTransformContext::create(src_shape, src_format, dst_shape, dst_format,
-        dst_quant_info, nms_info);
+        dst_quant_infos, nms_info);
     VALIDATE_EXPECTED(transform_context);
 
     const auto src_str = static_cast<std::string>(src);
@@ -260,19 +260,25 @@ void PyhailortInternal::transform_output_buffer_nms(
 bool PyhailortInternal::is_input_transformation_required(
     const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format,
     const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format,
-    const hailo_quant_info_t &quant_info)
+    const std::vector<hailo_quant_info_t> &quant_infos)
 {
-    return InputTransformContext::is_transformation_required(src_shape, src_format, dst_shape, dst_format,
-        quant_info);
+    auto expected_is_transforamtion_required = InputTransformContext::is_transformation_required(src_shape, src_format, dst_shape, dst_format,
+        quant_infos);
+    VALIDATE_EXPECTED(expected_is_transforamtion_required);
+
+    return expected_is_transforamtion_required.release();
 }
 
 bool PyhailortInternal::is_output_transformation_required(
     const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format,
     const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format,
-    const hailo_quant_info_t &quant_info)
+    const std::vector<hailo_quant_info_t> &quant_infos)
 {
-    return OutputTransformContext::is_transformation_required(src_shape, src_format, dst_shape, dst_format,
-        quant_info);
+    auto expected_is_transforamtion_required = OutputTransformContext::is_transformation_required(src_shape, src_format, dst_shape, dst_format,
+        quant_infos);
+    VALIDATE_EXPECTED(expected_is_transforamtion_required);
+
+    return expected_is_transforamtion_required.release();
 }
 
 py::list PyhailortInternal::get_all_layers_info(const HefWrapper &hef, const std::string &net_group_name)
@@ -309,6 +315,9 @@ PYBIND11_MODULE(_pyhailort_internal, m) {
     py::class_<LayerInfo>(m, "HailoLayerInfo", py::module_local())
         .def_readonly("is_mux", &LayerInfo::is_mux)
         .def_readonly("mux_predecessors", &LayerInfo::predecessor)
+        .def_readonly("is_multi_planar", &LayerInfo::is_multi_planar)
+        .def_readonly("planes", &LayerInfo::planes)
+        .def_readonly("plane_index", &LayerInfo::plane_index)
         .def_readonly("is_defused_nms", &LayerInfo::is_defused_nms)
         .def_readonly("fused_nms_layer", &LayerInfo::fused_nms_layer)
         .def_property_readonly("shape", [](LayerInfo& self)
@@ -359,7 +368,7 @@ PYBIND11_MODULE(_pyhailort_internal, m) {
         .def_readonly("direction", &LayerInfo::direction)
         .def_readonly("sys_index", &LayerInfo::stream_index)
         .def_readonly("name", &LayerInfo::name)
-        .def_readonly("quant_info", &LayerInfo::quant_info)
+        .def_readonly("quant_infos", &LayerInfo::quant_infos)
         // For backwards compatibility (accessing qp through layer_info directly)
         .def_property_readonly("qp_zp", [](LayerInfo& self)
         {
index e8d0994390c9842243da39657bd313a19c4366ac..bb6f0de74cbb233c683bb30789396b0939519aa5 100644 (file)
@@ -34,17 +34,17 @@ public:
         std::map<std::string, py::array> dst_buffers, const LayerInfo &mux_layer_info);
     static void transform_input_buffer(py::array src, const hailo_format_t &src_format, const hailo_3d_image_shape_t &src_shape,
         uintptr_t dst, size_t dst_size, const hailo_format_t &dst_format, const hailo_3d_image_shape_t &dst_shape,
-        const hailo_quant_info_t &dst_quant_info);
+        const std::vector<hailo_quant_info_t> &dst_quant_infos);
     static void transform_output_buffer(py::bytes src, const hailo_format_t &src_format,
         const hailo_3d_image_shape_t &src_shape, py::array dst, const hailo_format_t &dst_format,
-        const hailo_3d_image_shape_t &dst_shape, const hailo_quant_info_t &dst_quant_info);
+        const hailo_3d_image_shape_t &dst_shape, const std::vector<hailo_quant_info_t> &dst_quant_infos);
     static void transform_output_buffer_nms(py::bytes src, const hailo_format_t &src_format,
         const hailo_3d_image_shape_t &src_shape, py::array dst, const hailo_format_t &dst_format,
-        const hailo_3d_image_shape_t &dst_shape, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info);
+        const hailo_3d_image_shape_t &dst_shape, const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info);
     static bool is_input_transformation_required(const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format,
-        const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info);
+        const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &quant_infos);
     static bool is_output_transformation_required(const hailo_3d_image_shape_t &src_shape, const hailo_format_t &src_format,
-        const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info);
+        const hailo_3d_image_shape_t &dst_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &quant_infos);
     static py::list get_all_layers_info(const HefWrapper &hef, const std::string &net_group_name);
 };
 
diff --git a/hailort/libhailort/bindings/python/src/network_group_api.cpp b/hailort/libhailort/bindings/python/src/network_group_api.cpp
new file mode 100644 (file)
index 0000000..925f5f2
--- /dev/null
@@ -0,0 +1,95 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file network_group_api.cpp
+ **/
+
+#include "network_group_api.hpp"
+
+
+namespace hailort
+{
+
+void ConfiguredNetworkGroupWrapper::add_to_python_module(py::module &m)
+{
+    py::class_<ConfiguredNetworkGroupWrapper, ConfiguredNetworkGroupWrapperPtr>(m, "ConfiguredNetworkGroup")
+        .def("is_scheduled", &ConfiguredNetworkGroupWrapper::is_scheduled)
+        .def("get_name", &ConfiguredNetworkGroupWrapper::get_name)
+        .def("get_default_streams_interface", &ConfiguredNetworkGroupWrapper::get_default_streams_interface)
+        .def("activate", &ConfiguredNetworkGroupWrapper::activate)
+        .def("wait_for_activation", &ConfiguredNetworkGroupWrapper::wait_for_activation)
+        .def("InputVStreams", &ConfiguredNetworkGroupWrapper::InputVStreams)
+        .def("OutputVStreams", &ConfiguredNetworkGroupWrapper::OutputVStreams)
+        .def("get_udp_rates_dict", &ConfiguredNetworkGroupWrapper::get_udp_rates_dict)
+        .def("set_scheduler_timeout", &ConfiguredNetworkGroupWrapper::set_scheduler_timeout)
+        .def("set_scheduler_threshold", &ConfiguredNetworkGroupWrapper::set_scheduler_threshold)
+        .def("set_scheduler_priority", &ConfiguredNetworkGroupWrapper::set_scheduler_priority)
+        .def("get_networks_names", &ConfiguredNetworkGroupWrapper::get_networks_names)
+        .def("get_sorted_output_names", &ConfiguredNetworkGroupWrapper::get_sorted_output_names)
+        .def("get_input_vstream_infos", &ConfiguredNetworkGroupWrapper::get_input_vstream_infos)
+        .def("get_output_vstream_infos", &ConfiguredNetworkGroupWrapper::get_output_vstream_infos)
+        .def("get_all_vstream_infos", &ConfiguredNetworkGroupWrapper::get_all_vstream_infos)
+        .def("get_all_stream_infos", &ConfiguredNetworkGroupWrapper::get_all_stream_infos)
+        .def("get_input_stream_infos", &ConfiguredNetworkGroupWrapper::get_input_stream_infos)
+        .def("get_output_stream_infos", &ConfiguredNetworkGroupWrapper::get_output_stream_infos)
+        .def("get_vstream_names_from_stream_name", &ConfiguredNetworkGroupWrapper::get_vstream_names_from_stream_name)
+        .def("get_stream_names_from_vstream_name", &ConfiguredNetworkGroupWrapper::get_stream_names_from_vstream_name)
+        .def("make_input_vstream_params", &ConfiguredNetworkGroupWrapper::make_input_vstream_params)
+        .def("make_output_vstream_params", &ConfiguredNetworkGroupWrapper::make_output_vstream_params)
+        .def(py::pickle(&ConfiguredNetworkGroupWrapper::pickle_get_state, &ConfiguredNetworkGroupWrapper::pickle_set_state))
+        ;
+}
+
+ActivatedAppContextManagerWrapper::ActivatedAppContextManagerWrapper(ConfiguredNetworkGroup &net_group,
+    const hailo_activate_network_group_params_t &network_group_params) :
+        m_net_group(net_group), m_network_group_params(network_group_params)
+    {}
+
+const ActivatedNetworkGroup& ActivatedAppContextManagerWrapper::enter()
+{
+    auto activated = m_net_group.activate(m_network_group_params);
+    if (activated.status() != HAILO_NOT_IMPLEMENTED) {
+        VALIDATE_EXPECTED(activated);
+        m_activated_net_group = activated.release();
+    }
+    VALIDATE_NOT_NULL(m_activated_net_group, HAILO_INTERNAL_FAILURE); // avoid null deref when activate() is not implemented
+    return std::ref(*m_activated_net_group);
+}
+
+void ActivatedAppContextManagerWrapper::exit()
+{
+    m_activated_net_group.reset();
+}
+
+void ActivatedAppContextManagerWrapper::add_to_python_module(py::module &m)
+{
+    py::class_<ActivatedAppContextManagerWrapper>(m, "ActivatedApp")
+    .def("__enter__", &ActivatedAppContextManagerWrapper::enter, py::return_value_policy::reference)
+    .def("__exit__",  [&](ActivatedAppContextManagerWrapper &self, py::args) { self.exit(); })
+    ;
+
+    py::class_<ActivatedNetworkGroup>(m, "ActivatedNetworkGroup")
+        .def("get_intermediate_buffer", [](ActivatedNetworkGroup& self, uint8_t src_context_index,
+            uint8_t src_stream_index)
+        {
+            auto buff = self.get_intermediate_buffer(std::make_pair(src_context_index, src_stream_index));
+            VALIDATE_EXPECTED(buff);
+
+            return py::bytes(reinterpret_cast<char*>(buff->data()), buff->size());
+        })
+        .def("get_invalid_frames_count", [](ActivatedNetworkGroup& self)
+        {
+            return self.get_invalid_frames_count();
+        })
+        ;
+}
+
+void NetworkGroup_api_initialize_python_module(py::module &m)
+{
+    ConfiguredNetworkGroupWrapper::add_to_python_module(m);
+    ActivatedAppContextManagerWrapper::add_to_python_module(m);
+}
+
+} /* namespace hailort */
diff --git a/hailort/libhailort/bindings/python/src/network_group_api.hpp b/hailort/libhailort/bindings/python/src/network_group_api.hpp
new file mode 100644 (file)
index 0000000..51bb407
--- /dev/null
@@ -0,0 +1,325 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file network_group_api.hpp
+ * @brief Defines binding to network group
+ **/
+
+#ifndef _HAILO_NETWORK_GROUP_API_HPP_
+#define _HAILO_NETWORK_GROUP_API_HPP_
+
+#include "utils.hpp"
+#include "vstream_api.hpp"
+
+#include "common/fork_support.hpp"
+
+#include "hailo/network_group.hpp"
+#include "hailo/network_rate_calculator.hpp"
+
+
+namespace hailort
+{
+
+class ActivatedAppContextManagerWrapper final
+{
+public:
+    ActivatedAppContextManagerWrapper(ConfiguredNetworkGroup &net_group,
+        const hailo_activate_network_group_params_t &network_group_params);
+
+    const ActivatedNetworkGroup& enter();
+    void exit();
+    static void add_to_python_module(py::module &m);
+private:
+    std::unique_ptr<ActivatedNetworkGroup> m_activated_net_group;
+    ConfiguredNetworkGroup &m_net_group;
+    hailo_activate_network_group_params_t m_network_group_params;
+};
+
+class ConfiguredNetworkGroupWrapper;
+using ConfiguredNetworkGroupWrapperPtr = std::shared_ptr<ConfiguredNetworkGroupWrapper>;
+
+class ConfiguredNetworkGroupWrapper final {
+public:
+
+    static ConfiguredNetworkGroupWrapperPtr create(std::shared_ptr<ConfiguredNetworkGroup> cng)
+    {
+        return std::make_shared<ConfiguredNetworkGroupWrapper>(cng);
+    }
+
+    ConfiguredNetworkGroupWrapper(std::shared_ptr<ConfiguredNetworkGroup> cng, bool store_guard_for_multi_process = false) :
+        m_cng(cng)
+#ifdef HAILO_IS_FORK_SUPPORTED
+        ,
+        m_atfork_guard(this, {
+            .before_fork = [this]() { before_fork(); },
+            .after_fork_in_parent = [this]() { after_fork_in_parent(); },
+            .after_fork_in_child = [this]() { after_fork_in_child(); }
+        })
+#endif
+    {
+        if (store_guard_for_multi_process) {
+            m_cng_guard_for_mt = cng;
+        }
+    }
+
+    auto is_scheduled()
+    {
+        return get().is_scheduled();
+    }
+
+    auto get_name()
+    {
+        return get().name();
+    }
+
+    auto get_default_streams_interface()
+    {
+        auto result = get().get_default_streams_interface();
+        VALIDATE_EXPECTED(result);
+        return result.value();
+    }
+
+    auto activate(const hailo_activate_network_group_params_t &network_group_params)
+    {
+        return ActivatedAppContextManagerWrapper(get(), network_group_params);
+    }
+
+    void wait_for_activation(uint32_t timeout_ms)
+    {
+        auto status = get().wait_for_activation(std::chrono::milliseconds(timeout_ms));
+        if (status != HAILO_NOT_IMPLEMENTED) {
+            VALIDATE_STATUS(status);
+        }
+    }
+
+    auto InputVStreams(const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params)
+    {
+        return InputVStreamsWrapper::create(get(), input_vstreams_params);
+    }
+
+    auto OutputVStreams(const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params)
+    {
+        return OutputVStreamsWrapper::create(get(), output_vstreams_params);
+    }
+
+    auto get_udp_rates_dict(uint32_t fps, uint32_t max_supported_rate_bytes)
+    {
+        auto rate_calculator = NetworkUdpRateCalculator::create(get());
+        VALIDATE_EXPECTED(rate_calculator);
+
+        auto udp_input_streams = get().get_input_streams_by_interface(HAILO_STREAM_INTERFACE_ETH);
+        auto results = rate_calculator->get_udp_ports_rates_dict(udp_input_streams,
+            fps, max_supported_rate_bytes);
+        VALIDATE_EXPECTED(results);
+
+        return py::cast(results.value());
+    }
+
+    void set_scheduler_timeout(int timeout, const std::string &network_name="")
+    {
+        auto timeout_mili = std::chrono::milliseconds(timeout);
+        auto status = get().set_scheduler_timeout(timeout_mili, network_name);
+        VALIDATE_STATUS(status);
+    }
+
+    void set_scheduler_threshold(uint32_t threshold)
+    {
+        auto status = get().set_scheduler_threshold(threshold);
+        VALIDATE_STATUS(status);
+    }
+
+    void set_scheduler_priority(uint8_t priority)
+    {
+        auto status = get().set_scheduler_priority(priority);
+        VALIDATE_STATUS(status);
+    }
+
+    auto get_networks_names()
+    {
+        auto network_infos = get().get_network_infos();
+        VALIDATE_EXPECTED(network_infos);
+        std::vector<std::string> result;
+        result.reserve(network_infos->size());
+        for (const auto &info : network_infos.value()) {
+            result.push_back(info.name);
+        }
+        return py::cast(result);
+    }
+
+    auto get_sorted_output_names()
+    {
+        auto names_list = get().get_sorted_output_names();
+        VALIDATE_EXPECTED(names_list);
+        return py::cast(names_list.release());
+    }
+
+    auto get_input_vstream_infos(const std::string &name)
+    {
+        auto result = get().get_input_vstream_infos(name);
+        VALIDATE_EXPECTED(result);
+        return py::cast(result.value());
+    }
+
+    auto get_output_vstream_infos(const std::string &name)
+    {
+        auto result = get().get_output_vstream_infos(name);
+        VALIDATE_EXPECTED(result);
+        return py::cast(result.value());
+    }
+
+    auto get_all_vstream_infos(const std::string &name)
+    {
+        auto result = get().get_all_vstream_infos(name);
+        VALIDATE_EXPECTED(result);
+        return py::cast(result.value());
+    }
+
+    auto get_all_stream_infos(const std::string &name)
+    {
+        auto result = get().get_all_stream_infos(name);
+        VALIDATE_EXPECTED(result);
+        return py::cast(result.value());
+    }
+
+    auto get_input_stream_infos(const std::string &name)
+    {
+        std::vector<hailo_stream_info_t> input_streams_infos;
+        auto all_streams = get().get_all_stream_infos(name);
+        VALIDATE_EXPECTED(all_streams);
+        for (auto &info : all_streams.value()) {
+            if (HAILO_H2D_STREAM == info.direction) {
+                input_streams_infos.push_back(std::move(info));
+            }
+        }
+        return py::cast(input_streams_infos);
+    }
+
+    auto get_output_stream_infos(const std::string &name)
+    {
+        std::vector<hailo_stream_info_t> output_streams_infos;
+        auto all_streams = get().get_all_stream_infos(name);
+        VALIDATE_EXPECTED(all_streams);
+        for (auto &info : all_streams.value()) {
+            if (HAILO_D2H_STREAM == info.direction) {
+                output_streams_infos.push_back(std::move(info));
+            }
+        }
+        return py::cast(output_streams_infos);
+    }
+
+    auto get_vstream_names_from_stream_name(const std::string &stream_name)
+    {
+        auto result = get().get_vstream_names_from_stream_name(stream_name);
+        VALIDATE_EXPECTED(result);
+        return py::cast(result.release());
+    }
+
+    auto get_stream_names_from_vstream_name(const std::string &vstream_name)
+    {
+        auto result = get().get_stream_names_from_vstream_name(vstream_name);
+        VALIDATE_EXPECTED(result);
+        return py::cast(result.release());
+    }
+
+    auto make_input_vstream_params(const std::string &name, bool quantized, hailo_format_type_t format_type,
+        uint32_t timeout_ms, uint32_t queue_size)
+    {
+        auto result = get().make_input_vstream_params(quantized, format_type, timeout_ms, queue_size, name);
+        VALIDATE_EXPECTED(result);
+        return py::cast(result.release());
+    }
+
+    auto make_output_vstream_params(const std::string &name, bool quantized, hailo_format_type_t format_type,
+        uint32_t timeout_ms, uint32_t queue_size)
+    {
+        auto result = get().make_output_vstream_params(quantized, format_type, timeout_ms, queue_size, name);
+        VALIDATE_EXPECTED(result);
+        return py::cast(result.release());
+    }
+
+    ConfiguredNetworkGroup &get()
+    {
+        auto cng = m_cng.lock();
+        VALIDATE_NOT_NULL(cng, HAILO_INTERNAL_FAILURE);
+        return *cng;
+    }
+
+    ConfiguredNetworkGroup &get() const
+    {
+        auto cng = m_cng.lock();
+        VALIDATE_NOT_NULL(cng, HAILO_INTERNAL_FAILURE);
+        return *cng;
+    }
+
+    void before_fork()
+    {
+        auto cng = m_cng.lock();
+        if (cng) {
+            cng->before_fork();
+        }
+    }
+
+    void after_fork_in_parent()
+    {
+        auto cng = m_cng.lock();
+        if (cng) {
+            cng->after_fork_in_parent();
+        }
+    }
+
+    void after_fork_in_child()
+    {
+        auto cng = m_cng.lock();
+        if (cng) {
+            cng->after_fork_in_child();
+        }
+    }
+
+    static auto pickle_get_state(const ConfiguredNetworkGroupWrapper &self)
+    {
+        auto handle = self.get().get_client_handle();
+        VALIDATE_EXPECTED(handle);
+
+        auto vdevice_handle = self.get().get_vdevice_client_handle();
+        VALIDATE_EXPECTED(vdevice_handle);
+
+        return py::make_tuple(handle.value(), vdevice_handle.value(), self.get().name());
+    }
+
+    static auto pickle_set_state(py::tuple t)
+    {
+        auto handle = t[0].cast<uint32_t>();
+        auto vdevice_handle = t[1].cast<uint32_t>();
+        auto net_group_name = t[2].cast<std::string>();
+        auto net_group = ConfiguredNetworkGroup::duplicate_network_group_client(handle, vdevice_handle, net_group_name);
+        VALIDATE_EXPECTED(net_group);
+
+        const bool store_guard_for_multi_process = true;
+        return std::make_shared<ConfiguredNetworkGroupWrapper>(net_group.release(), store_guard_for_multi_process);
+    }
+
+    static void add_to_python_module(py::module &m);
+
+private:
+    // Normally, the ownership of the network group is the Device/VDevice objects. We keep weak_ptr
+    // to force free the network group before freeing the device/vdevice.
+    std::weak_ptr<ConfiguredNetworkGroup> m_cng;
+
+    // On multi-process, when pickling this object (the windows multi-process flow) the device/vdevice
+    // doesn't own the network group object.
+    // To solve this problem, we store here an optional guard for the network group that will exist
+    // only when the object is constructed with pickle.
+    std::shared_ptr<ConfiguredNetworkGroup> m_cng_guard_for_mt;
+
+#ifdef HAILO_IS_FORK_SUPPORTED
+    AtForkRegistry::AtForkGuard m_atfork_guard;
+#endif
+};
+
+void NetworkGroup_api_initialize_python_module(py::module &m);
+
+} /* namespace hailort */
+
+#endif /* _HAILO_NETWORK_GROUP_API_HPP_ */
index 4cca84045925af1fbb5fed4b80ae341840c66666..d9db006d709a81eec65fb1ee7909ac9fc9ec344b 100644 (file)
@@ -15,6 +15,7 @@ using namespace std;
 #include "hef_api.hpp"
 #include "vstream_api.hpp"
 #include "vdevice_api.hpp"
+#include "network_group_api.hpp"
 #include "device_api.hpp"
 #include "quantization_api.hpp"
 
@@ -164,6 +165,7 @@ PYBIND11_MODULE(_pyhailort, m) {
     m.def("dequantize_output_buffer_in_place", &QuantizationBindings::dequantize_output_buffer_in_place);
     m.def("dequantize_output_buffer", &QuantizationBindings::dequantize_output_buffer);
     m.def("quantize_input_buffer", &QuantizationBindings::quantize_input_buffer);
+    m.def("is_qp_valid", &QuantizationBindings::is_qp_valid);
 
     m.def("get_format_data_bytes", &HailoRTCommon::get_format_data_bytes);
     m.def("get_dtype", &HailoRTBindingsCommon::get_dtype);
@@ -209,7 +211,8 @@ PYBIND11_MODULE(_pyhailort, m) {
         .value("HAILO8_A0", HAILO_ARCH_HAILO8_A0)
         .value("HAILO8", HAILO_ARCH_HAILO8)
         .value("HAILO8L", HAILO_ARCH_HAILO8L)
-        .value("HAILO15", HAILO_ARCH_HAILO15)
+        .value("HAILO15H", HAILO_ARCH_HAILO15H)
+        .value("PLUTO", HAILO_ARCH_PLUTO)
     ;
 
     /* TODO: SDK-15648 */
@@ -462,16 +465,19 @@ PYBIND11_MODULE(_pyhailort, m) {
         .def(py::init<>())
         .def_readonly("number_of_classes", &hailo_nms_shape_t::number_of_classes)
         .def_readonly("max_bboxes_per_class", &hailo_nms_shape_t::max_bboxes_per_class)
+        .def_readonly("max_mask_size", &hailo_nms_shape_t::max_mask_size)
         .def(py::pickle(
             [](const hailo_nms_shape_t &nms_shape) { // __getstate__
                 return py::make_tuple(
                     nms_shape.number_of_classes,
-                    nms_shape.max_bboxes_per_class);
+                    nms_shape.max_bboxes_per_class,
+                    nms_shape.max_mask_size);
             },
             [](py::tuple t) { // __setstate__
                 hailo_nms_shape_t nms_shape;
                 nms_shape.number_of_classes = t[0].cast<uint32_t>();
                 nms_shape.max_bboxes_per_class = t[1].cast<uint32_t>();
+                nms_shape.max_mask_size = t[2].cast<uint32_t>();
                 return nms_shape;
             }
         ))
@@ -513,6 +519,7 @@ PYBIND11_MODULE(_pyhailort, m) {
         .value("RGB4", HAILO_FORMAT_ORDER_RGB4)
         .value("I420", HAILO_FORMAT_ORDER_I420)
         .value("YYYYUV", HAILO_FORMAT_ORDER_HAILO_YYYYUV)
+        .value("HAILO_NMS_WITH_BYTE_MASK", HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK)
         ;
 
     py::enum_<hailo_format_flags_t>(m, "FormatFlags", py::arithmetic())
@@ -1010,7 +1017,7 @@ PYBIND11_MODULE(_pyhailort, m) {
             }
         })
         .def_property_readonly("nms_shape", [](const hailo_vstream_info_t &self) {
-            if (HAILO_FORMAT_ORDER_HAILO_NMS != self.format.order) {
+            if (!HailoRTCommon::is_nms(self)) {
                 throw HailoRTCustomException("nms_shape is availale only on nms order vstreams");
             }
             return self.nms_shape;
@@ -1025,7 +1032,7 @@ PYBIND11_MODULE(_pyhailort, m) {
         })
         .def(py::pickle(
             [](const hailo_vstream_info_t &vstream_info) { // __getstate__
-                if (HAILO_FORMAT_ORDER_HAILO_NMS == vstream_info.format.order) {
+                if (HailoRTCommon::is_nms(vstream_info)) {
                     return py::make_tuple(
                         vstream_info.name,
                         vstream_info.network_name,
@@ -1050,7 +1057,7 @@ PYBIND11_MODULE(_pyhailort, m) {
                 strcpy(vstream_info.network_name, t[1].cast<std::string>().c_str());
                 vstream_info.direction = t[2].cast<hailo_stream_direction_t>();
                 vstream_info.format = t[3].cast<hailo_format_t>();
-                if (HAILO_FORMAT_ORDER_HAILO_NMS == vstream_info.format.order) {
+                if (HailoRTCommon::is_nms(vstream_info)) {
                     vstream_info.nms_shape = t[4].cast<hailo_nms_shape_t>();
                 }
                 else {
@@ -1104,6 +1111,7 @@ PYBIND11_MODULE(_pyhailort, m) {
     HefWrapper::initialize_python_module(m);
     VStream_api_initialize_python_module(m);
     VDevice_api_initialize_python_module(m);
+    NetworkGroup_api_initialize_python_module(m);
     DeviceWrapper::add_to_python_module(m);
 
     NetworkRateLimiter::add_to_python_module(m);
index 893afb809224fcc75503982d4880d24c44852fb6..21c93d441258e7048d91168c58dd10b6ef0b3d2b 100644 (file)
@@ -245,4 +245,9 @@ void QuantizationBindings::quantize_input_buffer(py::array src_buffer, py::array
     }
 }
 
+bool QuantizationBindings::is_qp_valid(const hailo_quant_info_t &quant_info)
+{
+    return Quantization::is_qp_valid(quant_info);
+}
+
 } /* namespace hailort */
index f28f628c97a0f025e6d527f683ba4cf190844e1e..c59903b091540a1ba12051b9f3982591302a1aed 100644 (file)
@@ -30,6 +30,7 @@ public:
         const hailo_format_type_t &dst_dtype, uint32_t shape_size, const hailo_quant_info_t &quant_info);
     static void dequantize_output_buffer(py::array src_buffer, py::array dst_buffer, const hailo_format_type_t &src_dtype,
         const hailo_format_type_t &dst_dtype, uint32_t shape_size, const hailo_quant_info_t &quant_info);
+    static bool is_qp_valid(const hailo_quant_info_t &quant_info);
 private:
     static void dequantize_output_buffer_from_uint8(py::array src_buffer, py::array dst_buffer,
         const hailo_format_type_t &dst_dtype, uint32_t shape_size, const hailo_quant_info_t &quant_info);
index eae48d73ca01a9fe524aecad8ad1da5f188bb020..590e9feb926e7a9149eedbdf9ba42e0e0f72a256 100644 (file)
@@ -5,18 +5,18 @@
 /**
  * @file vdevice_api.hpp
  * @brief Defines binding to a VDevice class usage over Python.
- *
- * TODO: doc
  **/
 
 #ifndef VDEVICE_API_HPP_
 #define VDEVICE_API_HPP_
 
+#include "utils.hpp"
+#include "network_group_api.hpp"
+
 #include "hailo/hef.hpp"
 #include "hailo/vdevice.hpp"
 #include "hailo/hailort_common.hpp"
 
-#include "utils.hpp"
 #include <iostream>
 #include <pybind11/pybind11.h>
 #include <pybind11/numpy.h>
@@ -36,19 +36,23 @@ struct VDeviceParamsWrapper {
     std::string group_id_str;
 };
 
+
+class VDeviceWrapper;
+using VDeviceWrapperPtr = std::shared_ptr<VDeviceWrapper>;
+
 class VDeviceWrapper {
 public:
-    static VDeviceWrapper create(const hailo_vdevice_params_t &params)
+    static VDeviceWrapperPtr create(const hailo_vdevice_params_t &params)
     {
-        return VDeviceWrapper(params);
+        return std::make_shared<VDeviceWrapper>(params);
     };
 
-    static VDeviceWrapper create(const VDeviceParamsWrapper &params)
+    static VDeviceWrapperPtr create(const VDeviceParamsWrapper &params)
     {
-        return VDeviceWrapper(params.orig_params);
+        return std::make_shared<VDeviceWrapper>(params.orig_params);
     }
 
-    static VDeviceWrapper create(const VDeviceParamsWrapper &params, const std::vector<std::string> &device_ids)
+    static VDeviceWrapperPtr create(const VDeviceParamsWrapper &params, const std::vector<std::string> &device_ids)
     {
         if (params.orig_params.device_ids != nullptr && (!device_ids.empty())) {
             std::cerr << "VDevice device_ids can be set in params or device_ids argument. Both parameters were passed to the c'tor";
@@ -58,10 +62,10 @@ public:
         auto device_ids_vector = HailoRTCommon::to_device_ids_vector(device_ids);
         VALIDATE_EXPECTED(device_ids_vector);
         modified_params.orig_params.device_ids = device_ids_vector->data();
-        return VDeviceWrapper(modified_params.orig_params);
+        return std::make_shared<VDeviceWrapper>(modified_params.orig_params);
     }
 
-    static VDeviceWrapper create_from_ids(const std::vector<std::string> &device_ids)
+    static VDeviceWrapperPtr create_from_ids(const std::vector<std::string> &device_ids)
     {
         auto device_ids_vector = HailoRTCommon::to_device_ids_vector(device_ids);
         VALIDATE_EXPECTED(device_ids_vector);
@@ -74,10 +78,18 @@ public:
         params.device_count = static_cast<uint32_t>(device_ids_vector->size());
         params.scheduling_algorithm = HAILO_SCHEDULING_ALGORITHM_NONE;
 
-        return VDeviceWrapper(params);
+        return std::make_shared<VDeviceWrapper>(params);
     }
 
     VDeviceWrapper(const hailo_vdevice_params_t &params)
+#ifdef HAILO_IS_FORK_SUPPORTED
+        :
+        m_atfork_guard(this, {
+            .before_fork = [this]() { if (m_vdevice) m_vdevice->before_fork(); },
+            .after_fork_in_parent = [this]() { if (m_vdevice) m_vdevice->after_fork_in_parent(); },
+            .after_fork_in_child = [this]() { if (m_vdevice) m_vdevice->after_fork_in_child(); },
+        })
+#endif
     {
         auto vdevice_expected = VDevice::create(params);
         VALIDATE_EXPECTED(vdevice_expected);
@@ -96,15 +108,15 @@ public:
     py::list configure(const HefWrapper &hef,
         const NetworkGroupsParamsMap &configure_params={})
     {
-
         auto network_groups = m_vdevice->configure(*hef.hef_ptr(), configure_params);
         VALIDATE_EXPECTED(network_groups);
 
         py::list results;
         m_net_groups.reserve(m_net_groups.size() + network_groups->size());
         for (const auto &network_group : network_groups.value()) {
-            results.append(network_group.get());
-            m_net_groups.emplace_back(network_group);
+            auto wrapper = ConfiguredNetworkGroupWrapper::create(network_group);
+            results.append(wrapper);
+            m_net_groups.emplace_back(wrapper);
         }
 
         return results;
@@ -116,38 +128,18 @@ public:
         m_vdevice.reset();
     }
 
-    void before_fork()
-    {
-        if (m_vdevice != nullptr) {
-            auto status = m_vdevice->before_fork();
-            VALIDATE_STATUS(status);
-        }
-    }
-
-    void after_fork_in_parent()
-    {
-        if (m_vdevice != nullptr) {
-            auto status = m_vdevice->after_fork_in_parent();
-            VALIDATE_STATUS(status);
-        }
-    }
-
-    void after_fork_in_child()
-    {
-        if (m_vdevice != nullptr) {
-            auto status = m_vdevice->after_fork_in_child();
-            VALIDATE_STATUS(status);
-        }
-    }
-
 private:
     std::unique_ptr<VDevice> m_vdevice;
-    ConfiguredNetworkGroupVector m_net_groups;
+    std::vector<ConfiguredNetworkGroupWrapperPtr> m_net_groups;
+
+#ifdef HAILO_IS_FORK_SUPPORTED
+    AtForkRegistry::AtForkGuard m_atfork_guard;
+#endif
 };
 
 void VDevice_api_initialize_python_module(py::module &m)
 {
-    py::class_<VDeviceWrapper>(m, "VDevice")
+    py::class_<VDeviceWrapper, VDeviceWrapperPtr>(m, "VDevice")
         .def("create", py::overload_cast<const hailo_vdevice_params_t&>(&VDeviceWrapper::create))
         .def("create", py::overload_cast<const VDeviceParamsWrapper&>(&VDeviceWrapper::create))
         .def("create", py::overload_cast<const VDeviceParamsWrapper&, const std::vector<std::string>&>(&VDeviceWrapper::create))
@@ -155,9 +147,6 @@ void VDevice_api_initialize_python_module(py::module &m)
         .def("get_physical_devices_ids", &VDeviceWrapper::get_physical_devices_ids)
         .def("configure", &VDeviceWrapper::configure)
         .def("release", &VDeviceWrapper::release)
-        .def("before_fork", &VDeviceWrapper::before_fork)
-        .def("after_fork_in_parent", &VDeviceWrapper::after_fork_in_parent)
-        .def("after_fork_in_child", &VDeviceWrapper::after_fork_in_child)
         ;
 }
 
index a17b6fd1e0ef85c7e2709f84097ad4c1da67a101..1bf057809ed2a42dba9cc38e0f566273339fefdf 100644 (file)
@@ -10,6 +10,8 @@
 #include "vstream_api.hpp"
 #include "bindings_common.hpp"
 #include "utils.hpp"
+#include "network_group_api.hpp"
+
 #include <iostream>
 
 
@@ -25,36 +27,6 @@ void InputVStreamWrapper::add_to_python_module(py::module &m)
             MemoryView(const_cast<void*>(reinterpret_cast<const void*>(data.data())), data.nbytes()));
         VALIDATE_STATUS(status);
     })
-    .def("before_fork", [](InputVStream &self)
-    {
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-        auto status = self.before_fork();
-        VALIDATE_STATUS(status);
-#else
-        (void)self;
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-    }
-    )
-    .def("after_fork_in_parent", [](InputVStream &self)
-    {
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-        auto status = self.after_fork_in_parent();
-        VALIDATE_STATUS(status);
-#else
-        (void)self;
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-    }
-    )
-    .def("after_fork_in_child", [](InputVStream &self)
-    {
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-        auto status = self.after_fork_in_child();
-        VALIDATE_STATUS(status);
-#else
-        (void)self;
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-    }
-    )
     .def("flush", [](InputVStream &self)
     {
         hailo_status status = self.flush();
@@ -76,7 +48,7 @@ void InputVStreamWrapper::add_to_python_module(py::module &m)
     ;
 }
 
-InputVStreamsWrapper InputVStreamsWrapper::create(ConfiguredNetworkGroup &net_group,
+InputVStreamsWrapperPtr InputVStreamsWrapper::create(ConfiguredNetworkGroup &net_group,
     const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params)
 {
     auto input_vstreams_expected = VStreamsBuilder::create_input_vstreams(net_group, input_vstreams_params);
@@ -87,7 +59,7 @@ InputVStreamsWrapper InputVStreamsWrapper::create(ConfiguredNetworkGroup &net_gr
         auto input_name = input.name();
         input_vstreams.emplace(input_name, std::make_unique<InputVStream>(std::move(input)));
     }
-    return InputVStreamsWrapper(input_vstreams);
+    return std::make_shared<InputVStreamsWrapper>(input_vstreams);
 }
 
 const InputVStreamsWrapper &InputVStreamsWrapper::enter()
@@ -128,53 +100,28 @@ void InputVStreamsWrapper::clear()
     VALIDATE_STATUS(status);
 }
 
-void InputVStreamsWrapper::before_fork()
-{
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-    for (auto &input_vstream : m_input_vstreams) {
-        auto status = input_vstream.second->before_fork();
-        VALIDATE_STATUS(status);
-    }
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-}
-
-void InputVStreamsWrapper::after_fork_in_parent()
-{
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-    for (auto &input_vstream : m_input_vstreams) {
-        auto status = input_vstream.second->after_fork_in_parent();
-        VALIDATE_STATUS(status);
-    }
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-}
-
-void InputVStreamsWrapper::after_fork_in_child()
-{
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-    for (auto &input_vstream : m_input_vstreams) {
-        auto status = input_vstream.second->after_fork_in_child();
-        VALIDATE_STATUS(status);
-    }
-#endif // HAILO_SUPPORT_MULTI_PROCESS    
-}
-
 void InputVStreamsWrapper::add_to_python_module(py::module &m)
 {
-    py::class_<InputVStreamsWrapper>(m, "InputVStreams")
+    py::class_<InputVStreamsWrapper, InputVStreamsWrapperPtr>(m, "InputVStreams")
     .def(py::init(&InputVStreamsWrapper::create))
     .def("get_input_by_name", &InputVStreamsWrapper::get_input_by_name)
     .def("get_all_inputs", &InputVStreamsWrapper::get_all_inputs)
     .def("clear", &InputVStreamsWrapper::clear)
     .def("__enter__", &InputVStreamsWrapper::enter, py::return_value_policy::reference)
     .def("__exit__",  [&](InputVStreamsWrapper &self, py::args) { self.exit(); })
-    .def("before_fork", &InputVStreamsWrapper::before_fork)
-    .def("after_fork_in_parent",  &InputVStreamsWrapper::after_fork_in_parent)
-    .def("after_fork_in_child", &InputVStreamsWrapper::after_fork_in_child)
     ;
 }
 
-InputVStreamsWrapper::InputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<InputVStream>> &input_vstreams)
-    : m_input_vstreams(std::move(input_vstreams))
+InputVStreamsWrapper::InputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<InputVStream>> &input_vstreams) :
+    m_input_vstreams(std::move(input_vstreams))
+#ifdef HAILO_IS_FORK_SUPPORTED
+        ,
+        m_atfork_guard(this, {
+            .before_fork = [this]() { before_fork(); },
+            .after_fork_in_parent = [this]() { after_fork_in_parent(); },
+            .after_fork_in_child = [this]() { after_fork_in_child(); }
+        })
+#endif
 {}
 
 py::dtype OutputVStreamWrapper::get_dtype(OutputVStream &self)
@@ -214,36 +161,21 @@ void OutputVStreamWrapper::add_to_python_module(py::module &m)
         return py::array(get_dtype(self), get_shape(self), unmanaged_addr,
             py::capsule(unmanaged_addr, [](void *p) { delete reinterpret_cast<uint8_t*>(p); }));
     })
-    .def("before_fork", [](OutputVStream &self)
+    .def("set_nms_score_threshold", [](OutputVStream &self, float32_t threshold)
     {
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-        auto status = self.before_fork();
+        hailo_status status = self.set_nms_score_threshold(threshold);
         VALIDATE_STATUS(status);
-#else
-        (void)self;
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-    }
-    )
-    .def("after_fork_in_parent", [](OutputVStream &self)
+    })
+    .def("set_nms_iou_threshold", [](OutputVStream &self, float32_t threshold)
     {
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-        auto status = self.after_fork_in_parent();
+        hailo_status status = self.set_nms_iou_threshold(threshold);
         VALIDATE_STATUS(status);
-#else
-        (void)self;
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-    }
-    )
-    .def("after_fork_in_child", [](OutputVStream &self)
+    })
+    .def("set_nms_max_proposals_per_class", [](OutputVStream &self, uint32_t max_proposals_per_class)
     {
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-        auto status = self.after_fork_in_child();
+        hailo_status status = self.set_nms_max_proposals_per_class(max_proposals_per_class);
         VALIDATE_STATUS(status);
-#else
-        (void)self;
-#endif // HAILO_SUPPORT_MULTI_PROCESS
-    }
-    )
+    })
     .def_property_readonly("info", [](OutputVStream &self)
     {
         return self.get_info();
@@ -254,7 +186,7 @@ void OutputVStreamWrapper::add_to_python_module(py::module &m)
     ;
 }
 
-OutputVStreamsWrapper OutputVStreamsWrapper::create(ConfiguredNetworkGroup &net_group,
+OutputVStreamsWrapperPtr OutputVStreamsWrapper::create(ConfiguredNetworkGroup &net_group,
         const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params)
 {
     auto output_vstreams_expected = VStreamsBuilder::create_output_vstreams(net_group, output_vstreams_params);
@@ -265,7 +197,7 @@ OutputVStreamsWrapper OutputVStreamsWrapper::create(ConfiguredNetworkGroup &net_
         auto output_name = output.name();
         output_vstreams.emplace(output_name, std::make_unique<OutputVStream>(std::move(output)));
     }
-    return OutputVStreamsWrapper(output_vstreams);
+    return std::make_shared<OutputVStreamsWrapper>(output_vstreams);
 }
 
 std::shared_ptr<OutputVStream> OutputVStreamsWrapper::get_output_by_name(const std::string &name)
@@ -301,65 +233,61 @@ void OutputVStreamsWrapper::clear()
     for (auto &name_vstream_pair : m_output_vstreams) {
         outputs.emplace_back(std::ref(*name_vstream_pair.second));
     }
-    
+
     auto status = OutputVStream::clear(outputs);
     VALIDATE_STATUS(status);
 }
 
 void OutputVStreamsWrapper::before_fork()
 {
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-    for (auto &output_vstream : m_output_vstreams) {
-        auto status = output_vstream.second->before_fork();
-        VALIDATE_STATUS(status);
+    for (auto &vstream : m_output_vstreams) {
+        vstream.second->before_fork();
     }
-#endif // HAILO_SUPPORT_MULTI_PROCESS
 }
 
 void OutputVStreamsWrapper::after_fork_in_parent()
 {
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-    for (auto &output_vstream : m_output_vstreams) {
-        auto status = output_vstream.second->after_fork_in_parent();
-        VALIDATE_STATUS(status);
+    for (auto &vstream : m_output_vstreams) {
+        vstream.second->after_fork_in_parent();
     }
-#endif // HAILO_SUPPORT_MULTI_PROCESS
 }
 
 void OutputVStreamsWrapper::after_fork_in_child()
 {
-#ifdef HAILO_SUPPORT_MULTI_PROCESS
-    for (auto &output_vstream : m_output_vstreams) {
-        auto status = output_vstream.second->after_fork_in_child();
-        VALIDATE_STATUS(status);
+    for (auto &vstream : m_output_vstreams) {
+        vstream.second->after_fork_in_child();
     }
-#endif // HAILO_SUPPORT_MULTI_PROCESS    
 }
 
 void OutputVStreamsWrapper::add_to_python_module(py::module &m)
 {
-    py::class_<OutputVStreamsWrapper>(m, "OutputVStreams")
+    py::class_<OutputVStreamsWrapper, OutputVStreamsWrapperPtr>(m, "OutputVStreams")
     .def(py::init(&OutputVStreamsWrapper::create))
     .def("get_output_by_name", &OutputVStreamsWrapper::get_output_by_name)
     .def("get_all_outputs", &OutputVStreamsWrapper::get_all_outputs)
     .def("clear", &OutputVStreamsWrapper::clear)
     .def("__enter__", &OutputVStreamsWrapper::enter, py::return_value_policy::reference)
     .def("__exit__",  [&](OutputVStreamsWrapper &self, py::args) { self.exit(); })
-    .def("before_fork", &OutputVStreamsWrapper::before_fork)
-    .def("after_fork_in_parent",  &OutputVStreamsWrapper::after_fork_in_parent)
-    .def("after_fork_in_child", &OutputVStreamsWrapper::after_fork_in_child)
     ;
 }
 
-OutputVStreamsWrapper::OutputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<OutputVStream>> &output_vstreams)
-    : m_output_vstreams(std::move(output_vstreams))
+OutputVStreamsWrapper::OutputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<OutputVStream>> &output_vstreams) :
+    m_output_vstreams(std::move(output_vstreams))
+#ifdef HAILO_IS_FORK_SUPPORTED
+        ,
+        m_atfork_guard(this, {
+            .before_fork = [this]() { before_fork(); },
+            .after_fork_in_parent = [this]() { after_fork_in_parent(); },
+            .after_fork_in_child = [this]() { after_fork_in_child(); }
+        })
+#endif
 {}
 
-InferVStreamsWrapper InferVStreamsWrapper::create(ConfiguredNetworkGroup &network_group,
+InferVStreamsWrapper InferVStreamsWrapper::create(ConfiguredNetworkGroupWrapper &network_group,
     const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params,
     const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params)
 {
-    auto infer_pipeline = InferVStreams::create(network_group, input_vstreams_params, output_vstreams_params);
+    auto infer_pipeline = InferVStreams::create(network_group.get(), input_vstreams_params, output_vstreams_params);
     VALIDATE_EXPECTED(infer_pipeline);
     auto infer_vstream_ptr = std::make_shared<InferVStreams>(std::move(infer_pipeline.value()));
 
@@ -435,6 +363,25 @@ void InferVStreamsWrapper::release()
     m_infer_pipeline.reset();
 }
 
+void InputVStreamsWrapper::before_fork()
+{
+    for (auto &vstream : m_input_vstreams) {
+        vstream.second->before_fork();
+    }
+}
+void InputVStreamsWrapper::after_fork_in_parent()
+{
+    for (auto &vstream : m_input_vstreams) {
+        vstream.second->after_fork_in_parent();
+    }
+}
+void InputVStreamsWrapper::after_fork_in_child()
+{
+    for (auto &vstream : m_input_vstreams) {
+        vstream.second->after_fork_in_child();
+    }
+}
+
 void InferVStreamsWrapper::add_to_python_module(py::module &m)
 {
     py::class_<InferVStreamsWrapper>(m, "InferVStreams")
@@ -444,6 +391,18 @@ void InferVStreamsWrapper::add_to_python_module(py::module &m)
     .def("get_user_buffer_format", &InferVStreamsWrapper::get_user_buffer_format)
     .def("infer", &InferVStreamsWrapper::infer)
     .def("release",  [](InferVStreamsWrapper &self, py::args) { self.release(); })
+    .def("set_nms_score_threshold", [](InferVStreamsWrapper &self, float32_t threshold)
+    {
+        VALIDATE_STATUS(self.m_infer_pipeline->set_nms_score_threshold(threshold));
+    })
+    .def("set_nms_iou_threshold", [](InferVStreamsWrapper &self, float32_t threshold)
+    {
+        VALIDATE_STATUS(self.m_infer_pipeline->set_nms_iou_threshold(threshold));
+    })
+    .def("set_nms_max_proposals_per_class", [](InferVStreamsWrapper &self, uint32_t max_proposals_per_class)
+    {
+        VALIDATE_STATUS(self.m_infer_pipeline->set_nms_max_proposals_per_class(max_proposals_per_class));
+    })
     ;
 }
 
index d39c11e8935ab770ab2fa812bc5690eb1d0e06c7..a66459853bdd6cb830de12afa4b3b40a209a81c8 100644 (file)
 #ifndef _VSTREAM_API_HPP_
 #define _VSTREAM_API_HPP_
 
+#include "utils.hpp"
+
+#include "common/fork_support.hpp"
+
 #include "hailo/vstream.hpp"
 #include "hailo/inference_pipeline.hpp"
-#include "utils.hpp"
 
 #include <pybind11/pybind11.h>
 #include <pybind11/numpy.h>
 namespace hailort
 {
 
+class ConfiguredNetworkGroupWrapper;
+
 class InputVStreamWrapper final
 {
 public:
     static void add_to_python_module(py::module &m);
 };
 
-class InputVStreamsWrapper final 
+
+class InputVStreamsWrapper;
+using InputVStreamsWrapperPtr = std::shared_ptr<InputVStreamsWrapper>;
+
+class InputVStreamsWrapper final
 {
 public:
-    static InputVStreamsWrapper create(ConfiguredNetworkGroup &net_group,
+    static InputVStreamsWrapperPtr create(ConfiguredNetworkGroup &net_group,
         const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params);
     const InputVStreamsWrapper &enter();
     void exit();
@@ -42,11 +51,17 @@ public:
     void before_fork();
     void after_fork_in_parent();
     void after_fork_in_child();
+
     static void add_to_python_module(py::module &m);
 
-private:
     InputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<InputVStream>> &input_vstreams);
+
     std::unordered_map<std::string, std::shared_ptr<InputVStream>> m_input_vstreams;
+
+private:
+#ifdef HAILO_IS_FORK_SUPPORTED
+    AtForkRegistry::AtForkGuard m_atfork_guard;
+#endif
 };
 
 class OutputVStreamWrapper final
@@ -58,10 +73,13 @@ public:
     static void add_to_python_module(py::module &m);
 };
 
+class OutputVStreamsWrapper;
+using OutputVStreamsWrapperPtr = std::shared_ptr<OutputVStreamsWrapper>;
+
 class OutputVStreamsWrapper final
 {
 public:
-    static OutputVStreamsWrapper create(ConfiguredNetworkGroup &net_group,
+    static OutputVStreamsWrapperPtr create(ConfiguredNetworkGroup &net_group,
         const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params);
     std::shared_ptr<OutputVStream> get_output_by_name(const std::string &name);
     const OutputVStreamsWrapper &enter();
@@ -73,15 +91,20 @@ public:
     void after_fork_in_child();
     static void add_to_python_module(py::module &m);
 
-private:
     OutputVStreamsWrapper(std::unordered_map<std::string, std::shared_ptr<OutputVStream>> &output_vstreams);
+
     std::unordered_map<std::string, std::shared_ptr<OutputVStream>> m_output_vstreams;
+
+private:
+#ifdef HAILO_IS_FORK_SUPPORTED
+    AtForkRegistry::AtForkGuard m_atfork_guard;
+#endif
 };
 
 class InferVStreamsWrapper final
 {
 public:
-    static InferVStreamsWrapper create(ConfiguredNetworkGroup &network_group,
+    static InferVStreamsWrapper create(ConfiguredNetworkGroupWrapper &network_group,
         const std::map<std::string, hailo_vstream_params_t> &input_vstreams_params,
         const std::map<std::string, hailo_vstream_params_t> &output_vstreams_params);
     void infer(std::map<std::string, py::array> input_data, std::map<std::string, py::array> output_data,
@@ -90,11 +113,14 @@ public:
     hailo_format_t get_user_buffer_format(const std::string &stream_name);
     std::vector<size_t> get_shape(const std::string &stream_name);
     void release();
+    void before_fork();
+    void after_fork_in_parent();
+    void after_fork_in_child();
     static void add_to_python_module(py::module &m);
 
 private:
     InferVStreamsWrapper(std::shared_ptr<InferVStreams> &infer_pipeline);
-    
+
     std::shared_ptr<InferVStreams> m_infer_pipeline;
 };
 
index 1f142d385b4680cf2adf8a340cce232c2730d524..f1adf4ccf1f4e5ff98196fb1325ee96398bcf894 100644 (file)
@@ -23,7 +23,7 @@
         package_dest: /usr/include/aarch64-linux-gnu
     -   version: '3.9'
         installation: manual
-        package_name: https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa/+build/26280901/+files/libpython3.9-dev_3.9.17-1+focal1_arm64.deb
+        package_name: https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa/+files/libpython3.9-dev_3.9.18-1+focal1_arm64.deb
         package_dest: /usr/include/aarch64-linux-gnu
     -   version: '3.10'
         installation: manual
index dd15bce2a8a492781a0870a64f34120822893aa2..d2444e146f9f6bd547cd5dabb8053350c8d1a116 100644 (file)
@@ -53,7 +53,7 @@ The following examples are provided, demonstrating the HailoRT API:
     - The threads will continuously initiate an async read or write operations.
     - The main thread will stop the async operations and the threads by deactivating the network group.
   - `multi_process_example` - Demonstrates how to work with HailoRT multi-process service and using the HailoRT Model Scheduler for network groups switching.
-  Using the script `multi_process_example.sh` one can specify the number of processes to run each hef, see `multi_process_example.sh -h` for more information.
+  Using the script `multi_process_example.sh` / `multi_process_example.ps1` one can specify the number of processes to run each hef, see `multi_process_example.sh -h` / `multi_process_example.ps1 -h` for more information.
   - `notification_callback_example` - Demonstrates how to work with notification callbacks, same as `notification_callback_example` C example.
 You can find more details about each example in the HailoRT user guide.
 ## Compiling with CMake
index 02644959be6aab4ffce4086528c991d03e2dfaf6..afd08086c4c465c7831e60d5e53526c5210e6a6c 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(data_quantization_example.c PROPERTIES LANGUAGE C)
 
index 0a807a5adc1fbf3143c7914736aa6d599740876b..b7c49c521d108233187f6a767cf60097b0da906a 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(infer_pipeline_example.c PROPERTIES LANGUAGE C)
 
index 0b501c92a478d30a608695cf32d246e129f2ecc6..7537adba79b7ccaed9dee742b7459e77af54daf0 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(multi_device_example.c PROPERTIES LANGUAGE C)
 
index 0ca96b47c79d2723c467903ccf9e4848fdfc68c5..63cb7930202fe374abff49ba9bed2eac05bf6537 100644 (file)
@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.0.0)
 find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(multi_network_vstream_example.c PROPERTIES LANGUAGE C)
 
index 6345906b0e3ba803d2b1242e454f0730df15b7f4..f2da6a885459de400ac151af9974ac3c5d8c1621 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(notification_callback_example.c PROPERTIES LANGUAGE C)
 
index d3921a30296082294b14b91aca377c3ef292221a..46e62470d31f813254da9c3cbb25a1d555f85d48 100644 (file)
@@ -1,6 +1,6 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(power_measurement_example.c PROPERTIES LANGUAGE C)
 
index 2fe6c2752ca688a49bef8fc8be5e2ce2d62b5296..f6a75659eb422291537b90c1de9590bbec986d76 100644 (file)
@@ -1,14 +1,11 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
-set(THREADS_PREFER_PTHREAD_FLAG ON)
-
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(raw_async_streams_single_thread_example.c PROPERTIES LANGUAGE C)
 
 add_executable(c_raw_async_streams_single_thread_example raw_async_streams_single_thread_example.c)
-target_link_libraries(c_raw_async_streams_single_thread_example PRIVATE HailoRT::libhailort Threads::Threads)
+target_link_libraries(c_raw_async_streams_single_thread_example PRIVATE HailoRT::libhailort)
 target_include_directories(c_raw_async_streams_single_thread_example PRIVATE "${CMAKE_CURRENT_LIST_DIR}/../common")
 
 if(WIN32)
index b92b40c3aac4d27eccf1c35c4e54695afebcb673..87e76ac665d7a357346ae6f0aeab314c62eb88bf 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(raw_streams_example.c PROPERTIES LANGUAGE C)
 
index dfad8155dbac504bb95c942afbba13867629a8b8..e4b0e6157e1a12569db753abf87e303c07403c01 100644 (file)
@@ -185,7 +185,7 @@ int main()
     hailo_output_stream output_streams [MAX_EDGE_LAYERS] = {NULL};
     size_t number_input_streams = 0;
     size_t number_output_streams = 0;
-    size_t index = 0;
+    size_t i = 0;
 
     status = hailo_scan_devices(NULL, device_ids, &actual_devices_count);
     REQUIRE_SUCCESS(status, l_exit, "Failed to scan devices");
@@ -214,14 +214,14 @@ int main()
         &number_output_streams);
     REQUIRE_SUCCESS(status, l_release_hef, "Failed getting output streams infos");
 
-    for (index = 0; index < number_input_streams; index++) {
-        status = hailo_get_input_stream(network_group, input_streams_info[index].name, &input_streams[index]);
-        REQUIRE_SUCCESS(status, l_release_hef, "Failed getting input stream %s", input_streams_info[index].name);
+    for (i = 0; i < number_input_streams; i++) {
+        status = hailo_get_input_stream(network_group, input_streams_info[i].name, &input_streams[i]);
+        REQUIRE_SUCCESS(status, l_release_hef, "Failed getting input stream %s", input_streams_info[i].name);
     }
 
-    for (index = 0; index < number_output_streams; index++) {
-        status = hailo_get_output_stream(network_group, output_streams_info[index].name, &output_streams[index]);
-        REQUIRE_SUCCESS(status, l_release_hef, "Failed getting output stream %s", output_streams_info[index].name);
+    for (i = 0; i < number_output_streams; i++) {
+        status = hailo_get_output_stream(network_group, output_streams_info[i].name, &output_streams[i]);
+        REQUIRE_SUCCESS(status, l_release_hef, "Failed getting output stream %s", output_streams_info[i].name);
     }
 
     status = hailo_activate_network_group(network_group, NULL, &activated_network_group);
index 05ee65ee6da4a1002b1c471733ff646d151c9d42..44e28161795c760ab0346b173ea7ae6d7e774f72 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(switch_network_groups_example.c PROPERTIES LANGUAGE C)
 
index 2efcf9aca13ff301d99704fdb2a859d76a5fbe5a..7b1052573311b511f551922c43b58dc9a2a59cf3 100644 (file)
@@ -192,6 +192,7 @@ int main()
     read_thread_args_t read_args[HEF_COUNT][MAX_EDGE_LAYERS];
 
     char HEF_FILES[HEF_COUNT][MAX_HEF_PATH_LEN] = {"hefs/multi_network_shortcut_net.hef", "hefs/shortcut_net.hef"};
+    // Note: the default batch_size is 0; this example sets explicit batch sizes instead
     uint16_t batch_sizes[HEF_COUNT] = {BATCH_SIZE_1, BATCH_SIZE_2};
 
     status = hailo_init_vdevice_params(&params);
index 7687b0a35113034f8e871f15b46e450205b2d3e7..47536dff1defefdc33683b59bdf67f038fd569e3 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(switch_network_groups_manually_example.c PROPERTIES LANGUAGE C)
 
index fb4af1be1ad0aafb862449a92cdfdf6f3e5625ef..a7fa7856dd8e05641b4f9fd1fa874ffa1b4f9b2f 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 SET_SOURCE_FILES_PROPERTIES(vstreams_example.c PROPERTIES LANGUAGE C)
 
index ba966bf1166e846213064ba51b7e1575c39f44fe..66e31716743152cb22463395c05083b0a776881f 100644 (file)
@@ -29,7 +29,7 @@ if(NOT CMAKE_SYSTEM_NAME STREQUAL QNX)
     # TODO: HRT-10956 support QNX async examples
     add_subdirectory(raw_async_streams_multi_thread_example)
     add_subdirectory(raw_async_streams_single_thread_example)
-    set(CPP_EXAMPLE_TARGETS ${C_EXAMPLE_TARGETS}
+    set(CPP_EXAMPLE_TARGETS ${CPP_EXAMPLE_TARGETS}
         cpp_raw_async_streams_multi_thread_example
         cpp_raw_async_streams_single_thread_example)
 endif()
index 8967d4224fda0f0fb5b8777337ddaf9da64fdb33..bbaa826bc396c7917e971158e78f309e0008d953 100644 (file)
@@ -1,6 +1,6 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_infer_pipeline_example infer_pipeline_example.cpp)
 target_link_libraries(cpp_infer_pipeline_example PRIVATE HailoRT::libhailort)
index d766e49a82321e6cf14ab442e897f95be530d817..012440197071629f0b1cacbc77ca567ffe681e70 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_multi_device_example multi_device_example.cpp)
 target_link_libraries(cpp_multi_device_example PRIVATE HailoRT::libhailort Threads::Threads)
index e3d894281e5275c465965370c27c71a5bb81e2fc..8374a42600d4b6bb204c9ccf11033ba6080865bf 100644 (file)
@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.0.0)
 find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_multi_network_vstream_example multi_network_vstream_example.cpp)
 target_link_libraries(cpp_multi_network_vstream_example PRIVATE HailoRT::libhailort Threads::Threads)
index 0ad9311b7e5ecace5b27729064d645533904ee5e..b5257bb085d02b2913892693cd2f070186506c2b 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_multi_process_example multi_process_example.cpp)
 target_link_libraries(cpp_multi_process_example PRIVATE HailoRT::libhailort Threads::Threads)
diff --git a/hailort/libhailort/examples/cpp/multi_process_example/multi_process_example.ps1 b/hailort/libhailort/examples/cpp/multi_process_example/multi_process_example.ps1
new file mode 100644 (file)
index 0000000..884f791
--- /dev/null
@@ -0,0 +1,68 @@
+Param(
+    [Switch]$h,
+    [Int]$n = 1,
+    [Int]$m = 1
+)
+
+$max_processes_count = 8
+$first_hef="hefs\multi_network_shortcut_net.hef"
+$second_hef="hefs\shortcut_net.hef"
+$executable_base_name="cpp_multi_process_example"
+$executable_name="$executable_base_name.exe"
+
+# find the executable (can be under Release / Debug dirs)
+$files = Get-ChildItem -Filter "$executable_name" -Recurse -File
+if ($files.Count -eq 0) {
+    Write-Error "No files found."
+} elseif ($files.Count -gt 1) {
+    Write-Host "More than one file found:"
+    foreach ($file in $files) {
+        Write-Host $file.FullName
+    }
+    Write-Error "Delete all but one of the files."
+} else {
+    $executable=$files.FullName
+}
+
+function Show-Help {
+    Write-Host "Usage: [-h] [-n <number>] [-m <number>]"
+    Write-Host "  -h    Print usage and exit"
+    Write-Host "  -n    Number of processes to run example with $first_hef. Max is $max_processes_count (default is $max_processes_count)"
+    Write-Host "  -m    Number of processes to run example with $second_hef. Max is $max_processes_count (default is $max_processes_count)"
+}
+
+if ($h) {
+    Show-Help
+    exit
+}
+
+if ($n -gt $max_processes_count) {
+    Write-Host "Max processes to run each hef is $max_processes_count! Given $n for $first_hef"
+    exit 1
+}
+
+if ($m -gt $max_processes_count) {
+    Write-Host "Max processes to run each hef is $max_processes_count! Given $m for $second_hef"
+    exit 1
+}
+
+$max_hef_count = If ($n -gt $m) { $n } Else { $m }
+$i = 1
+
+do {
+    if ($i -le $n) {
+        Start-Process -FilePath $executable -ArgumentList $first_hef -NoNewWindow
+        Write-Host "($i / $n) starting 1st hef "
+    }
+
+    if ($i -le $m) {
+        Start-Process -FilePath $executable -ArgumentList $second_hef -NoNewWindow
+        Write-Host "($i / $m) starting 2nd hef "
+    }
+
+    $i++
+} while ($i -le $max_hef_count)
+
+$processes = Get-Process | Where-Object { $_.Name -in "$executable_base_name" } | ForEach-Object {
+        $_ | Wait-Process
+}
index 208199ae40d1dac05a2005eba1b6f5aed811a580..e01b717d1cb9d5099f9e049e47bfd65f8578ddb0 100644 (file)
@@ -1,6 +1,6 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_notification_callback_example notification_callback_example.cpp)
 target_link_libraries(cpp_notification_callback_example PRIVATE HailoRT::libhailort)
index 17522afca37c5142cdb1035c9fff5c67c9cba438..2db7d96854ea53de1cb54ffe05bb1bb4973d6eaf 100644 (file)
@@ -1,6 +1,6 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_power_measurement_example power_measurement_example.cpp)
 target_link_libraries(cpp_power_measurement_example PRIVATE HailoRT::libhailort)
index d89940f976bcf9af90107c7f24bdd45e2b390e59..db6e185a5370fff3ee2bd07528296feccf0b9e56 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_raw_async_streams_multi_thread_example raw_async_streams_multi_thread_example.cpp)
 target_link_libraries(cpp_raw_async_streams_multi_thread_example PRIVATE HailoRT::libhailort Threads::Threads)
index 0c240872d51eab1f972cf2161b66a3d00a5fe453..ce8dc326f86f95ba26fb83412bd1fa5469a77b7e 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_raw_async_streams_single_thread_example raw_async_streams_single_thread_example.cpp)
 target_link_libraries(cpp_raw_async_streams_single_thread_example PRIVATE HailoRT::libhailort Threads::Threads)
index d30f854b43d500bc8016ad5bc5e6f891b781cd81..8bd3678776e96b942f63f29def837c1bf5e0f010 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_raw_streams_example raw_streams_example.cpp)
 target_link_libraries(cpp_raw_streams_example PRIVATE HailoRT::libhailort Threads::Threads)
index 3338ff16376b5585d4c976494550e5e74d77d185..5c0ca8fd51455e82174944ead8acff06de032e70 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_switch_network_groups_example switch_network_groups_example.cpp)
 target_link_libraries(cpp_switch_network_groups_example PRIVATE HailoRT::libhailort Threads::Threads)
index 156d5235b37729de1d6351df7c2c9669dcfd7725..b035fef69d0bdca6a842393f1d7a25e0d4367e1c 100644 (file)
@@ -150,6 +150,7 @@ int main()
     }
     auto vdevice = vdevice_exp.release();
 
+    // Note: default batch_size is 0, which is not used in this example
     std::vector<uint16_t> batch_sizes { BATCH_SIZE_1, BATCH_SIZE_2 };
     std::vector<std::string> hef_paths = {"hefs/multi_network_shortcut_net.hef", "hefs/shortcut_net.hef"};
 
index 8ef520bc135f276df6c8dfd3e55c2c48c8db766d..7777a941ee49fc7ebdf23133468ec8f009cee1ef 100644 (file)
@@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.0.0)
 find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_switch_network_groups_manually_example switch_network_groups_manually_example.cpp)
 target_link_libraries(cpp_switch_network_groups_manually_example PRIVATE HailoRT::libhailort Threads::Threads)
index 771516413e2c0c18f203e9cf013c9e016eef8bd3..7ea25662ec1e6ad7748fbec7ff0251056628a272 100644 (file)
@@ -1,9 +1,9 @@
 cmake_minimum_required(VERSION 3.0.0)
 
-find_package(Threads REQUIRED)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+find_package(Threads REQUIRED)
 
-find_package(HailoRT 4.14.0 EXACT REQUIRED)
+find_package(HailoRT 4.15.0 EXACT REQUIRED)
 
 add_executable(cpp_vstreams_example vstreams_example.cpp)
 target_link_libraries(cpp_vstreams_example PRIVATE HailoRT::libhailort Threads::Threads)
index 3c5e90908f4a080926a323031720f0ebc028c340..7c231c3092ac880b801e76ba9671fa105740386a 100644 (file)
@@ -37,7 +37,7 @@ enum ProtoHEFExtensionType {
     HW_PADDING = 11;
     KO_RUN_ASAP = 12;
     HAILO_NET_FLOW = 13;
-    HAILO_NET_FLOW_YOLO_NMS = 14;
+    HAILO_NET_FLOW_YOLOV5_NMS = 14;
     HAILO_NET_FLOW_YOLOX_NMS = 15;
     HAILO_NET_FLOW_SSD_NMS = 16;
     HAILO_NET_FLOW_IOU_NMS = 17;
@@ -47,6 +47,7 @@ enum ProtoHEFExtensionType {
     HAILO_NET_FLOW_ARGMAX = 21;
     HAILO_NET_FLOW_SOFTMAX = 22;
     ALIGNED_FORMAT_TYPE = 23;
+    HAILO_NET_FLOW_YOLOV5_SEG_NMS = 24;
     OUTPUT_SCALE_PER_FEATURE = 25;
     PERIPH_CALCULATION_IN_HAILORT = 26;
     UNUSED = 0XFFFF;
@@ -72,7 +73,7 @@ message ProtoHEFHeader {
     // The version of the SDK the HEF has been created with
     string sdk_version = 3;
 
-    // The format version of the hef file 
+    // The format version of the hef file
     uint64 version = 4;
 }
 
@@ -90,6 +91,7 @@ enum ProtoHEFHwArch {
     PROTO__HW_ARCH__HAILO15H = 103;
     PROTO__HW_ARCH__GINGER = 104;
     PROTO__HW_ARCH__LAVENDER = 105;
+    PROTO__HW_ARCH__PLUTO = 106;
 }
 
 
@@ -217,6 +219,32 @@ message ProtoHEFSSDNmsOp {
 
 message ProtoHEFIOUNmsOp {};
 
+message ProtoHEFYoloSegProtoInfo {
+    // Prototype info mask number
+    uint32 number = 1;
+
+    // Prototype info mask stride, in pixels
+    uint32 stride = 2;
+
+    // The name of the proto layer
+    string proto_layer = 3;
+};
+
+message ProtoHEFYoloSegNmsOp {
+    // Input image dimensions
+    double image_height = 1;
+    double image_width = 2;
+
+    // List of bbox decoders (anchors) for the NMS layer. Each model has its own number of boxes per anchor
+    repeated ProtoHEFYoloBboxDecoder bbox_decoders = 3;
+
+    // Prototype info masks
+    ProtoHEFYoloSegProtoInfo proto_info = 4;
+
+    // Mask threshold
+    double mask_threshold = 5;
+};
+
 message ProtoHEFNmsOp {
     // NMS score threshold
     double nms_score_th = 1;
@@ -242,6 +270,7 @@ message ProtoHEFNmsOp {
         ProtoHEFYoloxNmsOp yolox_nms_op = 8; // YOLO-X post process
         ProtoHEFSSDNmsOp ssd_nms_op = 9; // SSD post process
         ProtoHEFIOUNmsOp iou_op = 10; // IoU only
+        ProtoHEFYoloSegNmsOp yolo_seg_op = 11; // YOLOv5 seg post process
     }
 };
 
@@ -656,6 +685,9 @@ message ProtoHEFActionEnableNMS {
 
     // Burst-size
     uint32 burst_size = 4;
+
+    // Division-factor
+    uint32 division_factor = 5;
 }
 
 // None action - Do not do anything
@@ -717,6 +749,7 @@ message ProtoHEFEdgeLayer {
     oneof edge {
         ProtoHEFEdgeLayerInfo layer_info = 3;
         ProtoHEFEdgeLayerMux layer_mux = 4;
+        ProtoHEFEdgeLayerPlanes layer_planes = 8;
     };
     ProtoHEFContextSwitchInformation context_switch_info = 5;
     uint32 network_index = 6;
@@ -735,6 +768,7 @@ enum ProtoHEFEdgeLayerDirection {
 enum ProtoHEFEdgeLayerType {
     PROTO__EDGE_LAYER_TYPE__INFO = 0;
     PROTO__EDGE_LAYER_TYPE__MUX = 1;
+    PROTO__EDGE_LAYER_TYPE__PLANES = 2;
 }
 
 message ProtoHEFEdgeLayerInfo {
@@ -750,6 +784,7 @@ message ProtoHefEdge {
     oneof edge {
         ProtoHEFEdgeLayerInfo layer_info = 1;
         ProtoHEFEdgeLayerMux layer_mux = 2;
+        ProtoHEFEdgeLayerPlanes layer_planes = 3;
     };
 }
 
@@ -762,6 +797,21 @@ message ProtoHEFEdgeLayerMux {
     repeated string original_names = 6;
 }
 
+enum ProtoHEFEPlanesFormat {
+    PROTO__PLANES__FORMAT__NV12 = 0;
+    PROTO__PLANES__FORMAT__NV21 = 1;
+    PROTO__PLANES__FORMAT__I420 = 2;
+}
+
+message ProtoHEFEdgeLayerPlanes {
+    string name = 1;
+    ProtoHEFEPlanesFormat planes_format = 2;
+    repeated ProtoHefEdge planes = 3;
+    uint32 height = 4;
+    uint32 width = 5;
+    uint32 features = 6;
+}
+
 message ProtoHEFEdgeLayerMuxData {
     uint32 number_of_predecessors = 1;
     uint32 height_gcd = 2;
index e0693ba6d78288904fd5b2a496a5e1ec8f030d93..1b647f95dc716c1dbf4f8034e8f7fce04f1135f3 100644 (file)
@@ -177,7 +177,8 @@ public:
 
     MemoryView& operator=(MemoryView&& other) = default;
     MemoryView(const MemoryView &) = default;
-    MemoryView& operator=(MemoryView &) = default;
+    MemoryView& operator=(const MemoryView &) = default;
+    MemoryView(MemoryView &&) = default;
 
     static const MemoryView create_const(const void *data, size_t size);
 
index bb12702e2436f215d0976ce03a56060ac1ee045a..8ad3e9d6d09e2a972d64d072509d83eebfdb2d77 100644 (file)
@@ -298,7 +298,7 @@ public:
      * Get current throttling state of temperature protection and overcurrent protection components.
      * If any throttling is enabled, the function return true.
      *
-     * @return Upon success, returns Expected of @a bool, indicates weather the throttling state is active or not.
+     * @return Upon success, returns Expected of @a bool, indicates whether the throttling state is active or not.
      *         Otherwise, returns Unexpected of ::hailo_status error.
      */
     Expected<bool> get_throttling_state();
@@ -347,7 +347,7 @@ public:
     /**
      * Enable/Disable Pause frames.
      *
-     * @param[in] rx_pause_frames_enable  Indicating weather to enable or disable pause frames.
+     * @param[in] rx_pause_frames_enable  Indicating whether to enable or disable pause frames.
      * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns an ::hailo_status error.
      */
     hailo_status set_pause_frames(bool rx_pause_frames_enable);
@@ -498,7 +498,7 @@ public:
      *  Update the firmware of a Hailo device.
      * 
      * @param[in] firmware_binary       The firmware code to be updated to the device.
-     * @param[in] should_reset          Bool indicating weather to reset the device after updating.
+     * @param[in] should_reset          Bool indicating whether to reset the device after updating.
      * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
      * @note Calling this function while running other operations on the device (including inference) will
      * lead to unexpected results!
index 28ebf6119f7459ba394fabff03e33221e6fe4d6d..1dc826117f6e6682342a73ead7afbeb6565ed549 100644 (file)
@@ -105,7 +105,7 @@ public:
     using Waitable::Waitable;
 
     static Expected<Event> create(const State& initial_state);
-    static EventPtr create_shared(const State& initial_state);
+    static Expected<EventPtr> create_shared(const State& initial_state);
 
     virtual hailo_status signal() override;
     virtual bool is_auto_reset() override;
index 67e57b919aed853124735907de21266e2500f8cd..c33b94e089948acff3b6928bad4c703b00f7bfce 100644 (file)
@@ -73,6 +73,8 @@ extern "C" {
 #define HAILO_SCHEDULER_PRIORITY_MAX (31)
 #define HAILO_SCHEDULER_PRIORITY_MIN (0)
 
+#define MAX_NUMBER_OF_PLANES (4)
+
 typedef float float32_t;
 typedef double float64_t;
 typedef uint16_t nms_bbox_counter_t;
@@ -113,7 +115,7 @@ typedef uint16_t nms_bbox_counter_t;
     HAILO_STATUS__X(31, HAILO_STOP_VDMA_CHANNEL_FAIL                  /*!< Stopping VDMA channel failure */)\
     HAILO_STATUS__X(32, HAILO_CLOSE_VDMA_CHANNEL_FAIL                 /*!< Closing VDMA channel failure */)\
     HAILO_STATUS__X(33, HAILO_ATR_TABLES_CONF_VALIDATION_FAIL         /*!< Validating address translation tables failure, for FW control use */)\
-    HAILO_STATUS__X(34, HAILO_CONTROL_EVENT_CREATE_FAIL               /*!< Creating control event failure */)\
+    HAILO_STATUS__X(34, HAILO_EVENT_CREATE_FAIL                       /*!< Creating event failure */)\
     HAILO_STATUS__X(35, HAILO_READ_EVENT_FAIL                         /*!< Reading event failure */)\
     HAILO_STATUS__X(36, HAILO_DRIVER_FAIL                             /*!< Driver failure */)\
     HAILO_STATUS__X(37, HAILO_INVALID_FIRMWARE_MAGIC                  /*!< Invalid FW magic */)\
@@ -411,7 +413,8 @@ typedef enum hailo_device_architecture_e {
     HAILO_ARCH_HAILO8_A0 = 0,
     HAILO_ARCH_HAILO8,
     HAILO_ARCH_HAILO8L,
-    HAILO_ARCH_HAILO15,
+    HAILO_ARCH_HAILO15H,
+    HAILO_ARCH_PLUTO,
     
     /** Max enum value to maintain ABI Integrity */
     HAILO_ARCH_MAX_ENUM = HAILO_MAX_ENUM
@@ -557,28 +560,28 @@ typedef enum {
     /**
      * Chosen automatically to match the format expected by the device.
      */
-    HAILO_FORMAT_ORDER_AUTO                  = 0,
+    HAILO_FORMAT_ORDER_AUTO                             = 0,
 
     /**
      *  - Host side: [N, H, W, C]
-     *  - Device side: [N, H, W, C], where width is padded to 8 elements
+     *  - Device side: [N, H, W, C], where width is padded to 8 bytes
      */
-    HAILO_FORMAT_ORDER_NHWC                 = 1,
+    HAILO_FORMAT_ORDER_NHWC                             = 1,
 
     /**
      *  - Not used for host side
-     *  - Device side: [N, H, C, W], where width is padded to 8 elements
+     *  - Device side: [N, H, C, W], where width is padded to 8 bytes
      */
-    HAILO_FORMAT_ORDER_NHCW                 = 2,
+    HAILO_FORMAT_ORDER_NHCW                             = 2,
 
     /**
      * FCR means first channels (features) are sent to HW:
      *  - Host side: [N, H, W, C]
      *  - Device side: [N, H, W, C]:
-     *      - Input - channels are expected to be aligned to 8 elements
-     *      - Output - width is padded to 8 elements
+     *      - Input - channels are expected to be aligned to 8 bytes
+     *      - Output - width is padded to 8 bytes
      */
-    HAILO_FORMAT_ORDER_FCR                  = 3,
+    HAILO_FORMAT_ORDER_FCR                              = 3,
 
     /**
      * F8CR means first 8-channels X width are sent to HW:
@@ -590,39 +593,39 @@ typedef enum {
      *      - W X 8C_1, W X 8C_2, ... , W X 8C_n
      * ...
      */
-    HAILO_FORMAT_ORDER_F8CR                 = 4,
+    HAILO_FORMAT_ORDER_F8CR                             = 4,
 
     /**
      * Output format of argmax layer:
      * - Host side: [N, H, W, 1]
-     * - Device side: [N, H, W, 1], where width is padded to 8 elements
+     * - Device side: [N, H, W, 1], where width is padded to 8 bytes
      */
-    HAILO_FORMAT_ORDER_NHW                  = 5,
+    HAILO_FORMAT_ORDER_NHW                              = 5,
 
     /**
      * Channels only:
      * - Host side: [N,C]
-     * - Device side: [N, C], where channels are padded to 8 elements
+     * - Device side: [N, C], where channels are padded to 8 bytes
      */
-    HAILO_FORMAT_ORDER_NC                   = 6,
+    HAILO_FORMAT_ORDER_NC                               = 6,
 
     /**
      * Bayer format:
      * - Host side: [N, H, W, 1]
-     * - Device side: [N, H, W, 1], where width is padded to 8 elements
+     * - Device side: [N, H, W, 1], where width is padded to 8 bytes
      */
-    HAILO_FORMAT_ORDER_BAYER_RGB            = 7,
+    HAILO_FORMAT_ORDER_BAYER_RGB                        = 7,
 
     /**
      * Bayer format, same as ::HAILO_FORMAT_ORDER_BAYER_RGB where
      * Channel is 12 bit
      */
-    HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB     = 8,
+    HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB                 = 8,
 
     /**
      * NMS bbox
      * - Host side
-     * 
+     *
      *      For each class (::hailo_nms_shape_t.number_of_classes), the layout is
      *          \code
      *          struct (packed) {
@@ -632,25 +635,25 @@ typedef enum {
      *          \endcode
      *
      *      The host format type can be either ::HAILO_FORMAT_TYPE_FLOAT32 or ::HAILO_FORMAT_TYPE_UINT16.
-     * 
+     *
      *      Maximum amount of bboxes per class is ::hailo_nms_shape_t.max_bboxes_per_class.
      *
      * - Device side output (result of NMS layer):
      *      Internal implementation
      */
-    HAILO_FORMAT_ORDER_HAILO_NMS            = 9,
+    HAILO_FORMAT_ORDER_HAILO_NMS                        = 9,
 
     /**
      * - Not used for host side
      * - Device side: [N, H, W, C], where channels are 4 (RGB + 1 padded zero byte) and width is padded to 8 elements
      */
-    HAILO_FORMAT_ORDER_RGB888               = 10,
+    HAILO_FORMAT_ORDER_RGB888                           = 10,
 
     /**
      * - Host side: [N, C, H, W]
      * - Not used for device side
      */
-    HAILO_FORMAT_ORDER_NCHW                 = 11,
+    HAILO_FORMAT_ORDER_NCHW                             = 11,
 
     /**
      * YUV format, encoding 2 pixels in 32 bits
@@ -658,53 +661,79 @@ typedef enum {
      * - Host side: [Y0, U0, Y1, V0]
      * - Device side: [Y0, U0, Y1, V0]
      */
-    HAILO_FORMAT_ORDER_YUY2                 = 12,
+    HAILO_FORMAT_ORDER_YUY2                             = 12,
 
     /**
      * YUV format, encoding 8 pixels in 96 bits
      *      [Y0, Y1, Y2, Y3, Y4, Y5, Y6, Y7, U0, V0, U1, V1] represents
      *          [Y0, U0, V0], [Y1, U0, V0], [Y2, U0, V0], [Y3, U0, V0], [Y4, U1, V1], [Y5, U1, V1], [Y6, U1, V1], [Y7, U1, V1]
+     * - Not used for device side
      */
-    HAILO_FORMAT_ORDER_NV12                 = 13,
+    HAILO_FORMAT_ORDER_NV12                             = 13,
 
     /**
      * YUV format, encoding 8 pixels in 96 bits
      *      [Y0, Y1, Y2, Y3, Y4, Y5, Y6, Y7, V0, U0, V1, U1] represents
      *          [Y0, V0, U0], [Y1, V0, U0], [Y2, V0, U0], [Y3, V0, U0], [Y4, V1, U1], [Y5, V1, U1], [Y6, V1, U1], [Y7, V1, U1]
+     * - Not used for device side
      */
-    HAILO_FORMAT_ORDER_NV21                 = 14,
+    HAILO_FORMAT_ORDER_NV21                             = 14,
 
     /**
      * Internal implementation for HAILO_FORMAT_ORDER_NV12 format
      *      [Y0, Y1, Y2, Y3, Y4, Y5, Y6, Y7, U0, V0, U1, V1] is represented by [Y0, Y1, Y2, Y3, U0, V0, Y4, Y5, Y6, Y7, U1, V1]
+     * - Not used for host side
      */
-    HAILO_FORMAT_ORDER_HAILO_YYUV           = 15,
+    HAILO_FORMAT_ORDER_HAILO_YYUV                       = 15,
 
     /**
      * Internal implementation for HAILO_FORMAT_ORDER_NV21 format
      *      [Y0, Y1, Y2, Y3, Y4, Y5, Y6, Y7, V0, U0, V1, U1] is represented by [Y0, Y1, Y2, Y3, V0, U0, Y4, Y5, Y6, Y7, V1, U1]
+     * - Not used for host side
      */
-    HAILO_FORMAT_ORDER_HAILO_YYVU           = 16,
+    HAILO_FORMAT_ORDER_HAILO_YYVU                       = 16,
 
     /**
      * RGB, where every row is padded to 4.
      * - Host side: [N, H, W, C], where width*channels are padded to 4.
      * - Not used for device side
      */
-    HAILO_FORMAT_ORDER_RGB4                 = 17,
+    HAILO_FORMAT_ORDER_RGB4                             = 17,
 
     /**
      * YUV format, encoding 8 pixels in 96 bits
      *      [Y0, Y1, Y2, Y3, Y4, Y5, Y6, Y7, U0, U1, V0, V1] represents
      *          [Y0, U0, V0,], [Y1, U0, V0], [Y2, U0, V0], [Y3, U0, V0], [Y4, U1, V1], [Y5, U1, V1], [Y6, U1, V1], [Y7, U1, V1]
+     * - Not used for device side
      */
-    HAILO_FORMAT_ORDER_I420                 = 18,
+    HAILO_FORMAT_ORDER_I420                             = 18,
 
     /**
      * Internal implementation for HAILO_FORMAT_ORDER_I420 format
      *      [Y0, Y1, Y2, Y3, Y4, Y5, Y6, Y7, U0, U1, V0, V1] is represented by [Y0, Y1, Y2, Y3, U0, V0, Y4, Y5, Y6, Y7, U1, V1]
+     * - Not used for host side
      */
-    HAILO_FORMAT_ORDER_HAILO_YYYYUV         = 19,
+    HAILO_FORMAT_ORDER_HAILO_YYYYUV                     = 19,
+
+    /**
+     * NMS bbox
+     * - Host side
+     *
+     *      For each class (::hailo_nms_shape_t.number_of_classes), the layout is
+     *          \code
+     *          struct (packed) {
+     *              float32_t bbox_count;
+     *              hailo_bbox_with_byte_mask_t bbox_with_byte_mask[bbox_count];
+     *          };
+     *          \endcode
+     *
+     *      The host format type supported is ::HAILO_FORMAT_TYPE_FLOAT32.
+     *
+     *      Maximum amount of bboxes per class is ::hailo_nms_shape_t.max_bboxes_per_class.
+     *
+     * - Not used for device side
+     */
+    HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK         = 20,
 
     /** Max enum value to maintain ABI Integrity */
     HAILO_FORMAT_ORDER_MAX_ENUM             = HAILO_MAX_ENUM
@@ -720,6 +749,8 @@ typedef enum {
      * - Input data: HailoRT assumes that the data is already quantized (scaled) by the user,
      *   so it does not perform the quantization (scaling) step.
      * - Output data: The data will be returned to the user without rescaling (i.e., the data won't be rescaled by HailoRT).
+     * @note This flag is deprecated and its usage is ignored. Whether to quantize (or de-quantize) the data will be decided by
+     *       the src-data and dst-data types.
      */
     HAILO_FORMAT_FLAGS_QUANTIZED            = 1 << 0,
 
@@ -1197,16 +1228,34 @@ typedef struct {
     uint32_t features;
 } hailo_3d_image_shape_t;
 
+/** image buffer plane */
+typedef struct {
+    /** actual data */
+    uint32_t bytes_used;
+    uint32_t plane_size;
+    void *user_ptr;
+} hailo_pix_buffer_plane_t;
+
+/** image buffer */
+typedef struct {
+    uint32_t index;
+    hailo_pix_buffer_plane_t planes[MAX_NUMBER_OF_PLANES];
+    uint32_t number_of_planes;
+} hailo_pix_buffer_t;
+
 typedef struct {
     uint32_t class_group_index;
     char original_name[HAILO_MAX_STREAM_NAME_SIZE];
 } hailo_nms_defuse_info_t;
 
 typedef enum {
-    HAILO_BURST_TYPE_NO_BURST       = 0,
-    HAILO_BURST_TYPE_H8_PER_CLASS   = 1,
-    HAILO_BURST_TYPE_H15_PER_CLASS  = 2,
-    HAILO_BURST_TYPE_H15_PER_FRAME  = 3
+    HAILO_BURST_TYPE_H8_BBOX = 0,
+    HAILO_BURST_TYPE_H15_BBOX,
+    HAILO_BURST_TYPE_H8_PER_CLASS,
+    HAILO_BURST_TYPE_H15_PER_CLASS,
+    HAILO_BURST_TYPE_H15_PER_FRAME,
+
+    HAILO_BURST_TYPE_COUNT
 } hailo_nms_burst_type_t;
 
 /** NMS Internal HW Info */
@@ -1240,6 +1289,8 @@ typedef struct {
     uint32_t number_of_classes;
     /** Maximum amount of bboxes per nms class */
     uint32_t max_bboxes_per_class;
+    /** Maximum mask size */
+    uint32_t max_mask_size;
 } hailo_nms_shape_t;
 
 #pragma pack(push, 1)
@@ -1258,6 +1309,17 @@ typedef struct {
     float32_t x_max;
     float32_t score;
 } hailo_bbox_float32_t;
+
+typedef struct {
+    hailo_bbox_float32_t bbox;
+
+    /** Mask size in bytes */
+    uint32_t mask_size;
+
+    /** Mask */
+    // TODO: HRT-11413 - Add documentation on byte mask
+    uint8_t *mask;
+} hailo_bbox_with_byte_mask_t;
 #pragma pack(pop)
 
 /**
@@ -1310,8 +1372,8 @@ typedef struct {
     /** User specific data. Can be used as a context for the callback. */
     void *opaque;
 } hailo_stream_read_async_completion_info_t;
-
+
 /**
  * Async stream read complete callback prototype.
  */
 typedef void (*hailo_stream_read_async_callback_t)(const hailo_stream_read_async_completion_info_t *info);
@@ -1323,7 +1385,7 @@ typedef void (*hailo_stream_read_async_callback_t)(const hailo_stream_read_async
 typedef struct {
     /* Union to contain shapes and nms parameters - they cannot exist at the same time */
     union
-    { 
+    {
         struct
         {
             hailo_3d_image_shape_t shape;
@@ -1462,6 +1524,8 @@ typedef enum {
     HAILO_NOTIFICATION_ID_HEALTH_MONITOR_CLOCK_CHANGED_EVENT,
     /** Matches hailo_notification_message_parameters_t::hailo_hw_infer_manager_infer_done_notification */
     HAILO_NOTIFICATION_ID_HW_INFER_MANAGER_INFER_DONE,
+    /** Matches hailo_notification_message_parameters_t::context_switch_run_time_error */
+    HAILO_NOTIFICATION_ID_CONTEXT_SWITCH_RUN_TIME_ERROR_EVENT,
 
     /** Must be last! */
     HAILO_NOTIFICATION_ID_COUNT,
@@ -1551,6 +1615,14 @@ typedef struct {
     uint32_t infer_cycles;
 } hailo_hw_infer_manager_infer_done_notification_message_t;
 
+typedef struct {
+    uint32_t exit_status;
+    uint8_t network_group_index;
+    uint16_t batch_index;
+    uint8_t context_index;
+    uint16_t action_index;
+} hailo_context_switch_run_time_error_message_t;
+
 /** Union of all notification messages parameters. See ::hailo_notification_t */
 typedef union {
     /** Ethernet rx error */
@@ -1573,6 +1645,8 @@ typedef union {
     hailo_health_monitor_clock_changed_notification_message_t health_monitor_clock_changed_notification;
     /* HW infer manager finished infer notification */
     hailo_hw_infer_manager_infer_done_notification_message_t hw_infer_manager_infer_done_notification;
+    /** context switch run time error event */
+    hailo_context_switch_run_time_error_message_t context_switch_run_time_error;
 } hailo_notification_message_parameters_t;
 
 /** Notification data that will be passed to the callback passed in ::hailo_notification_callback */
@@ -2011,7 +2085,7 @@ HAILORTAPI hailo_status hailo_get_previous_system_state(hailo_device device, hai
  * Enable/Disable Pause frames.
  *
  * @param[in] device                  A ::hailo_device object.
- * @param[in] rx_pause_frames_enable  Indicating weather to enable or disable pause frames.
+ * @param[in] rx_pause_frames_enable  Indicating whether to enable or disable pause frames.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns an ::hailo_status error.
  */
 HAILORTAPI hailo_status hailo_set_pause_frames(hailo_device device, bool rx_pause_frames_enable);
@@ -2048,7 +2122,7 @@ HAILORTAPI hailo_status hailo_reset_device(hailo_device device, hailo_reset_devi
 /**
  * Updates firmware to device flash.
  * 
- * @param[in]  device                 A ::hailo_output_stream object.
+ * @param[in]  device                 A ::hailo_device object.
  * @param[in]  firmware_buffer        A pointer to a buffer that contains the firmware to be updated on the @a device.
  * @param[in]  firmware_buffer_size   The size in bytes of the buffer pointed by @a firmware_buffer.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
@@ -2060,7 +2134,7 @@ HAILORTAPI hailo_status hailo_update_firmware(hailo_device device, void *firmwar
 /**
  * Updates second stage to device flash.
  * 
- * @param[in]  device                 A ::hailo_output_stream object.
+ * @param[in]  device                 A ::hailo_device object.
  * @param[in]  second_stage_buffer        A pointer to a buffer that contains the second_stage to be updated on the @a device.
  * @param[in]  second_stage_buffer_size   The size in bytes of the buffer pointed by @a second_stage_buffer.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
@@ -2225,7 +2299,7 @@ HAILORTAPI hailo_status hailo_create_vdevice(hailo_vdevice_params_t *params, hai
  *
  * @param[in]  vdevice                     A ::hailo_vdevice object to be configured.
  * @param[in]  hef                         A ::hailo_hef object to configure the @a vdevice by.
- * @param[in]  params                      A @a hailo_configure_params_t (may be NULL). Can be initialzed to default values using ::hailo_init_configure_params.
+ * @param[in]  params                      A @a hailo_configure_params_t (may be NULL). Can be initialized to default values using ::hailo_init_configure_params_by_vdevice.
  * @param[out] network_groups              Array of network_groups that were loaded from the HEF file.
  * @param[inout] number_of_network_groups  As input - the size of network_groups array. As output - the number of network_groups loaded.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
@@ -2433,7 +2507,7 @@ HAILORTAPI hailo_status hailo_hef_get_stream_info_by_name(hailo_hef hef, const c
  * @param[out] vstream_infos             A pointer to a buffer of ::hailo_stream_info_t that receives the informations.
  * @param[inout] vstream_infos_count     As input - the maximum amount of entries in @a vstream_infos array.
  *                                       As output - the actual amount of entries written if the function returns with ::HAILO_SUCCESS
- *                                       and the amount of entries needed if the function returns ::HAILO_INSUFFICIENT_BUFFER.
+ *                                       or the amount of entries needed if the function returns ::HAILO_INSUFFICIENT_BUFFER.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
  */
 HAILORTAPI hailo_status hailo_hef_get_all_vstream_infos(hailo_hef hef, const char *name,
@@ -2631,7 +2705,7 @@ HAILORTAPI hailo_status hailo_init_configure_network_group_params_mipi_input(hai
  *
  * @param[in]  device                      A ::hailo_device object to be configured.
  * @param[in]  hef                         A ::hailo_hef object to configure the @a device by.
- * @param[in]  params                      A @a hailo_configure_params_t (may be NULL). Can be initialzed to default values using ::hailo_init_configure_params.
+ * @param[in]  params                      A @a hailo_configure_params_t (may be NULL). Can be initialized to default values using ::hailo_init_configure_params_by_device.
  * @param[out] network_groups              Array of network_groups that were loaded from the HEF file.
  * @param[inout] number_of_network_groups  As input - the size of network_groups array. As output - the number of network_groups loaded.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
@@ -2896,6 +2970,34 @@ HAILORTAPI hailo_status hailo_get_input_stream_info(hailo_input_stream stream, h
  */
 HAILORTAPI hailo_status hailo_get_output_stream_info(hailo_output_stream stream, hailo_stream_info_t *stream_info);
 
+/**
+ * Gets quant infos for a given input stream.
+ * 
+ * @param[in]     stream        A ::hailo_input_stream object.
+ * @param[out]    quant_infos   A pointer to a buffer of @a hailo_quant_info_t that will be filled with quant infos.
+ * @param[inout]  quant_infos_count   As input - the maximum amount of entries in @a quant_infos array.
+ *                                    As output - the actual amount of entries written if the function returns with ::HAILO_SUCCESS
+ *                                    or the amount of entries needed if the function returns ::HAILO_INSUFFICIENT_BUFFER.
+ * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
+ * @note In case a single qp is present - the returned list will be of size 1.
+ *       Otherwise - the returned list will be of the same length as the number of the frame's features.
+ */
+HAILORTAPI hailo_status hailo_get_input_stream_quant_infos(hailo_input_stream stream, hailo_quant_info_t *quant_infos, size_t *quant_infos_count);
+
+/**
+ * Gets quant infos for a given output stream.
+ * 
+ * @param[in]     stream       A ::hailo_output_stream object.
+ * @param[out]    quant_infos  A pointer to a buffer of @a hailo_quant_info_t that will be filled with quant infos.
+ * @param[inout]  quant_infos_count   As input - the maximum amount of entries in @a quant_infos array.
+ *                                    As output - the actual amount of entries written if the function returns with ::HAILO_SUCCESS
+ *                                    or the amount of entries needed if the function returns ::HAILO_INSUFFICIENT_BUFFER.
+ * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
+ * @note In case a single qp is present - the returned list will be of size 1.
+ *       Otherwise - the returned list will be of the same length as the number of the frame's features.
+ */
+HAILORTAPI hailo_status hailo_get_output_stream_quant_infos(hailo_output_stream stream, hailo_quant_info_t *quant_infos, size_t *quant_infos_count);
+
 /**
  * Synchronously reads data from a stream.
  *
@@ -3082,7 +3184,6 @@ HAILORTAPI size_t hailo_get_host_frame_size(const hailo_stream_info_t *stream_in
  * @param[out]    transform_context - A ::hailo_input_transform_context
  * 
  * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
- * 
  * @note To release the transform_context, call the ::hailo_release_input_transform_context function
  *      with the returned ::hailo_input_transform_context.
  * 
@@ -3090,6 +3191,21 @@ HAILORTAPI size_t hailo_get_host_frame_size(const hailo_stream_info_t *stream_in
 HAILORTAPI hailo_status hailo_create_input_transform_context(const hailo_stream_info_t *stream_info,
     const hailo_transform_params_t *transform_params, hailo_input_transform_context *transform_context);
 
+/**
+ * Creates an input transform_context object. Allocates all necessary buffers used for the transformation (pre-process).
+ *
+ * @param[in]     stream              A ::hailo_input_stream object
+ * @param[in]     transform_params    A ::hailo_transform_params_t user transformation parameters.
+ * @param[out]    transform_context   A ::hailo_input_transform_context
+ *
+ * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
+ * @note To release the transform_context, call the ::hailo_release_input_transform_context function
+ *      with the returned ::hailo_input_transform_context.
+ *
+ */
+HAILORTAPI hailo_status hailo_create_input_transform_context_by_stream(hailo_input_stream stream,
+    const hailo_transform_params_t *transform_params, hailo_input_transform_context *transform_context);
+
 /**
  * Releases a transform_context object including all allocated buffers.
  * 
@@ -3100,21 +3216,23 @@ HAILORTAPI hailo_status hailo_create_input_transform_context(const hailo_stream_
 HAILORTAPI hailo_status hailo_release_input_transform_context(hailo_input_transform_context transform_context);
 
 /**
- * Check whether or not a transformation is needed.
+ * Check whether or not a transformation is needed - for quant_info per feature case.
  *
  * @param[in]  src_image_shape         The shape of the src buffer (host shape).
  * @param[in]  src_format              The format of the src buffer (host format).
  * @param[in]  dst_image_shape         The shape of the dst buffer (hw shape).
  * @param[in]  dst_format              The format of the dst buffer (hw format).
- * @param[in]  quant_info              A ::hailo_quant_info_t object containing quantization information.
+ * @param[in]  quant_infos             A pointer to an array of ::hailo_quant_info_t object containing quantization information.
+ * @param[in]  quant_infos_count       The number of ::hailo_quant_info_t elements pointed by quant_infos.
+ *                                     quant_infos_count should be equal to either 1, or src_image_shape.features.
  * @param[out] transformation_required Indicates whether or not a transformation is needed.
  * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
  * @note In case @a transformation_required is false, the src frame is ready to be sent to HW without any transformation.
  */
-HAILORTAPI hailo_status hailo_is_input_transformation_required(
+HAILORTAPI hailo_status hailo_is_input_transformation_required2(
     const hailo_3d_image_shape_t *src_image_shape, const hailo_format_t *src_format,
     const hailo_3d_image_shape_t *dst_image_shape, const hailo_format_t *dst_format,
-    const hailo_quant_info_t *quant_info, bool *transformation_required);
+    const hailo_quant_info_t *quant_infos, size_t quant_infos_count, bool *transformation_required);
 
 /**
  * Transforms an input frame pointed to by @a src directly to the buffer pointed to by @a dst.
@@ -3132,6 +3250,25 @@ HAILORTAPI hailo_status hailo_is_input_transformation_required(
 HAILORTAPI hailo_status hailo_transform_frame_by_input_transform_context(hailo_input_transform_context transform_context,
     const void *src, size_t src_size, void *dst, size_t dst_size);
 
+/**
+ * Check whether or not a transformation is needed - for quant_info per feature case.
+ *
+ * @param[in]  src_image_shape         The shape of the src buffer (hw shape).
+ * @param[in]  src_format              The format of the src buffer (hw format).
+ * @param[in]  dst_image_shape         The shape of the dst buffer (host shape).
+ * @param[in]  dst_format              The format of the dst buffer (host format).
+ * @param[in]  quant_infos             A pointer to an array of ::hailo_quant_info_t object containing quantization information.
+ * @param[in]  quant_infos_count       The number of ::hailo_quant_info_t elements pointed by quant_infos.
+ *                                     quant_infos_count should be equal to either 1, or dst_image_shape.features.
+ * @param[out] transformation_required Indicates whether or not a transformation is needed.
+ * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
+ * @note In case @a transformation_required is false, the src frame is already in the required format without any transformation.
+ */
+HAILORTAPI hailo_status hailo_is_output_transformation_required2(
+    const hailo_3d_image_shape_t *src_image_shape, const hailo_format_t *src_format,
+    const hailo_3d_image_shape_t *dst_image_shape, const hailo_format_t *dst_format,
+    const hailo_quant_info_t *quant_infos, size_t quant_infos_count, bool *transformation_required);
+
 /**
  * Creates an output transform_context object. Allocates all necessary buffers used for the transformation (post-process).
  * 
@@ -3140,39 +3277,34 @@ HAILORTAPI hailo_status hailo_transform_frame_by_input_transform_context(hailo_i
  * @param[out]    transform_context - A ::hailo_output_transform_context
  * 
  * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
- * 
  * @note To release the transform_context, call the ::hailo_release_output_transform_context function
  *      with the returned ::hailo_output_transform_context.
- * 
  */
 HAILORTAPI hailo_status hailo_create_output_transform_context(const hailo_stream_info_t *stream_info,
     const hailo_transform_params_t *transform_params, hailo_output_transform_context *transform_context);
 
 /**
- * Releases a transform_context object including all allocated buffers.
+ * Creates an output transform_context object. Allocates all necessary buffers used for the transformation (post-process).
  * 
- * @param[in]    transform_context - A ::hailo_output_transform_context object.
+ * @param[in]     stream              A ::hailo_output_stream object
+ * @param[in]     transform_params    A ::hailo_transform_params_t user transformation parameters.
+ * @param[out]    transform_context   A ::hailo_output_transform_context
  * 
  * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
+ * @note To release the transform_context, call the ::hailo_release_output_transform_context function
+ *      with the returned ::hailo_output_transform_context.
  */
-HAILORTAPI hailo_status hailo_release_output_transform_context(hailo_output_transform_context transform_context);
+HAILORTAPI hailo_status hailo_create_output_transform_context_by_stream(hailo_output_stream stream,
+     const hailo_transform_params_t *transform_params, hailo_output_transform_context *transform_context);
 
 /**
- * Check whether or not a transformation is needed.
- *
- * @param[in]  src_image_shape         The shape of the src buffer (hw shape).
- * @param[in]  src_format              The format of the src buffer (hw format).
- * @param[in]  dst_image_shape         The shape of the dst buffer (host shape).
- * @param[in]  dst_format              The format of the dst buffer (host format).
- * @param[in]  quant_info              A ::hailo_quant_info_t object containing quantization information.
- * @param[out] transformation_required Indicates whether or not a transformation is needed.
+ * Releases a transform_context object including all allocated buffers.
+ * 
+ * @param[in]    transform_context - A ::hailo_output_transform_context object.
+ * 
  * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
- * @note In case @a transformation_required is false, the src frame is already in the required format without any transformation.
  */
-HAILORTAPI hailo_status hailo_is_output_transformation_required(
-    const hailo_3d_image_shape_t *src_image_shape, const hailo_format_t *src_format,
-    const hailo_3d_image_shape_t *dst_image_shape, const hailo_format_t *dst_format,
-    const hailo_quant_info_t *quant_info, bool *transformation_required);
+HAILORTAPI hailo_status hailo_release_output_transform_context(hailo_output_transform_context transform_context);
 
 /**
  * Transforms an output frame pointed to by @a src directly to the buffer pointed to by @a dst.
@@ -3190,6 +3322,19 @@ HAILORTAPI hailo_status hailo_is_output_transformation_required(
 HAILORTAPI hailo_status hailo_transform_frame_by_output_transform_context(hailo_output_transform_context transform_context,
     const void *src, size_t src_size, void *dst, size_t dst_size);
 
+/**
+ * Returns whether or not qp is valid
+ *
+ * @param[in]     quant_info      A ::hailo_quant_info_t object.
+ * @param[out]    is_qp_valid     Indicates whether or not qp is valid.
+ *
+ * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
+ * @note QP will be invalid in case the HEF file was compiled with multiple QPs, and the user then tries working with an API for a single QP.
+ *       For example - if HEF was compiled with multiple QPs and then the user calls hailo_get_input_stream_info,
+ *       The ::hailo_quant_info_t object of the ::hailo_stream_info_t object will be invalid.
+ */
+HAILORTAPI hailo_status hailo_is_qp_valid(const hailo_quant_info_t quant_info, bool *is_qp_valid);
+
 /**
  * Creates an demuxer for the given mux stream. Allocates all necessary buffers
  * used for the demux process.
@@ -3295,14 +3440,15 @@ HAILORTAPI hailo_status hailo_fuse_nms_frames(const hailo_nms_fuse_input_t *nms_
  *                                      the function returns input virtual stream params of the given network.
  *                                      If NULL is passed, the function returns the input virtual stream params of 
  *                                      all the networks of the first network group.
- * @param[in] quantized                 Whether the data fed into the chip is already quantized. True means
- *                                      the data is already quantized. False means it's HailoRT's responsibility
- *                                      to quantize (scale) the data.
+ * @param[in] quantized                 Deprecated parameter that will be ignored. Whether to quantize (scale)
+ *                                      the data will be determined by the src-data and dst-data types.
  * @param[in] format_type               The default format type for all input virtual streams.
  * @param[out] input_params             List of params for input virtual streams.
  * @param[inout] input_params_count     On input: Amount of @a input_params array.
  *                                      On output: Will be filled with the detected amount of input vstreams on the @a network or @a network_group.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ * @note The argument @a quantized is deprecated and its usage is ignored. Whether to quantize (scale) the data will be determined by
+ *       the src-data and dst-data types.
  */
 HAILORTAPI hailo_status hailo_hef_make_input_vstream_params(hailo_hef hef, const char *name, 
     bool quantized, hailo_format_type_t format_type, 
@@ -3319,14 +3465,15 @@ HAILORTAPI hailo_status hailo_hef_make_input_vstream_params(hailo_hef hef, const
  *                                      the function returns output virtual stream params of the given network.
  *                                      If NULL is passed, the function returns the output virtual stream params of 
  *                                      all the networks of the first network group.
- * @param[in] quantized                 Whether the data returned from the device should be quantized. True
- *                                      means that the data returned to the user is still quantized. False
- *                                      means it's HailoRT's responsibility to de-quantize (rescale) the data.
+ * @param[in] quantized                 Deprecated parameter that will be ignored. Whether to de-quantize (rescale)
+ *                                      the data will be determined by the src-data and dst-data types.
  * @param[in] format_type               The default format type for all output virtual streams.
  * @param[out] output_params            List of params for output virtual streams.
  * @param[inout] output_params_count    On input: Amount of @a output_params array.
  *                                      On output: Will be filled with the detected amount of output vstreams on the @a network or @a network_group.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ * @note The argument @a quantized is deprecated and its usage is ignored. Whether to de-quantize (rescale) the data will be determined by
+ *       the src-data and dst-data types.
  */
 HAILORTAPI hailo_status hailo_hef_make_output_vstream_params(hailo_hef hef, const char *name, 
     bool quantized, hailo_format_type_t format_type, 
@@ -3336,14 +3483,15 @@ HAILORTAPI hailo_status hailo_hef_make_output_vstream_params(hailo_hef hef, cons
  * Creates input virtual stream params for a given network_group.
  *
  * @param[in]  network_group            Network group that owns the streams.
- * @param[in]  quantized                Whether the data fed into the chip is already quantized. True means
- *                                      the data is already quantized. False means it's HailoRT's responsibility
- *                                      to quantize (scale) the data.
+ * @param[in]  quantized                Deprecated parameter that will be ignored. Whether to quantize (scale)
+ *                                      the data will be determined by the src-data and dst-data types.
  * @param[in]  format_type              The default format type for all input virtual streams.
  * @param[out] input_params             List of params for input virtual streams.
  * @param[inout] input_params_count     On input: Amount of @a input_params array.
  *                                      On output: Will be filled with the detected amount of input vstreams on the @a network_group.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ * @note The argument @a quantized is deprecated and its usage is ignored. Whether to quantize (scale) the data will be determined by
+ *       the src-data and dst-data types.
  */
 HAILORTAPI hailo_status hailo_make_input_vstream_params(hailo_configured_network_group network_group, bool quantized,
     hailo_format_type_t format_type, hailo_input_vstream_params_by_name_t *input_params, size_t *input_params_count);
@@ -3352,14 +3500,15 @@ HAILORTAPI hailo_status hailo_make_input_vstream_params(hailo_configured_network
  * Creates output virtual stream params for given network_group.
  *
  * @param[in]  network_group            Network group that owns the streams.
- * @param[in]  quantized                Whether the data returned from the device should be quantized. True
- *                                      means that the data returned to the user is still quantized. False
- *                                      means it's HailoRT's responsibility to de-quantize (rescale) the data.
+ * @param[in]  quantized                Deprecated parameter that will be ignored. Whether to de-quantize (rescale)
+ *                                      the data will be determined by the src-data and dst-data types.
  * @param[in]  format_type              The default format type for all output virtual streams.
  * @param[out] output_params            List of params for output virtual streams.
  * @param[inout] output_params_count    On input: Amount of @a output_params array.
  *                                      On output: Will be filled with the detected amount of output vstreams on the @a network_group.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ * @note The argument @a quantized is deprecated and its usage is ignored. Whether to de-quantize (rescale) the data will be determined by
+ *       the src-data and dst-data types.
  */
 HAILORTAPI hailo_status hailo_make_output_vstream_params(hailo_configured_network_group network_group, bool quantized,
     hailo_format_type_t format_type, hailo_output_vstream_params_by_name_t *output_params,
@@ -3432,6 +3581,34 @@ HAILORTAPI hailo_status hailo_get_input_vstream_info(hailo_input_vstream input_v
  */
 HAILORTAPI hailo_status hailo_get_input_vstream_user_format(hailo_input_vstream input_vstream, hailo_format_t *user_buffer_format);
 
+/**
+ * Gets quant infos for a given input vstream.
+ *
+ * @param[in]     vstream      A ::hailo_input_vstream object.
+ * @param[out]    quant_infos  A pointer to a buffer of @a hailo_quant_info_t that will be filled with quant infos.
+ * @param[inout]  quant_infos_count   As input - the maximum amount of entries in @a quant_infos array.
+ *                                    As output - the actual amount of entries written if the function returns with ::HAILO_SUCCESS
+ *                                    or the amount of entries needed if the function returns ::HAILO_INSUFFICIENT_BUFFER.
+ * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
+ * @note In case a single qp is present - the returned list will be of size 1.
+ *       Otherwise - the returned list will be of the same length as the number of the frame's features.
+ */
+HAILORTAPI hailo_status hailo_get_input_vstream_quant_infos(hailo_input_vstream vstream, hailo_quant_info_t *quant_infos, size_t *quant_infos_count);
+
+/**
+ * Gets quant infos for a given output vstream.
+ *
+ * @param[in]     vstream      A ::hailo_output_vstream object.
+ * @param[out]    quant_infos  A pointer to a buffer of @a hailo_quant_info_t that will be filled with quant infos.
+ * @param[inout]  quant_infos_count   As input - the maximum amount of entries in @a quant_infos array.
+ *                                    As output - the actual amount of entries written if the function returns with ::HAILO_SUCCESS
+ *                                    or the amount of entries needed if the function returns ::HAILO_INSUFFICIENT_BUFFER.
+ * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
+ * @note In case a single qp is present - the returned list will be of size 1.
+ *       Otherwise - the returned list will be of the same length as the number of the frame's features.
+ */
+HAILORTAPI hailo_status hailo_get_output_vstream_quant_infos(hailo_output_vstream vstream, hailo_quant_info_t *quant_infos, size_t *quant_infos_count);
+
 /**
  * Gets the size of a virtual stream's frame on the host side in bytes
  * (the size could be affected by the format type - for example using UINT16, or by the data not being quantized yet)
@@ -3480,11 +3657,22 @@ HAILORTAPI hailo_status hailo_get_vstream_frame_size(hailo_vstream_info_t *vstre
  *                             \e shape inside ::hailo_vstream_info_t (Can be obtained using ::hailo_get_input_vstream_info).
  * @param[in] buffer_size      @a buffer buffer size in bytes. The size is expected to be the size returned from
  *                             ::hailo_get_input_vstream_frame_size.
- * 
+ *
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
  */
 HAILORTAPI hailo_status hailo_vstream_write_raw_buffer(hailo_input_vstream input_vstream, const void *buffer, size_t buffer_size);
 
+/**
+ * Writes the buffer to the hailo device via input virtual stream @a input_vstream.
+ *
+ * @param[in] input_vstream    A ::hailo_input_vstream object.
+ * @param[in] buffer           A pointer to a ::hailo_pix_buffer_t containing
+ *                             pointers to the planes where the data to be
+ *                             sent to the device is stored.
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ */
+HAILORTAPI hailo_status hailo_vstream_write_pix_buffer(hailo_input_vstream input_vstream, const hailo_pix_buffer_t *buffer);
+
 /**
  * Blocks until the pipeline buffers of @a input_vstream are flushed.
  * 
@@ -3508,6 +3696,40 @@ HAILORTAPI hailo_status hailo_flush_input_vstream(hailo_input_vstream input_vstr
  */
 HAILORTAPI hailo_status hailo_vstream_read_raw_buffer(hailo_output_vstream output_vstream, void *buffer, size_t buffer_size);
 
+/**
+ * Set NMS score threshold, used for filtering out candidates. Any box with score<TH is suppressed.
+ *
+ * @param[in] output_vstream   A ::hailo_output_vstream object.
+ * @param[in] threshold        NMS score threshold to set.
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ * @note This function will fail in cases where the output vstream has no NMS operations on the CPU.
+ */
+HAILORTAPI hailo_status hailo_vstream_set_nms_score_threshold(hailo_output_vstream output_vstream, float32_t threshold);
+
+/**
+ * Set NMS intersection over union overlap threshold,
+ * used in the NMS iterative elimination process where potential duplicates of detected items are suppressed.
+ *
+ * @param[in] output_vstream   A ::hailo_output_vstream object.
+ * @param[in] threshold        NMS IoU threshold to set.
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ * @note This function will fail in cases where the output vstream has no NMS operations on the CPU.
+ */
+HAILORTAPI hailo_status hailo_vstream_set_nms_iou_threshold(hailo_output_vstream output_vstream, float32_t threshold);
+
+/**
+ * Set a limit for the maximum number of boxes per class.
+ *
+ * @param[in] output_vstream             A ::hailo_output_vstream object.
+ * @param[in] max_proposals_per_class    NMS max proposals per class to set.
+ *
+ * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+ * @note This function will fail in cases where the output vstream has no NMS operations on the CPU.
+ */
+HAILORTAPI hailo_status hailo_vstream_set_nms_max_proposals_per_class(hailo_output_vstream output_vstream, uint32_t max_proposals_per_class);
+
 /**
  * Release input virtual streams.
  * 
@@ -3583,7 +3805,7 @@ HAILORTAPI hailo_status hailo_infer(hailo_configured_network_group configured_ne
  * @param[out] networks_infos         A pointer to a buffer of ::hailo_network_info_t that receives the informations.
  * @param[inout] number_of_networks   As input - the maximum amount of entries in @a hailo_network_info_t array.
  *                                    As output - the actual amount of entries written if the function returns with ::HAILO_SUCCESS
- *                                    and the amount of entries needed if the function returns ::HAILO_INSUFFICIENT_BUFFER.
+ *                                    or the amount of entries needed if the function returns ::HAILO_INSUFFICIENT_BUFFER.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, if the buffer is
  *                                    insufficient to hold the information a ::HAILO_INSUFFICIENT_BUFFER would be
  *                                    returned. In any other case, returns a ::hailo_status error.
@@ -3598,7 +3820,7 @@ HAILORTAPI hailo_status hailo_hef_get_network_infos(hailo_hef hef, const char *n
  * @param[out] networks_infos         A pointer to a buffer of ::hailo_network_info_t that receives the informations.
  * @param[inout] number_of_networks   As input - the maximum amount of entries in @a hailo_network_info_t array.
  *                                    As output - the actual amount of entries written if the function returns with ::HAILO_SUCCESS
- *                                    and the amount of entries needed if the function returns ::HAILO_INSUFFICIENT_BUFFER.
+ *                                    or the amount of entries needed if the function returns ::HAILO_INSUFFICIENT_BUFFER.
  * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, if the buffer is
  *                                    insufficient to hold the information a ::HAILO_INSUFFICIENT_BUFFER would be
  *                                    returned. In any other case, returns a ::hailo_status error.
@@ -3638,6 +3860,42 @@ HAILORTAPI hailo_status hailo_set_sleep_state(hailo_device device, hailo_sleep_s
  *  @{
  */
 
+/**
+ * Check whether or not a transformation is needed.
+ *
+ * @param[in]  src_image_shape         The shape of the src buffer (host shape).
+ * @param[in]  src_format              The format of the src buffer (host format).
+ * @param[in]  dst_image_shape         The shape of the dst buffer (hw shape).
+ * @param[in]  dst_format              The format of the dst buffer (hw format).
+ * @param[in]  quant_info              A ::hailo_quant_info_t object containing quantization information.
+ * @param[out] transformation_required Indicates whether or not a transformation is needed.
+ * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
+ * @note In case @a transformation_required is false, the src frame is ready to be sent to HW without any transformation.
+ */
+HAILORTAPI hailo_status hailo_is_input_transformation_required(
+    const hailo_3d_image_shape_t *src_image_shape, const hailo_format_t *src_format,
+    const hailo_3d_image_shape_t *dst_image_shape, const hailo_format_t *dst_format,
+    const hailo_quant_info_t *quant_info, bool *transformation_required)
+    DEPRECATED("hailo_is_input_transformation_required is deprecated. Please use hailo_is_input_transformation_required2 instead.");
+
+/**
+ * Check whether or not a transformation is needed.
+ *
+ * @param[in]  src_image_shape         The shape of the src buffer (hw shape).
+ * @param[in]  src_format              The format of the src buffer (hw format).
+ * @param[in]  dst_image_shape         The shape of the dst buffer (host shape).
+ * @param[in]  dst_format              The format of the dst buffer (host format).
+ * @param[in]  quant_info              A ::hailo_quant_info_t object containing quantization information.
+ * @param[out] transformation_required Indicates whether or not a transformation is needed.
+ * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a hailo_status error.
+ * @note In case @a transformation_required is false, the src frame is already in the required format without any transformation.
+ */
+HAILORTAPI hailo_status hailo_is_output_transformation_required(
+    const hailo_3d_image_shape_t *src_image_shape, const hailo_format_t *src_format,
+    const hailo_3d_image_shape_t *dst_image_shape, const hailo_format_t *dst_format,
+    const hailo_quant_info_t *quant_info, bool *transformation_required)
+    DEPRECATED("hailo_is_output_transformation_required is deprecated. Please use hailo_is_output_transformation_required2 instead.");
+
 /** @} */ // end of group_deprecated_functions_and_defines
 
 
index 7c503a14d4e3cbed599277d429c3555e85f53d78..1c85ac028507e5e9762922566da515eef681a2c8 100644 (file)
@@ -21,6 +21,7 @@
 #include "hailo/stream.hpp"
 #include "hailo/vstream.hpp"
 #include "hailo/inference_pipeline.hpp"
+#include "hailo/infer_model.hpp"
 #include "hailo/transform.hpp"
 #include "hailo/expected.hpp"
 #include "hailo/buffer.hpp"
index 996c6abf6e16e0981c3650c414baacdc79ea9a21..00259a7bb880d081c5df1a3e7433ea6e6566198b 100644 (file)
@@ -34,12 +34,14 @@ public:
     static_assert(sizeof(hailo_bbox_t) / sizeof(uint16_t) == sizeof(hailo_bbox_float32_t) / sizeof(float32_t),
         "Mismatch bbox params size");
     static const uint32_t BBOX_PARAMS = sizeof(hailo_bbox_t) / sizeof(uint16_t);
+    static const uint32_t MASK_PARAMS = 1; // mask_size
     static const uint32_t MAX_DEFUSED_LAYER_COUNT = 9;
     static const size_t HW_DATA_ALIGNMENT = 8;
     static const uint32_t MUX_INFO_COUNT = 32;
     static const uint32_t MAX_MUX_PREDECESSORS = 4;
     static const uint16_t ETH_INPUT_BASE_PORT = 32401;
     static const uint16_t ETH_OUTPUT_BASE_PORT = 32501;
+    static const uint32_t MAX_NMS_BURST_SIZE = 65536;
 
     /**
      * Gets the NMS host shape size (number of elements) from NMS info.
@@ -153,6 +155,8 @@ public:
             return "UINT16";
         case HAILO_FORMAT_TYPE_FLOAT32:
             return "FLOAT32";
+        case HAILO_FORMAT_TYPE_AUTO:
+            return "AUTO";
         default:
             return "Nan";
         }
@@ -174,8 +178,10 @@ public:
             return "HAILO8";
         case HAILO_ARCH_HAILO8L:
             return "HAILO8L";
-        case HAILO_ARCH_HAILO15:
-            return "HAILO15";
+        case HAILO_ARCH_HAILO15H:
+            return "HAILO15H";
+        case HAILO_ARCH_PLUTO:
+            return "PLUTO";
         default:
             return "UNKNOWN ARCHITECTURE";
         }
@@ -229,6 +235,8 @@ public:
             return "I420";
         case HAILO_FORMAT_ORDER_HAILO_YYYYUV:
             return "YYYYUV";
+        case HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK:
+            return "HAILO NMS WITH METADATA";
         default:
             return "Nan";
         }
@@ -264,9 +272,26 @@ public:
      * @param[in] format            A ::hailo_format_t object.
      * @return The NMS host frame size in bytes.
      */
-    static constexpr uint32_t get_nms_host_frame_size(const hailo_nms_shape_t &nms_shape, const hailo_format_t &format)
+    static uint32_t get_nms_host_frame_size(const hailo_nms_shape_t &nms_shape, const hailo_format_t &format);
+
+    /**
+     * Gets HAILO_NMS_WITH_BYTE_MASK host shape size in bytes by nms_shape and buffer format.
+     *
+     * @param[in] nms_shape             The NMS shape to get size from.
+     * @param[in] format                A ::hailo_format_t object.
+     * @return The HAILO_NMS_WITH_BYTE_MASK host shape size.
+     */
+    static constexpr uint32_t get_nms_with_byte_mask_host_shape_size(const hailo_nms_shape_t &nms_shape, const hailo_format_t &format)
     {
-        return get_nms_host_shape_size(nms_shape) * get_format_data_bytes(format);
+        // Assuming 1 byte per pixel for the mask
+        auto bbox_size = BBOX_PARAMS + MASK_PARAMS + nms_shape.max_mask_size;
+        const uint32_t size_per_class = 1 + (bbox_size * nms_shape.max_bboxes_per_class);
+        double shape_size = size_per_class * nms_shape.number_of_classes;
+        if ((shape_size * get_format_data_bytes(format)) < UINT32_MAX) {
+            return static_cast<uint32_t>(shape_size);
+        } else {
+            return UINT32_MAX / get_format_data_bytes(format);
+        }
     }
 
     /**
@@ -315,7 +340,7 @@ public:
             trans_params.user_buffer_format.type = stream_info.format.type;
         }
 
-        if (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) {
+        if (HailoRTCommon::is_nms(stream_info)) {
             return get_nms_host_frame_size(stream_info.nms_info, trans_params.user_buffer_format);
         } else {
             auto shape = (HAILO_STREAM_NO_TRANSFORM == trans_params.transform_mode) ? stream_info.hw_shape :
@@ -331,14 +356,13 @@ public:
      * @param[in] format               A ::hailo_format_t object.
      * @return The frame's size in bytes.
      */
-    static constexpr uint32_t get_frame_size(const hailo_vstream_info_t &vstream_info,
-        hailo_format_t format)
+    static constexpr uint32_t get_frame_size(const hailo_vstream_info_t &vstream_info, hailo_format_t format)
     {
         if (HAILO_FORMAT_TYPE_AUTO == format.type) {
             format.type = vstream_info.format.type;
         }
 
-        if (HAILO_FORMAT_ORDER_HAILO_NMS == vstream_info.format.order) {
+        if (HailoRTCommon::is_nms(vstream_info)) {
             return get_nms_host_frame_size(vstream_info.nms_shape, format);
         } else {
             return get_frame_size(vstream_info.shape, format);
@@ -350,6 +374,21 @@ public:
         return (HAILO_STREAM_INTERFACE_PCIE == stream_interface) || (HAILO_STREAM_INTERFACE_INTEGRATED == stream_interface);
     }
 
+    static constexpr bool is_nms(const hailo_vstream_info_t &vstream_info)
+    {
+        return is_nms(vstream_info.format.order);
+    }
+
+    static constexpr bool is_nms(const hailo_stream_info_t &stream_info)
+    {
+        return is_nms(stream_info.format.order);
+    }
+
+    static constexpr bool is_nms(const hailo_format_order_t &order)
+    {
+        return ((HAILO_FORMAT_ORDER_HAILO_NMS == order) || (HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK == order));
+    }
+
     static Expected<hailo_device_id_t> to_device_id(const std::string &device_id);
     static Expected<std::vector<hailo_device_id_t>> to_device_ids_vector(const std::vector<std::string> &device_ids_str);
 };
@@ -424,6 +463,17 @@ inline constexpr hailo_pipeline_elem_stats_flags_t& operator|=(hailo_pipeline_el
     return a;
 }
 
+inline constexpr hailo_stream_flags_t operator|(hailo_stream_flags_t a, hailo_stream_flags_t b)
+{
+    return static_cast<hailo_stream_flags_t>(static_cast<int>(a) | static_cast<int>(b));
+}
+
+inline constexpr hailo_stream_flags_t& operator|=(hailo_stream_flags_t &a, hailo_stream_flags_t b)
+{
+    a = a | b;
+    return a;
+}
+
 inline bool is_bit_set(uint32_t num, uint8_t i)
 {
     return (1 == ((num >> i) & 1));
index 3a06ada87b3db758aef0fcb06a0958854078af6b..91278575abc1ad00ff2dea31f851d912ad602624 100644 (file)
@@ -398,14 +398,15 @@ public:
      *                              the function returns the input virtual stream params of the given network.
      *                              If NULL is passed, the function returns the input virtual stream params of 
      *                              all the networks of the first network group.
-     * @param[in] quantized         Whether the data fed into the chip is already quantized. True means
-     *                              the data is already quantized. False means it's HailoRT's responsibility
-     *                              to quantize (scale) the data.
+     * @param[in] quantized         Deprecated parameter that will be ignored. Whether to quantize (scale)
+     *                              the data will be decided by the src-data and dst-data types.
      * @param[in] format_type       The default format type for all input virtual streams.
      * @param[in] timeout_ms        The default timeout in milliseconds for all input virtual streams.
      * @param[in] queue_size        The default queue size for all input virtual streams.
      * @return Upon success, returns Expected of a map of input virtual stream name to params.
      *         Otherwise, returns Unexpected of ::hailo_status error.
+     * @note The argument @a quantized is deprecated and its usage is ignored. Whether to quantize (scale) the data will be decided by
+     *       the src-data and dst-data types.
      */
     Expected<std::map<std::string, hailo_vstream_params_t>> make_input_vstream_params(
         const std::string &name, bool quantized, hailo_format_type_t format_type,
@@ -421,14 +422,15 @@ public:
      *                              the function returns the output virtual stream params of the given network.
      *                              If NULL is passed, the function returns the output virtual stream params of
      *                              all the networks of the first network group.
-     * @param[in] quantized         Whether the data returned from the chip is already quantized. True means
-     *                              the data is already quantized. False means it's HailoRT's responsibility
-     *                              to quantize (scale) the data.
+     * @param[in] quantized         Deprecated parameter that will be ignored. Whether to de-quantize (rescale)
+     *                              the data will be decided by the src-data and dst-data types.
      * @param[in] format_type       The default format type for all output virtual streams.
      * @param[in] timeout_ms        The default timeout in milliseconds for all output virtual streams.
      * @param[in] queue_size        The default queue size for all output virtual streams.
      * @return Upon success, returns Expected of a map of output virtual stream name to params.
      *         Otherwise, returns Unexpected of ::hailo_status error.
+     * @note The argument @a quantized is deprecated and its usage is ignored. Whether to de-quantize (rescale) the data will be decided by
+     *       the src-data and dst-data types.
      */
     Expected<std::map<std::string, hailo_vstream_params_t>> make_output_vstream_params(
         const std::string &name, bool quantized, hailo_format_type_t format_type,
diff --git a/hailort/libhailort/include/hailo/infer_model.hpp b/hailort/libhailort/include/hailo/infer_model.hpp
new file mode 100644 (file)
index 0000000..4a20423
--- /dev/null
@@ -0,0 +1,164 @@
+/**
+ * Copyright (c) 2020-2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file infer_model.hpp
+ * @brief Async Infer
+ **/
+
+#ifndef _HAILO_ASYNC_INFER_HPP_
+#define _HAILO_ASYNC_INFER_HPP_
+
+#include "hailo/network_group.hpp"
+#include "hailo/hef.hpp"
+#include "hailo/vdevice.hpp"
+
+#include <condition_variable>
+#include <mutex>
+
+/** hailort namespace */
+namespace hailort
+{
+
+class ConfiguredInferModelImpl;
+class HAILORTAPI AsyncInferJob
+{
+public:
+    AsyncInferJob() = default;
+    virtual ~AsyncInferJob();
+
+    AsyncInferJob(const AsyncInferJob &other) = delete;
+    AsyncInferJob &operator=(const AsyncInferJob &other) = delete;
+    AsyncInferJob(AsyncInferJob &&other);
+    AsyncInferJob &operator=(AsyncInferJob &&other);
+
+    hailo_status wait(std::chrono::milliseconds timeout);
+    void detach();
+
+private:
+    friend class ConfiguredInferModelImpl;
+
+    class Impl;
+    AsyncInferJob(std::shared_ptr<Impl> pimpl);
+    std::shared_ptr<Impl> m_pimpl;
+    bool m_should_wait_in_dtor;
+};
+
+struct CompletionInfoAsyncInfer;
+class HAILORTAPI ConfiguredInferModel
+{
+public:
+    class HAILORTAPI Bindings
+    {
+    public:
+        class HAILORTAPI InferStream
+        {
+        public:
+            hailo_status set_buffer(MemoryView view);
+            MemoryView get_buffer();
+
+        private:
+            friend class ConfiguredInferModelImpl;
+
+            class Impl;
+            InferStream(std::shared_ptr<Impl> pimpl);
+            std::shared_ptr<Impl> m_pimpl;
+        };
+
+        Expected<InferStream> input();
+        Expected<InferStream> output();
+        Expected<InferStream> input(const std::string &name);
+        Expected<InferStream> output(const std::string &name);
+
+    private:
+        friend class ConfiguredInferModelImpl;
+
+        Bindings(std::unordered_map<std::string, InferStream> &&inputs,
+            std::unordered_map<std::string, InferStream> &&outputs);
+
+        std::unordered_map<std::string, Bindings::InferStream> m_inputs;
+        std::unordered_map<std::string, Bindings::InferStream> m_outputs;
+    };
+
+    Expected<Bindings> create_bindings();
+    hailo_status wait_for_async_ready(std::chrono::milliseconds timeout);
+    hailo_status activate();
+    void deactivate();
+    hailo_status run(Bindings bindings, std::chrono::milliseconds timeout);
+    Expected<AsyncInferJob> run_async(Bindings bindings,
+        std::function<void(const CompletionInfoAsyncInfer &)> callback = [] (const CompletionInfoAsyncInfer &) {});
+
+private:
+    friend class InferModel;
+
+    ConfiguredInferModel(std::shared_ptr<ConfiguredInferModelImpl> pimpl);
+
+    std::shared_ptr<ConfiguredInferModelImpl> m_pimpl;
+};
+
+struct HAILORTAPI CompletionInfoAsyncInfer
+{
+    CompletionInfoAsyncInfer(ConfiguredInferModel::Bindings _bindings, hailo_status _status) : bindings(_bindings), status(_status)
+    {
+    }
+
+    ConfiguredInferModel::Bindings bindings;
+    hailo_status status;
+};
+
+class HAILORTAPI InferModel final
+{
+public:
+    ~InferModel() = default;
+
+    class HAILORTAPI InferStream
+    {
+    public:
+        const std::string name() const;
+        size_t get_frame_size() const;
+        void set_format_type(hailo_format_type_t type);
+        void set_format_order(hailo_format_order_t order);
+
+    private:
+        friend class InferModel;
+        friend class VDeviceBase;
+
+        class Impl;
+        InferStream(std::shared_ptr<Impl> pimpl);
+        hailo_format_t get_user_buffer_format();
+
+        std::shared_ptr<Impl> m_pimpl;
+    };
+
+    Expected<ConfiguredInferModel> configure(const std::string &network_name = "");
+    Expected<InferStream> input();
+    Expected<InferStream> output();
+    Expected<InferStream> input(const std::string &name);
+    Expected<InferStream> output(const std::string &name);
+    const std::vector<InferStream> &inputs() const;
+    const std::vector<InferStream> &outputs() const;
+    const std::vector<std::string> &get_input_names() const;
+    const std::vector<std::string> &get_output_names() const;
+    
+    InferModel(InferModel &&);
+
+private:
+    friend class VDeviceBase;
+
+    InferModel(VDevice &vdevice, Hef &&hef, std::unordered_map<std::string, InferStream> &&inputs,
+        std::unordered_map<std::string, InferStream> &&outputs);
+
+    std::reference_wrapper<VDevice> m_vdevice;
+    Hef m_hef;
+    std::unordered_map<std::string, InferStream> m_inputs;
+    std::unordered_map<std::string, InferStream> m_outputs;
+    std::vector<InferStream> m_inputs_vector;
+    std::vector<InferStream> m_outputs_vector;
+    std::vector<std::string> m_input_names;
+    std::vector<std::string> m_output_names;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_ASYNC_INFER_HPP_ */
index 201d6443a47503b710e2926e041df9cd5bf20f6d..a6811b26c596fa034c5cf727e1c565269baead59 100644 (file)
@@ -82,6 +82,34 @@ public:
      */
     std::vector<std::reference_wrapper<OutputVStream>> get_output_vstreams();
 
+    /**
+     * Set NMS score threshold, used for filtering out candidates. Any box with score<TH is suppressed.
+     *
+     * @param[in] threshold        NMS score threshold to set.
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+     * @note This function will fail in cases where there is no output with NMS operations on the CPU.
+     */
+    hailo_status set_nms_score_threshold(float32_t threshold);
+
+    /**
+     * Set NMS intersection over union overlap threshold,
+     * used in the NMS iterative elimination process where potential duplicates of detected items are suppressed.
+     *
+     * @param[in] threshold        NMS IoU threshold to set.
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+     * @note This function will fail in cases where there is no output with NMS operations on the CPU.
+     */
+    hailo_status set_nms_iou_threshold(float32_t threshold);
+
+    /**
+     * Set a limit for the maximum number of boxes per class.
+     *
+     * @param[in] max_proposals_per_class    NMS max proposals per class to set.
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+     * @note This function will fail in cases where there is no output with NMS operations on the CPU.
+     */
+    hailo_status set_nms_max_proposals_per_class(uint32_t max_proposals_per_class);
+
     InferVStreams(const InferVStreams &other) = delete;
     InferVStreams &operator=(const InferVStreams &other) = delete;
     InferVStreams &operator=(InferVStreams &&other) = delete;
index 6b8d029a9f02451e7a2345a597344bb7caeea9bd..d80a062f0c70eb9b5bc94a10f8bb78a779aa4519 100644 (file)
@@ -75,8 +75,6 @@ public:
     virtual const std::string &get_network_group_name() const = 0;
 
     virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key) = 0;
-    // TODO HRT-10799: remove when enable batch switch flow for hailo15
-    virtual hailo_status set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset) = 0;
 
     /**
      * @return The number of invalid frames.
@@ -231,9 +229,8 @@ public:
     /**
      * Creates input virtual stream params.
      *
-     * @param[in]  quantized                Whether the data fed into the chip is already quantized. True means
-     *                                      the data is already quantized. False means it's HailoRT's responsibility
-     *                                      to quantize (scale) the data.
+     * @param[in] quantized                 Deprecated parameter that will be ignored. Whether to quantize (scale)
+     *                                      the data will be decided by the src-data and dst-data types.
      * @param[in]  format_type              The default format type for all input virtual streams.
      * @param[in]  timeout_ms               The default timeout in milliseconds for all input virtual streams.
      * @param[in]  queue_size               The default queue size for all input virtual streams.
@@ -241,6 +238,8 @@ public:
      *                                      If not passed, all the networks in the network group will be addressed.
      * @return Upon success, returns Expected of a map of name to vstream params.
      *         Otherwise, returns Unexpected of ::hailo_status error.
+     * @note The argument @a quantized is deprecated and its usage is ignored. Whether to quantize (scale) the data will be decided by
+     *       the src-data and dst-data types.
      */
     virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_input_vstream_params(
         bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
@@ -249,9 +248,8 @@ public:
     /**
      * Creates output virtual stream params.
      *
-     * @param[in]  quantized                Whether the data returned from the chip is already quantized. True means
-     *                                      the data is already quantized. False means it's HailoRT's responsibility
-     *                                      to quantize (scale) the data.
+     * @param[in] quantized                 Deprecated parameter that will be ignored. Whether to de-quantize (rescale)
+     *                                      the data will be decided by the src-data and dst-data types.
      * @param[in]  format_type              The default format type for all output virtual streams.
      * @param[in]  timeout_ms               The default timeout in milliseconds for all output virtual streams.
      * @param[in]  queue_size               The default queue size for all output virtual streams.
@@ -259,6 +257,8 @@ public:
      *                                      If not passed, all the networks in the network group will be addressed.
      * @return Upon success, returns Expected of a map of name to vstream params.
      *         Otherwise, returns Unexpected of ::hailo_status error.
+     * @note The argument @a quantized is deprecated and its usage is ignored. Whether to de-quantize (rescale) the data will be decided by
+     *       the src-data and dst-data types.
      */
     virtual Expected<std::map<std::string, hailo_vstream_params_t>> make_output_vstream_params(
         bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
@@ -267,14 +267,15 @@ public:
     /**
      * Creates output virtual stream params. The groups are splitted with respect to their low-level streams.
      *
-     * @param[in]  quantized                Whether the data returned from the chip is already quantized. True means
-     *                                      the data is already quantized. False means it's HailoRT's responsibility
-     *                                      to quantize (scale) the data.
+     * @param[in] quantized                 Deprecated parameter that will be ignored. Whether to de-quantize (rescale)
+     *                                      the data will be decided by the src-data and dst-data types.
      * @param[in]  format_type              The default format type for all output virtual streams.
      * @param[in]  timeout_ms               The default timeout in milliseconds for all output virtual streams.
      * @param[in]  queue_size               The default queue size for all output virtual streams.
      * @return Upon success, returns Expected of a vector of maps, mapping name to vstream params, where each map represents a params group.
      *         Otherwise, returns Unexpected of ::hailo_status error.
+     * @note The argument @a quantized is deprecated and its usage is ignored. Whether to de-quantize (rescale) the data will be decided by
+     *       the src-data and dst-data types.
      */
     virtual Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> make_output_vstream_params_groups(
         bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size) = 0;
@@ -396,16 +397,19 @@ public:
 
     virtual Expected<HwInferResults> run_hw_infer_estimator() = 0;
 
-    virtual hailo_status before_fork() { return HAILO_SUCCESS; }
-    virtual hailo_status after_fork_in_parent() { return HAILO_SUCCESS; }
-    virtual hailo_status after_fork_in_child() { return HAILO_SUCCESS; }
-
     virtual Expected<std::vector<std::string>> get_sorted_output_names() = 0;
     virtual Expected<std::vector<std::string>> get_stream_names_from_vstream_name(const std::string &vstream_name) = 0;
     virtual Expected<std::vector<std::string>> get_vstream_names_from_stream_name(const std::string &stream_name) = 0;
 
-    static Expected<std::shared_ptr<ConfiguredNetworkGroup>> duplicate_network_group_client(uint32_t handle, const std::string &network_group_name);
+    static Expected<std::shared_ptr<ConfiguredNetworkGroup>> duplicate_network_group_client(uint32_t ng_handle, uint32_t vdevice_handle,
+        const std::string &network_group_name);
     virtual Expected<uint32_t> get_client_handle() const;
+    virtual Expected<uint32_t> get_vdevice_client_handle() const;
+
+    virtual hailo_status before_fork();
+    virtual hailo_status after_fork_in_parent();
+    virtual hailo_status after_fork_in_child();
+
 protected:
     ConfiguredNetworkGroup() = default;
 
index 8d80c1c734ac0b2fa79ad6cccbac2d358a4bc42c..253ca45b2f63bab146d79fed69c07d9789d0f421 100644 (file)
@@ -16,6 +16,8 @@
 #include <math.h>
 #include <fenv.h>
 
+static const float32_t INVALID_QP_VALUE = 0;
+
 #ifdef _MSC_VER
 #include <immintrin.h>
 #endif
@@ -240,6 +242,22 @@ public:
         }
     }
 
+    /**
+     * Returns whether or not qp is valid
+     *
+     * @param[in]     quant_info      A ::hailo_quant_info_t object.
+     *
+     * @return a bool, Indicates whether or not qp is valid.
+     * @note QP will be invalid in case the HEF file was compiled with multiple QPs, and the user then tries to use an API intended for a single QP.
+     *       For example - if the HEF was compiled with multiple QPs and the user then calls hailo_get_input_stream_info,
+     *       the ::hailo_quant_info_t object of the ::hailo_stream_info_t object will be invalid.
+     */
+    static inline bool is_qp_valid(const hailo_quant_info_t &quant_info)
+    {
+        return !((quant_info.qp_zp == INVALID_QP_VALUE) && (quant_info.qp_scale == INVALID_QP_VALUE) 
+            && (quant_info.limvals_min == INVALID_QP_VALUE) && (quant_info.limvals_max == INVALID_QP_VALUE));
+    }
+
 private:
     template <typename T, typename Q>
     static inline Q quantize_input(T number, hailo_quant_info_t quant_info)
index d3252a03b2ca0c3f5cc07dea2870f1d5cc65b5c8..d5ee93efaf56e32a52bb7fb6242e5b02494d89be 100644 (file)
@@ -24,9 +24,9 @@
 namespace hailort
 {
 
-// Forward declaration
-struct LayerInfo;
-
+#define INVALID_CORE_OP_HANDLE (UINT32_MAX)
+using device_id_t = std::string;
+using vdevice_core_op_handle_t = uint32_t;
 
 /*! Input (host to device) stream representation */
 class HAILORTAPI InputStream
@@ -114,7 +114,7 @@ public:
      * @note @a buffer is expected to be in the format dictated by this.stream_info.format
      * @note @a buffer.size() is expected to be get_frame_size().
      */
-    virtual hailo_status write(const MemoryView &buffer);
+    virtual hailo_status write(const MemoryView &buffer) = 0;
 
     /**
      * Writes the entire buffer to the stream without transformations.
@@ -126,7 +126,7 @@ public:
      * @note @a buffer is expected to be in the format dictated by this.stream_info.format
      * @note @a size is expected to be get_frame_size().
      */
-    virtual hailo_status write(const void *buffer, size_t size);
+    virtual hailo_status write(const void *buffer, size_t size) = 0;
 
     /**
      * Waits until the stream is ready to launch a new write_async() operation. Each stream contains some limited sized
@@ -226,6 +226,14 @@ public:
         return m_stream_info;
     }
 
+    /**
+     * @returns the quant_infos - quant info per feature.
+     */
+    virtual const std::vector<hailo_quant_info_t> &get_quant_infos() const
+    {
+        return m_quant_infos;
+    }
+
     /**
      * @returns the stream's hw frame size in bytes.
      */
@@ -254,19 +262,9 @@ protected:
     InputStream() = default;
     InputStream(InputStream &&) = delete;
 
-    // Note: Implement write_impl for the actual stream interaction in sub classes
-    virtual hailo_status write_impl(const MemoryView &buffer) = 0;
-
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) = 0;
-    virtual hailo_status deactivate_stream() = 0;
-
     hailo_stream_info_t m_stream_info;
+    std::vector<hailo_quant_info_t> m_quant_infos;
     uint8_t m_dataflow_manager_id;
-
-private:
-    friend class HefConfigurator;
-    friend class ConfiguredNetworkGroupBase;
-    friend class CoreOp;
 };
 
 /*! Output (device to host) stream representation */
@@ -347,6 +345,14 @@ public:
         return m_stream_info;
     }
 
+    /**
+     * @returns the quant_infos - quant info per feature.
+     */
+    virtual const std::vector<hailo_quant_info_t> &get_quant_infos() const
+    {
+        return m_quant_infos;
+    }
+
     /**
      * @returns the stream's hw frame size.
      */
@@ -381,7 +387,7 @@ public:
      * @note Upon return, @a buffer is expected to be in the format dictated by this.get_info().format
      * @note @a size is expected to be get_frame_size().
      */
-    virtual hailo_status read(MemoryView buffer);
+    virtual hailo_status read(MemoryView buffer) = 0;
 
     /**
      * Reads the entire buffer from the stream without transformations
@@ -394,7 +400,7 @@ public:
      * @note Upon return, @a buffer is expected to be in the format dictated by this.get_info().format
      * @note @a size is expected to be get_frame_size().
      */
-    virtual hailo_status read(void *buffer, size_t size);
+    virtual hailo_status read(void *buffer, size_t size) = 0;
 
     /**
      * Waits until the stream is ready to launch a new read_async() operation. Each stream contains some limited sized
@@ -492,27 +498,15 @@ protected:
     OutputStream() = default;
     OutputStream(OutputStream&&) = delete;
 
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) = 0;
-    virtual hailo_status deactivate_stream() = 0;
-    virtual hailo_status read_impl(MemoryView &buffer) = 0;
-
     hailo_stream_info_t m_stream_info;
+    std::vector<hailo_quant_info_t> m_quant_infos;
     uint8_t m_dataflow_manager_id;
     std::atomic<uint32_t> m_invalid_frames_count;
 
-protected:
-    hailo_status read_nms(void *buffer, size_t offset, size_t size);
-
 private:
-    virtual const LayerInfo& get_layer_info() = 0;
     void increase_invalid_frames_count(uint32_t value);
 
-    friend class HefConfigurator;
-    friend class ConfiguredNetworkGroupBase;
     friend class HwReadElement;
-    friend class OutputDemuxer;
-    friend class CoreOp;
-    friend class NMSStreamReader;
 };
 
 } /* namespace hailort */
index db1d7cf0db294888aa0bb0c3a498b3acdfe81a1b..2a777996a3ce836c153a780286e9f5b898d48895 100644 (file)
@@ -29,6 +29,22 @@ class HAILORTAPI InputTransformContext final
 {
 public:
 
+    /**
+     * Creates input transform_context.
+     * 
+     * @param[in] src_image_shape          The shape of the src buffer to be transformed.
+     * @param[in] src_format               The format of the src buffer to be transformed.
+     * @param[in] dst_image_shape          The shape of the dst buffer that receives the transformed data.
+     * @param[in] dst_format               The format of the dst buffer that receives the transformed data.
+     * @param[in] dst_quant_infos          A vector of ::hailo_quant_info_t object containing quantization information per feature.
+     *                                     Might also contain a vector with a single ::hailo_quant_info_t object.
+     * @return Upon success, returns Expected of a pointer to InputTransformContext.
+     *         Otherwise, returns Unexpected of ::hailo_status error.
+     */
+    static Expected<std::unique_ptr<InputTransformContext>> create(const hailo_3d_image_shape_t &src_image_shape,
+        const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
+        const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos);
+
     /**
      * Creates input transform_context.
      * 
@@ -39,10 +55,12 @@ public:
      * @param[in] dst_quant_info           A ::hailo_quant_info_t object containing quantization information.
      * @return Upon success, returns Expected of a pointer to InputTransformContext.
      *         Otherwise, returns Unexpected of ::hailo_status error.
+     * @note This function is deprecated
      */
     static Expected<std::unique_ptr<InputTransformContext>> create(const hailo_3d_image_shape_t &src_image_shape,
         const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
-        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info);
+        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info)
+        DEPRECATED("The use of a single hailo_quant_info_t in this function is deprecated. Please pass a vector of hailo_quant_info_t instead.");
 
     /**
      * Creates input transform_context.
@@ -59,16 +77,28 @@ public:
      * Creates input transform_context.
      * 
      * @param[in] stream_info    Creates transform_context that fits this stream info.
-     * @param[in] quantized      Whether the data fed into the transform_context is already quantized. True means
-     *                           the data is already quantized. False means it's transform_context responsibility to
-     *                           quantize (scale) the data.
+     * @param[in] quantized      Deprecated parameter that will be ignored. Whether to quantize (scale)
+     *                           the data will be decided by the src-data and dst-data types.
      * @param[in] format_type    The type of the buffer sent to the transform_context.
      * @return Upon success, returns Expected of a pointer to InputTransformContext.
      *         Otherwise, returns Unexpected of ::hailo_status error.
+     * @note The argument @a quantized is deprecated and its usage is ignored. Whether to quantize (scale) the data will be decided by
+     *       the src-data and dst-data types.
      */
     static Expected<std::unique_ptr<InputTransformContext>> create(const hailo_stream_info_t &stream_info, bool quantized,
         hailo_format_type_t format_type);
 
+    /**
+     * Creates input transform_context by output stream
+     * 
+     * @param[in] input_stream  Creates transform_context that fits this input stream.
+     * @param[in] transform_params  A ::hailo_transform_params_t object containing user transformation parameters.
+     * @return Upon success, returns Expected of a pointer to InputTransformContext.
+     *         Otherwise, returns Unexpected of ::hailo_status error.
+     */
+    static Expected<std::unique_ptr<InputTransformContext>> create(InputStream &input_stream,
+    const hailo_transform_params_t &transform_params);
+
     /**
      * Transforms an input frame referred by @a src directly to the buffer referred by @a dst.
      * 
@@ -88,6 +118,21 @@ public:
      */
     size_t get_dst_frame_size() const;
 
+    /**
+     * Check whether or not a transformation is needed. 
+     *
+     * @param[in] src_image_shape          The shape of the src buffer (host shape).
+     * @param[in] src_format               The format of the src buffer (host format).
+     * @param[in] dst_image_shape          The shape of the dst buffer (hw shape).
+     * @param[in] dst_format               The format of the dst buffer (hw format).
+     * @param[in] quant_infos              A vector of ::hailo_quant_info_t object containing quantization information per feature.
+     * @return Returns Expected of boolean, whether or not a transformation is needed.
+     * @note In case the function returns false, the src frame is ready to be sent to HW without any transformation.
+     */
+    static Expected<bool> is_transformation_required(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
+        const std::vector<hailo_quant_info_t> &quant_infos);
+
     /**
      * Check whether or not a transformation is needed. 
      *
@@ -98,10 +143,12 @@ public:
      * @param[in] quant_info               A ::hailo_quant_info_t object containing quantization information.
      * @return Returns whether or not a transformation is needed.
      * @note In case the function returns false, the src frame is ready to be sent to HW without any transformation.
+     * @note This function is deprecated.
      */
     static bool is_transformation_required(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
         const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
-        const hailo_quant_info_t &quant_info);
+        const hailo_quant_info_t &quant_info)
+        DEPRECATED("The use of a single hailo_quant_info_t in this function is deprecated. Please pass a vector of hailo_quant_info_t instead.");
 
     /**
      * @return A human-readable description of the transformation parameters.
@@ -111,7 +158,7 @@ public:
 private:
     InputTransformContext(size_t src_frame_size, const hailo_3d_image_shape_t &src_image_shape,
         const hailo_format_t &src_format, size_t dst_frame_size, const hailo_3d_image_shape_t &dst_image_shape,
-        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, Buffer &&quant_buffer,
+        const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, Buffer &&quant_buffer,
         Buffer &&transpose_buffer, const bool should_quantize, const bool should_transpose, const bool should_reorder);
 
     inline MemoryView quant_buffer() {
@@ -133,7 +180,7 @@ private:
     const size_t m_dst_frame_size;
     const hailo_3d_image_shape_t m_dst_image_shape;
     const hailo_format_t m_dst_format;
-    const hailo_quant_info_t m_dst_quant_info;
+    const std::vector<hailo_quant_info_t> m_dst_quant_infos;
     const bool m_should_quantize;
     const bool m_should_transpose;
     const bool m_should_reorder;
@@ -152,6 +199,23 @@ public:
     OutputTransformContext(const OutputTransformContext &) = delete;
     OutputTransformContext& operator=(const OutputTransformContext &) = delete;
 
+    /**
+     * Creates output transform_context.
+     * 
+     * @param[in] src_image_shape          The shape of the src buffer to be transformed.
+     * @param[in] src_format               The format of the src buffer to be transformed.
+     * @param[in] dst_image_shape          The shape of the dst buffer that receives the transformed data.
+     * @param[in] dst_format               The format of the dst buffer that receives the transformed data.
+     * @param[in] dst_quant_infos          A vector of ::hailo_quant_info_t object containing quantization information per feature.
+     *                                     Might also contain a vector with a single ::hailo_quant_info_t object.
+     * @param[in] nms_info                 A ::hailo_nms_info_t object containing nms information.
+     * @return Upon success, returns Expected of a pointer to OutputTransformContext.
+     *         Otherwise, returns Unexpected of ::hailo_status error.
+     */
+    static Expected<std::unique_ptr<OutputTransformContext>> create(const hailo_3d_image_shape_t &src_image_shape,
+        const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
+        const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info);
+
     /**
      * Creates output transform_context.
      * 
@@ -163,13 +227,16 @@ public:
      * @param[in] nms_info                 A ::hailo_nms_info_t object containing nms information.
      * @return Upon success, returns Expected of a pointer to OutputTransformContext.
      *         Otherwise, returns Unexpected of ::hailo_status error.
+     * @note This function is deprecated.
      */
     static Expected<std::unique_ptr<OutputTransformContext>> create(const hailo_3d_image_shape_t &src_image_shape,
         const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
-        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info);
+        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info)
+        DEPRECATED("The use of a single hailo_quant_info_t in this function is deprecated. Please pass a vector of hailo_quant_info_t instead.");
 
     /**
      * Creates output transform_context.
+     * 
      * @param[in] stream_info       Creates transform_context that fits this stream info.
      * @param[in] transform_params  A ::hailo_transform_params_t object containing user transformation parameters.
      * @return Upon success, returns Expected of a pointer to OutputTransformContext.
@@ -182,16 +249,28 @@ public:
      * Creates output transform_context with default transform parameters
      * 
      * @param[in] stream_info    Creates transform_context that fits this stream info.
-     * @param[in] quantized      Whether the data returned from the transform_context should be quantized. True
-     *                           means that the data returned to the user is still quantized. False means it's the
-     *                           transform_context responsibility to de-quantize (rescale) the data.
+     * @param[in] quantized      Deprecated parameter that will be ignored. Whether to de-quantize (rescale)
+     *                           the data is decided by the src-data and dst-data types.
      * @param[in] format_type    The type of the buffer returned from the transform_context
      * @return Upon success, returns Expected of a pointer to OutputTransformContext.
      *         Otherwise, returns Unexpected of ::hailo_status error.
+     * @note The argument @a quantized is deprecated and its usage is ignored. Whether to de-quantize (rescale) the data is decided by
+     *       the src-data and dst-data types.
      */
     static Expected<std::unique_ptr<OutputTransformContext>> create(const hailo_stream_info_t &stream_info, bool quantized,
         hailo_format_type_t format_type);
 
+    /**
+     * Creates output transform_context by output stream
+     * 
+     * @param[in] output_stream  Creates transform_context that fits this output stream.
+     * @param[in] transform_params  A ::hailo_transform_params_t object containing user transformation parameters.
+     * @return Upon success, returns Expected of a pointer to OutputTransformContext.
+     *         Otherwise, returns Unexpected of ::hailo_status error.
+     */
+    static Expected<std::unique_ptr<OutputTransformContext>> create(OutputStream &output_stream,
+    const hailo_transform_params_t &transform_params);
+
     /**
      * Transforms an output frame referred by @a src directly to the buffer referred by @a dst.
      * 
@@ -211,6 +290,21 @@ public:
      */
     size_t get_dst_frame_size() const;
 
+    /**
+     * Check whether or not a transformation is needed.
+     * 
+     * @param[in] src_image_shape          The shape of the src buffer (hw shape).
+     * @param[in] src_format               The format of the src buffer (hw format).
+     * @param[in] dst_image_shape          The shape of the dst buffer (host shape).
+     * @param[in] dst_format               The format of the dst buffer (host format).
+     * @param[in] quant_infos              A vector of ::hailo_quant_info_t objects containing quantization information per feature.
+     * @return Returns Expected of boolean, whether or not a transformation is needed.
+     * @note In case the function returns false, the src frame is already in the required format without any transformation.
+     */
+    static Expected<bool> is_transformation_required(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, 
+        const std::vector<hailo_quant_info_t> &quant_infos);
+
     /**
      * Check whether or not a transformation is needed.
      * 
@@ -221,10 +315,12 @@ public:
      * @param[in] quant_info               A ::hailo_quant_info_t object containing quantization information.
      * @return Returns whether or not a transformation is needed.
      * @note In case the function returns false, the src frame is already in the required format without any transformation.
+     * @note This function is deprecated.
      */
     static bool is_transformation_required(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
         const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, 
-        const hailo_quant_info_t &quant_info);
+        const hailo_quant_info_t &quant_info)
+        DEPRECATED("The use of a single hailo_quant_info_t in this function is deprecated. Please pass a vector of hailo_quant_info_t instead.");
 
     /**
      * @return A human-readable description of the transformation parameters.
@@ -233,14 +329,14 @@ public:
 
 protected:
     OutputTransformContext(size_t src_frame_size, const hailo_format_t &src_format, size_t dst_frame_size,
-        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const bool should_quantize,
+        const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, const bool should_quantize,
         const bool should_transpose, const bool should_reorder);
 
     const size_t m_src_frame_size;
     const hailo_format_t m_src_format;
     const size_t m_dst_frame_size;
     const hailo_format_t m_dst_format;
-    const hailo_quant_info_t m_dst_quant_info;
+    const std::vector<hailo_quant_info_t> m_dst_quant_infos;
     const bool m_should_quantize;
     const bool m_should_transpose;
     const bool m_should_reorder;
index 0aa7bd3dfa24d3bdd9b911975e00c44c527a4b22..b8566dba2b4d075b40ba5a03ae761fb43c726ee8 100644 (file)
@@ -15,6 +15,7 @@
 #include "hailo/hef.hpp"
 #include "hailo/network_group.hpp"
 #include "hailo/device.hpp"
+#include "hailo/infer_model.hpp"
 
 
 /** hailort namespace */
@@ -64,6 +65,8 @@ public:
     virtual Expected<ConfiguredNetworkGroupVector> configure(Hef &hef,
         const NetworkGroupsParamsMap &configure_params={}) = 0;
 
+    virtual Expected<InferModel> create_infer_model(const std::string &hef_path) = 0;
+
     /**
      * Gets the underlying physical devices.
      * 
@@ -108,9 +111,9 @@ public:
      */
     Expected<ConfigureNetworkParams> create_configure_params(Hef &hef, const std::string &network_group_name) const;
 
-    virtual hailo_status before_fork() { return HAILO_SUCCESS; }
-    virtual hailo_status after_fork_in_parent() { return HAILO_SUCCESS; }
-    virtual hailo_status after_fork_in_child() { return HAILO_SUCCESS; }
+    virtual hailo_status before_fork();
+    virtual hailo_status after_fork_in_parent();
+    virtual hailo_status after_fork_in_child();
 
     virtual ~VDevice() = default;
     VDevice(const VDevice &) = delete;
@@ -118,6 +121,8 @@ public:
     VDevice(VDevice &&) = delete;
     VDevice &operator=(VDevice &&other) = delete;
 
+    static bool service_over_ip_mode();
+
 protected:
     VDevice() = default;
 };
index 0cfe56334683a6d61d8272f81a0efda9e23a40f4..e9d97f860fcde5c32560ee0747591250c5497684 100644 (file)
@@ -25,7 +25,7 @@ class PipelineElement;
 class HAILORTAPI InputVStream
 {
 public:
-    static Expected<InputVStream> create(const hailo_vstream_info_t &vstream_info,
+    static Expected<InputVStream> create(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos,
         const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
         std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
@@ -44,6 +44,15 @@ public:
      */
     hailo_status write(const MemoryView &buffer);
 
+    /**
+     * Writes @a buffer to hailo device.
+     *
+     * @param[in] buffer            The buffer containing pointers to the planes where the data to
+     *                              be sent to the device is stored.
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+     */
+    hailo_status write(const hailo_pix_buffer_t &buffer);
+
     /**
      * Flushes the vstream pipeline buffers. This will block until the vstream pipeline is clear.
      *
@@ -92,6 +101,11 @@ public:
      */
     const hailo_vstream_info_t &get_info() const;
 
+    /**
+     * @returns the stream's vector of quantization infos.
+     */
+    const std::vector<hailo_quant_info_t> &get_quant_infos() const;
+
     /**
      * @return ::hailo_format_t object containing the user buffer format.
      */
@@ -157,10 +171,12 @@ public:
      */
     const std::vector<std::shared_ptr<PipelineElement>> &get_pipeline() const;
 
+    bool is_aborted();
+    bool is_multi_planar();
+
     hailo_status before_fork();
     hailo_status after_fork_in_parent();
     hailo_status after_fork_in_child();
-    bool is_aborted();
 
     // Added to match the same API as InputStream. Will be filled when async API will be implemented for vstreams.
     using TransferDoneCallback = void(*);
@@ -183,10 +199,10 @@ class HAILORTAPI OutputVStream
 {
 public:
     static Expected<OutputVStream> create(
-        const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
-        std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
-        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
-        EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator);
+        const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos,
+        const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
+        std::vector<std::shared_ptr<PipelineElement>> &&pipeline, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+        EventPtr shutdown_event, EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator);
     OutputVStream(OutputVStream &&other) noexcept = default;
     OutputVStream &operator=(OutputVStream &&other) noexcept = default;
     virtual ~OutputVStream() = default;
@@ -242,6 +258,11 @@ public:
      */
     const hailo_vstream_info_t &get_info() const;
 
+    /**
+     * @returns the stream's vector of quantization infos.
+     */
+    const std::vector<hailo_quant_info_t> &get_quant_infos() const;
+
     /**
      * @return ::hailo_format_t object containing the user buffer format.
      */
@@ -307,10 +328,40 @@ public:
      */
     const std::vector<std::shared_ptr<PipelineElement>> &get_pipeline() const;
 
+    /**
+     * Set NMS score threshold, used for filtering out candidates. Any box with score<TH is suppressed.
+     *
+     * @param[in] threshold        NMS score threshold to set.
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+     * @note This function will fail in cases where the output vstream has no NMS operations on the CPU.
+     */
+    hailo_status set_nms_score_threshold(float32_t threshold);
+
+    /**
+     * Set NMS intersection over union overlap Threshold,
+     * used in the NMS iterative elimination process where potential duplicates of detected items are suppressed.
+     *
+     * @param[in] threshold        NMS IoU threshold to set.
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+     * @note This function will fail in cases where the output vstream has no NMS operations on the CPU.
+     */
+    hailo_status set_nms_iou_threshold(float32_t threshold);
+
+    /**
+     * Set a limit for the maximum number of boxes per class.
+     *
+     * @param[in] max_proposals_per_class    NMS max proposals per class to set.
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+     * @note This function will fail in cases where the output vstream has no NMS operations on the CPU.
+     */
+    hailo_status set_nms_max_proposals_per_class(uint32_t max_proposals_per_class);
+
+
+    bool is_aborted();
+
     hailo_status before_fork();
     hailo_status after_fork_in_parent();
     hailo_status after_fork_in_child();
-    bool is_aborted();
 
     // Added to match the same API as InputStream. Will be filled when async API will be implemented for vstreams.
     using TransferDoneCallback = void(*);
index 8a9656f883976d77356f1d49ab39c154db4d016b..2629f2cbd853db593f726ee476c88c0fc365a1e0 100644 (file)
@@ -24,10 +24,13 @@ message ProtoMonStreamFramesInfo {
     ProtoMonStreamDirection stream_direction = 2;
     int32 buffer_frames_size = 3;
     int32 pending_frames_count = 4;
+    int32 min_pending_frames_count = 5;
+    int32 max_pending_frames_count = 6;
+    double avg_pending_frames_count = 7;
 }
 
 message ProtoMonNetworkFrames {
-    string network_name = 1; 
+    string network_name = 1;
     repeated ProtoMonStreamFramesInfo streams_frames_infos = 2;
 }
 
index 1c44fd08d42c374bbf57cf8b0cdd4d5185cdd165..51b0c90b3d7a1c9669fdf2b84ccd1254e7dd8f1d 100644 (file)
@@ -5,6 +5,8 @@ find_package(Threads REQUIRED)
 include(GNUInstallDirs)
 include(CMakePackageConfigHelpers)
 include(${CMAKE_CURRENT_SOURCE_DIR}/../../cmake/common_compiler_options.cmake)
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/spdlog.cmake)
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/readerwriterqueue.cmake)
 
 FUNCTION(relative_to_absolute_paths output)
     SET(listVar "")
@@ -81,6 +83,7 @@ endif()
 
 target_link_libraries(libhailort PRIVATE Threads::Threads)
 target_link_libraries(libhailort PRIVATE hef_proto)
+target_link_libraries(libhailort PRIVATE profiler_proto)
 target_link_libraries(libhailort PRIVATE scheduler_mon_proto)
 target_link_libraries(libhailort PRIVATE spdlog::spdlog)
 target_link_libraries(libhailort PRIVATE readerwriterqueue)
@@ -89,6 +92,7 @@ if(HAILO_BUILD_SERVICE)
     target_link_libraries(libhailort PRIVATE hailort_rpc_grpc_proto)
 endif()
 if(CMAKE_SYSTEM_NAME STREQUAL QNX)
+    include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/pevents.cmake)
     target_link_libraries(libhailort PRIVATE pevents pci)
 endif()
 
@@ -109,6 +113,7 @@ set(HAILORT_PUBLIC_HEADERS
     ${HAILORT_INC_DIR}/hailo/transform.hpp
     ${HAILORT_INC_DIR}/hailo/vstream.hpp
     ${HAILORT_INC_DIR}/hailo/inference_pipeline.hpp
+    ${HAILORT_INC_DIR}/hailo/infer_model.hpp
     ${HAILORT_INC_DIR}/hailo/runtime_statistics.hpp
     ${HAILORT_INC_DIR}/hailo/network_rate_calculator.hpp
     ${HAILORT_INC_DIR}/hailo/vdevice.hpp
index c87e0b07ca027b31f688c2846e1ee9d4fc3a816f..f9a2d87344953800e41b3876f747efb8ea69601d 100644 (file)
@@ -16,8 +16,6 @@
 
 #include "common/utils.hpp"
 
-#include "core_op/core_op.hpp"
-
 
 namespace hailort
 {
index 0967e7837e5af4cf9ad2b7df8be8d0bf9596b9c2..56f512ab9f49deda3f2b68670a6a6132d22c2693 100644 (file)
 #include "core_op/resource_manager/resource_manager.hpp"
 #include "hef/hef_internal.hpp"
 #include "eth/eth_stream.hpp"
-#include "vdma/vdma_stream_base.hpp"
+#include "vdma/vdma_stream.hpp"
 #include "mipi/mipi_stream.hpp"
 #include "device_common/control_protocol.hpp"
+#include "stream_common/nms_stream.hpp"
+#include "stream_common/remote_process_stream.hpp"
 
 
 namespace hailort
 {
 
-ActivatedCoreOp::ActivatedCoreOp(const hailo_activate_network_group_params_t &network_group_params,
-        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,         
-        EventPtr &&core_op_activated_event, hailo_status &status) :
-    m_network_group_params(network_group_params),
-    m_core_op_activated_event(std::move(core_op_activated_event)),
-    m_input_streams(input_streams),
-    m_output_streams(output_streams)
-{
-    status = validate_network_group_params(network_group_params);
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Failed to validate network_group params");
-        return;
-    }
-}
-
-uint32_t ActivatedCoreOp::get_invalid_frames_count()
-{
-    uint32_t total_invalid_frames_count = 0;
-    for (auto& name_stream_pair : m_output_streams) {
-        total_invalid_frames_count += name_stream_pair.second->get_invalid_frames_count();
-    }
-    return total_invalid_frames_count;
-}
-
-// TODO: Implement function (HRT-3174)
-hailo_status ActivatedCoreOp::validate_network_group_params(
-    const hailo_activate_network_group_params_t &/*network_group_params*/)
-{
-    return HAILO_SUCCESS;
-}
-
 CoreOp::CoreOp(
-    const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> metadata, hailo_status &status) :
+    const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> metadata,
+    ActiveCoreOpHolder &active_core_op_holder, hailo_status &status) :
         m_config_params(config_params),
+        m_active_core_op_holder(active_core_op_holder),
         m_min_configured_batch_size(get_smallest_configured_batch_size(config_params)),
         m_activation_time_accumulator(),
         m_deactivation_time_accumulator(),
-        m_metadata(metadata)
+        m_metadata(metadata),
+        m_vdevice_core_op_handle(INVALID_CORE_OP_HANDLE)
 {
     auto event = Event::create_shared(Event::State::not_signalled);
-    if (nullptr == event) {
+    if (!event) {
         LOGGER__ERROR("Failed to create activation event");
-        status = HAILO_INTERNAL_FAILURE;
+        status = event.status();
         return;
     }
-    m_core_op_activated_event = std::move(std::move(event));
+    m_core_op_activated_event = event.release();
 
     m_activation_time_accumulator = make_shared_nothrow<FullAccumulator<double>>("activation_time");
     if (nullptr == m_activation_time_accumulator) {
@@ -91,13 +64,6 @@ CoreOp::CoreOp(
     status = HAILO_SUCCESS;
 }
 
-Expected<std::unique_ptr<ActivatedNetworkGroup>> CoreOp::activate(const hailo_activate_network_group_params_t &network_group_params)
-{
-    static const auto RESET_PENDING_STREAM_TRANSFERS = false;
-    return create_activated_network_group(network_group_params, CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE,
-        RESET_PENDING_STREAM_TRANSFERS);
-}
-
 Expected<std::chrono::nanoseconds> get_latency(LatencyMeterPtr &latency_meter, bool clear)
 {
     auto hw_latency = latency_meter->get_latency(clear);
@@ -119,6 +85,9 @@ Expected<LatencyMeasurementResult> CoreOp::get_latency_measurement(const std::st
     auto latency_meters = latency_meters_exp.release();
 
     if (network_name.empty()) {
+        if (1 != m_input_streams.size()) {
+            return make_unexpected(HAILO_NOT_AVAILABLE);
+        }
         std::chrono::nanoseconds latency_sum(0);
         uint32_t measurements_count = 0;
         for (auto &latency_meter_pair : *latency_meters.get()) {
@@ -150,17 +119,144 @@ Expected<LatencyMeasurementResult> CoreOp::get_latency_measurement(const std::st
     return result;
 }
 
+hailo_status CoreOp::activate(uint16_t dynamic_batch_size)
+{
+    auto start_time = std::chrono::steady_clock::now();
+
+    CHECK(!is_scheduled(), HAILO_INVALID_OPERATION,
+        "Manually activate a core-op is not allowed when the core-op scheduler is active!");
+
+    // Check that no network is currently activated
+    CHECK(!m_active_core_op_holder.is_any_active(), HAILO_INVALID_OPERATION,
+        "Cant activate network because a network is already activated");
+    m_active_core_op_holder.set(*this);
+
+    auto status = activate_impl(dynamic_batch_size);
+    if (HAILO_SUCCESS != status) {
+        auto deactivate_status = deactivate_impl();
+        if (HAILO_SUCCESS != deactivate_status) {
+            LOGGER__ERROR("Failed deactivate {}", deactivate_status);
+        }
+        m_active_core_op_holder.clear();
+    }
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        return status;
+    }
+    CHECK_SUCCESS(status);
+
+    const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
+        std::chrono::steady_clock::now() - start_time).count();
+
+    status = m_core_op_activated_event->signal();
+    if (HAILO_SUCCESS != status) {
+        auto deactivate_status = deactivate_impl();
+        if (HAILO_SUCCESS != deactivate_status) {
+            LOGGER__ERROR("Failed deactivate {}", deactivate_status);
+        }
+        m_active_core_op_holder.clear();
+    }
+    CHECK_SUCCESS(status, "Failed to signal network activation event");
+
+    LOGGER__INFO("Activating {} took {} milliseconds. Note that the function is asynchronous and"
+                 " thus the network is not fully activated yet.", name(), elapsed_time_ms);
+    m_activation_time_accumulator->add_data_point(elapsed_time_ms);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status CoreOp::deactivate()
+{
+    const auto start_time = std::chrono::steady_clock::now();
+
+    CHECK(!is_scheduled(), HAILO_INVALID_OPERATION,
+        "Manually deactivate a core-op is not allowed when the core-op scheduler is active!");
+
+    auto core_op_ref = m_active_core_op_holder.get();
+    CHECK_EXPECTED_AS_STATUS(core_op_ref, "Trying to deactivate while no network is running");
+
+    CHECK(this == std::addressof(core_op_ref->get()), HAILO_INTERNAL_FAILURE,
+        "Trying to deactivate different core-op");
+    m_active_core_op_holder.clear();
+
+    m_core_op_activated_event->reset();
+
+    auto deactivate_status = deactivate_impl();
+    if (HAILO_SUCCESS != deactivate_status) {
+        LOGGER__ERROR("Failed deactivating core-op (status {})", deactivate_status);
+    }
+
+    const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
+        std::chrono::steady_clock::now() - start_time).count();
+    LOGGER__INFO("Deactivating took {} ms", elapsed_time_ms);
+    m_deactivation_time_accumulator->add_data_point(elapsed_time_ms);
+
+    return deactivate_status;
+}
+
 Expected<LayerInfo> CoreOp::get_layer_info(const std::string &stream_name)
 {
-    for (auto layer_info : m_metadata->get_all_layer_infos()) {
+    for (const auto &layer_info : m_metadata->get_all_layer_infos()) {
+        if (layer_info.is_multi_planar) {
+            for (const auto &plane : layer_info.planes) {
+                if (plane.name == stream_name) {
+                    auto cpy = plane;
+                    return cpy;
+                }
+            }
+        }
         if (layer_info.name == stream_name) {
-            return layer_info;
+            auto cpy = layer_info;
+            return cpy;
         }
     }
     LOGGER__ERROR("Failed to find layer with name {}", stream_name);
     return make_unexpected(HAILO_NOT_FOUND);
 }
 
+bool CoreOp::is_nms()
+{
+    for (auto layer_info : m_metadata->get_output_layer_infos()) {
+        if (HAILO_FORMAT_ORDER_HAILO_NMS == layer_info.format.order) {
+            return true;
+        }
+    }
+    return false;
+}
+
+hailo_status CoreOp::add_input_stream(std::shared_ptr<InputStreamBase> &&stream,
+    const hailo_stream_parameters_t &stream_params)
+{
+    if ((stream_params.flags & HAILO_STREAM_FLAGS_ASYNC) != 0) {
+        // When the user forces async streams, we use NOT_OWNING mode.
+        auto status = stream->set_buffer_mode(StreamBufferMode::NOT_OWNING);
+        CHECK_SUCCESS(status);
+    } else {
+        // When the user forces async streams, we use OWNING mode.
+        auto status = stream->set_buffer_mode(StreamBufferMode::OWNING);
+        CHECK_SUCCESS(status);
+    }
+
+    m_input_streams.emplace(stream->name(), std::move(stream));
+    return HAILO_SUCCESS;
+}
+
+hailo_status CoreOp::add_output_stream(std::shared_ptr<OutputStreamBase> &&stream,
+    const hailo_stream_parameters_t &stream_params)
+{
+    if ((stream_params.flags & HAILO_STREAM_FLAGS_ASYNC) != 0) {
+        // When the user forces async streams, we use NOT_OWNING mode.
+        auto status = stream->set_buffer_mode(StreamBufferMode::NOT_OWNING);
+        CHECK_SUCCESS(status);
+    } else {
+        // When the user forces async streams, we use OWNING mode.
+        auto status = stream->set_buffer_mode(StreamBufferMode::OWNING);
+        CHECK_SUCCESS(status);
+    }
+
+    m_output_streams.emplace(stream->name(), std::move(stream));
+    return HAILO_SUCCESS;
+}
+
 uint16_t CoreOp::get_smallest_configured_batch_size(const ConfigureNetworkParams &config_params)
 {
     // There are two possible situations:
@@ -184,25 +280,27 @@ uint16_t CoreOp::get_smallest_configured_batch_size(const ConfigureNetworkParams
     return (UINT16_MAX == min_batch_size) ? DEFAULT_ACTUAL_BATCH_SIZE : min_batch_size;
 }
 
-Expected<std::unique_ptr<ActivatedNetworkGroup>> CoreOp::activate_with_batch(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
-{
-    return create_activated_network_group(HailoRTDefaults::get_active_network_group_params(), dynamic_batch_size,
-        resume_pending_stream_transfers);
-}
-
 const std::string &CoreOp::name() const
 {
     return m_metadata->core_op_name();
 }
 
-hailo_status CoreOp::activate_low_level_streams(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+hailo_status CoreOp::activate_low_level_streams()
 {
     for (auto &name_pair : m_input_streams) {
-        auto status = name_pair.second->activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
+        auto status = name_pair.second->activate_stream();
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            LOGGER__INFO("Stream {} activation failed because it was aborted by user", name_pair.first);
+            return status;
+        }
         CHECK_SUCCESS(status);
     }
     for (auto &name_pair : m_output_streams) {
-        auto status = name_pair.second->activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
+        auto status = name_pair.second->activate_stream();
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            LOGGER__INFO("Stream {} activation failed because it was aborted by user", name_pair.first);
+            return status;
+        }
         CHECK_SUCCESS(status);
     }
 
@@ -240,19 +338,61 @@ const SupportedFeatures &CoreOp::get_supported_features()
 Expected<uint16_t> CoreOp::get_stream_batch_size(const std::string &stream_name)
 {
     for (const auto &layer_info : m_metadata->get_all_layer_infos()) {
-        if (layer_info.name == stream_name) {
+        auto stream_under_multi_planes_layer = (layer_info.is_multi_planar && std::any_of(layer_info.planes.begin(), layer_info.planes.end(),
+            [&stream_name](const auto &plane){ return plane.name == stream_name; }));
+        if ((layer_info.name == stream_name) || (stream_under_multi_planes_layer)) {
             for (auto const &network_params_pair : m_config_params.network_params_by_name) {
                 if (network_params_pair.first == layer_info.network_name) {
                     auto batch_size = network_params_pair.second.batch_size;
-                    return batch_size;
+                    return (batch_size == HAILO_DEFAULT_BATCH_SIZE) ? DEFAULT_ACTUAL_BATCH_SIZE : batch_size;
                 }
             }
         }
     }
-    LOGGER__ERROR("Failed to find network name output stream {}", stream_name);
+    LOGGER__ERROR("Failed to find batch for stream {}", stream_name);
     return make_unexpected(HAILO_NOT_FOUND);
 }
 
+bool CoreOp::is_default_batch_size() const
+{
+    for (auto const &network_params_pair : m_config_params.network_params_by_name) {
+        if (network_params_pair.second.batch_size != HAILO_DEFAULT_BATCH_SIZE) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+Expected<Buffer> CoreOp::get_intermediate_buffer(const IntermediateBufferKey &)
+{
+    LOGGER__ERROR("Getting intermediate buffer is not supported for this core op");
+    return make_unexpected(HAILO_NOT_SUPPORTED);
+}
+
+hailo_status CoreOp::wrap_streams_for_remote_process()
+{
+    for (auto &input_stream_pair : m_input_streams) {
+        auto base_stream = input_stream_pair.second;
+
+        auto remote_proc_stream = RemoteProcessInputStream::create(base_stream);
+        CHECK_EXPECTED_AS_STATUS(remote_proc_stream);
+
+        input_stream_pair.second = remote_proc_stream.release();
+    }
+
+    for (auto &output_stream_pair : m_output_streams) {
+        auto base_stream = output_stream_pair.second;
+
+        auto remote_proc_stream = RemoteProcessOutputStream::create(base_stream);
+        CHECK_EXPECTED_AS_STATUS(remote_proc_stream);
+
+        output_stream_pair.second = remote_proc_stream.release();
+    }
+
+    return HAILO_SUCCESS;
+}
+
 bool CoreOp::is_multi_context() const
 {
     return m_metadata->supported_features().multi_context;
@@ -263,118 +403,140 @@ const ConfigureNetworkParams CoreOp::get_config_params() const
     return m_config_params;
 }
 
-hailo_status CoreOp::create_input_stream_from_config_params(Device &device,
+Expected<std::shared_ptr<InputStreamBase>> CoreOp::create_input_stream_from_config_params(Device &device,
     const hailo_stream_parameters_t &stream_params, const std::string &stream_name)
 {
     auto layer_info = get_layer_info(stream_name);
-    CHECK_EXPECTED_AS_STATUS(layer_info);
+    CHECK_EXPECTED(layer_info);
 
-    CHECK(device.is_stream_interface_supported(stream_params.stream_interface), HAILO_INVALID_OPERATION,
+    CHECK_AS_EXPECTED(device.is_stream_interface_supported(stream_params.stream_interface), HAILO_INVALID_OPERATION,
         "Device does not supports the given stream interface streams. Please update input_stream_params for stream {}.",
         stream_name);
 
+    std::shared_ptr<InputStreamBase> input_stream = nullptr;
     switch (stream_params.stream_interface) {
         case HAILO_STREAM_INTERFACE_PCIE:
             // Fallthrough
         case HAILO_STREAM_INTERFACE_INTEGRATED:
-            return create_vdma_input_stream(device, stream_name, layer_info.value(), stream_params);
-        
+            {
+                auto input_stream_exp = create_vdma_input_stream(device, stream_name, layer_info.value(), stream_params);
+                CHECK_EXPECTED(input_stream_exp);
+                input_stream = input_stream_exp.release();
+                break;
+            }
+
         case HAILO_STREAM_INTERFACE_ETH:
             {
-                auto input_stream = EthernetInputStream::create(device,
+                auto input_stream_exp = EthernetInputStream::create(device,
                     layer_info.value(), stream_params.eth_input_params, m_core_op_activated_event);
-                CHECK_EXPECTED_AS_STATUS(input_stream);
-                m_input_streams.insert(make_pair(stream_name, input_stream.release()));
-                return HAILO_SUCCESS;
+                CHECK_EXPECTED(input_stream_exp);
+                input_stream = input_stream_exp.release();
+                break;
             }
-        
+
         case HAILO_STREAM_INTERFACE_MIPI:
             {
-                auto input_stream = MipiInputStream::create(device,
+                auto input_stream_exp = MipiInputStream::create(device,
                     layer_info.value(), stream_params.mipi_input_params, m_core_op_activated_event);
-                CHECK_EXPECTED_AS_STATUS(input_stream);
-                m_input_streams.insert(make_pair(stream_name, input_stream.release()));
-                return HAILO_SUCCESS;
+                CHECK_EXPECTED(input_stream_exp);
+                input_stream = input_stream_exp.release();
+                break;
             }
-        
+
         default:
             LOGGER__ERROR("{} interface is not supported.", stream_params.stream_interface);
-            return HAILO_NOT_IMPLEMENTED;
+            return make_unexpected(HAILO_NOT_IMPLEMENTED);
     }
+
+    return input_stream;
 }
 
-hailo_status CoreOp::create_vdma_input_stream(Device &device, const std::string &stream_name,
+Expected<std::shared_ptr<InputStreamBase>> CoreOp::create_vdma_input_stream(Device &device, const std::string &stream_name,
     const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params)
 {
     // Make sure the downcast is safe
-    CHECK((Device::Type::INTEGRATED == device.get_type()) || (Device::Type::PCIE == device.get_type()),
+    CHECK_AS_EXPECTED((Device::Type::INTEGRATED == device.get_type()) || (Device::Type::PCIE == device.get_type()),
         HAILO_INTERNAL_FAILURE, "Invalid device type");
     VdmaDevice *vdma_device = reinterpret_cast<VdmaDevice*>(&device);
-    
-    auto batch_size_exp = get_stream_batch_size(stream_name);
-    CHECK_EXPECTED_AS_STATUS(batch_size_exp);
-    auto vdma_channel_ptr_exp = get_boundary_vdma_channel_by_stream_name(stream_name);
-    CHECK_EXPECTED_AS_STATUS(vdma_channel_ptr_exp, "Failed to get vdma channel for output stream {}", stream_name);
 
-    auto input_stream = VdmaInputStreamBase::create(stream_params.stream_interface, *vdma_device, vdma_channel_ptr_exp.value(),
-        layer_info, stream_params, batch_size_exp.value(), m_core_op_activated_event);
-    CHECK_EXPECTED_AS_STATUS(input_stream);
-    m_input_streams.insert(make_pair(stream_name, input_stream.release()));
+    auto vdma_channel_ptr_exp = get_boundary_vdma_channel_by_stream_name(stream_name);
+    CHECK_EXPECTED(vdma_channel_ptr_exp, "Failed to get vdma channel for output stream {}", stream_name);
 
-    return HAILO_SUCCESS;
+    return VdmaInputStream::create(stream_params.stream_interface, *vdma_device, vdma_channel_ptr_exp.value(),
+        layer_info, m_core_op_activated_event);
 }
 
-hailo_status CoreOp::create_output_stream_from_config_params(Device &device,
+Expected<std::shared_ptr<OutputStreamBase>> CoreOp::create_output_stream_from_config_params(Device &device,
     const hailo_stream_parameters_t &stream_params, const std::string &stream_name)
 {
     auto layer_info = get_layer_info(stream_name);
-    CHECK_EXPECTED_AS_STATUS(layer_info);
+    CHECK_EXPECTED(layer_info);
 
-    CHECK(device.is_stream_interface_supported(stream_params.stream_interface), HAILO_INVALID_OPERATION,
+    CHECK_AS_EXPECTED(device.is_stream_interface_supported(stream_params.stream_interface), HAILO_INVALID_OPERATION,
         "Device does not supports the given stream interface streams. Please update input_stream_params for stream {}.",
         stream_name);
 
+    std::shared_ptr<OutputStreamBase> output_stream = nullptr;
     switch (stream_params.stream_interface) {
         case HAILO_STREAM_INTERFACE_PCIE:
             // Fallthrough
         case HAILO_STREAM_INTERFACE_INTEGRATED:
-            return create_vdma_output_stream(device, stream_name, layer_info.value(), stream_params);
-        
+            {
+                auto output_stream_exp = create_vdma_output_stream(device, stream_name, layer_info.value(), stream_params);
+                CHECK_EXPECTED(output_stream_exp);
+                output_stream = output_stream_exp.release();
+                break;
+            }
+
         case HAILO_STREAM_INTERFACE_ETH:
             {
-                auto output_stream =  EthernetOutputStream::create(device,
+                auto output_stream_exp =  EthernetOutputStream::create(device,
                     layer_info.value(), stream_params.eth_output_params, 
                     m_core_op_activated_event);
-                CHECK_EXPECTED_AS_STATUS(output_stream);
-                m_output_streams.insert(make_pair(stream_name, output_stream.release()));
-                return HAILO_SUCCESS;
+                CHECK_EXPECTED(output_stream_exp);
+                output_stream = output_stream_exp.release();
+                break;
             }
-        
+
         default:
             LOGGER__ERROR("{} interface is not supported.", stream_params.stream_interface);
-            return HAILO_NOT_IMPLEMENTED;
+            return make_unexpected(HAILO_NOT_IMPLEMENTED);
+    }
+
+    if (HAILO_FORMAT_ORDER_HAILO_NMS == layer_info->format.order) {
+        // In NMS we create some new stream object that wraps the original stream (and converts
+        // bbox/burst reads into frame reads).
+        // After HRT-10553 is implemented, we won't need this wrapper anymore.
+        auto base_stream = std::move(output_stream);
+
+        const auto batch_size = get_smallest_configured_batch_size(m_config_params);
+        const auto max_queue_size = batch_size * MAX_ACTIVE_TRANSFERS_SCALE;
+
+        auto nms_stream = NmsOutputStream::create(base_stream, layer_info.value(), max_queue_size,
+            m_core_op_activated_event);
+        CHECK_EXPECTED(nms_stream);
+        output_stream = nms_stream.release();
     }
+
+    return output_stream;
 }
 
-hailo_status CoreOp::create_vdma_output_stream(Device &device, const std::string &stream_name,
+Expected<std::shared_ptr<OutputStreamBase>> CoreOp::create_vdma_output_stream(Device &device, const std::string &stream_name,
     const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params)
 {
     // Make sure the downcast is safe
-    CHECK((Device::Type::INTEGRATED == device.get_type()) || (Device::Type::PCIE == device.get_type()),
+    CHECK_AS_EXPECTED((Device::Type::INTEGRATED == device.get_type()) || (Device::Type::PCIE == device.get_type()),
         HAILO_INTERNAL_FAILURE, "Invalid device type");
     VdmaDevice *vdma_device = reinterpret_cast<VdmaDevice*>(&device);
 
     auto batch_size_exp = get_stream_batch_size(stream_name);
-    CHECK_EXPECTED_AS_STATUS(batch_size_exp);
-    auto vdma_channel_ptr_exp = get_boundary_vdma_channel_by_stream_name(stream_name);
-    CHECK_EXPECTED_AS_STATUS(vdma_channel_ptr_exp, "Failed to get vdma channel for output stream {}", stream_name);
+    CHECK_EXPECTED(batch_size_exp);
 
-    auto output_stream = VdmaOutputStreamBase::create(stream_params.stream_interface, *vdma_device, vdma_channel_ptr_exp.value(),
-        layer_info, batch_size_exp.value(), stream_params, m_core_op_activated_event);
-    CHECK_EXPECTED_AS_STATUS(output_stream);
-    m_output_streams.insert(make_pair(stream_name, output_stream.release()));
+    auto vdma_channel_ptr_exp = get_boundary_vdma_channel_by_stream_name(stream_name);
+    CHECK_EXPECTED(vdma_channel_ptr_exp, "Failed to get vdma channel for output stream {}", stream_name);
 
-    return HAILO_SUCCESS;
+    return VdmaOutputStream::create(stream_params.stream_interface, *vdma_device, vdma_channel_ptr_exp.value(),
+        layer_info, m_core_op_activated_event);
 }
 
 hailo_status CoreOp::create_streams_from_config_params(Device &device)
@@ -383,17 +545,23 @@ hailo_status CoreOp::create_streams_from_config_params(Device &device)
         switch (stream_parameters_pair.second.direction) {
             case HAILO_H2D_STREAM:
                 {
-                    auto status = create_input_stream_from_config_params(device,
+                    auto stream = create_input_stream_from_config_params(device,
                         stream_parameters_pair.second,
                         stream_parameters_pair.first);
+                    CHECK_EXPECTED_AS_STATUS(stream);
+
+                    auto status = add_input_stream(stream.release(), stream_parameters_pair.second);
                     CHECK_SUCCESS(status);
                 }
                 break;
             case HAILO_D2H_STREAM:
                 {
-                    auto status = create_output_stream_from_config_params(device,
+                    auto stream = create_output_stream_from_config_params(device,
                         stream_parameters_pair.second,
                         stream_parameters_pair.first);
+                    CHECK_EXPECTED_AS_STATUS(stream);
+
+                    auto status = add_output_stream(stream.release(), stream_parameters_pair.second);
                     CHECK_SUCCESS(status);
                 }
                 break;
@@ -415,7 +583,7 @@ Expected<InputStreamRefVector> CoreOp::get_input_streams_by_network(const std::s
     for (auto &stream_info : input_stream_infos.value()) {
         auto stream_ref = get_input_stream_by_name(stream_info.name);
         CHECK_EXPECTED(stream_ref);
-        result.push_back(stream_ref.release());
+        result.emplace_back(stream_ref.release());
     }
     return result;
 }
@@ -429,7 +597,7 @@ Expected<OutputStreamRefVector> CoreOp::get_output_streams_by_network(const std:
     for (auto &stream_info : output_stream_infos.value()) {
         auto stream_ref = get_output_stream_by_name(stream_info.name);
         CHECK_EXPECTED(stream_ref);
-        result.push_back(stream_ref.release());
+        result.emplace_back(stream_ref.release());
     }
     return result;
 }
@@ -452,7 +620,7 @@ OutputStreamRefVector CoreOp::get_output_streams()
     return result;
 }
 
-ExpectedRef<InputStream> CoreOp::get_input_stream_by_name(const std::string& name)
+ExpectedRef<InputStreamBase> CoreOp::get_input_stream_by_name(const std::string& name)
 {
     auto iterator = m_input_streams.find(name);
     if (m_input_streams.end() == iterator) {
@@ -460,10 +628,10 @@ ExpectedRef<InputStream> CoreOp::get_input_stream_by_name(const std::string& nam
         return make_unexpected(HAILO_NOT_FOUND);
     }
 
-    return std::ref<InputStream>(*iterator->second);
+    return std::ref<InputStreamBase>(*iterator->second);
 }
 
-ExpectedRef<OutputStream> CoreOp::get_output_stream_by_name(const std::string& name)
+ExpectedRef<OutputStreamBase> CoreOp::get_output_stream_by_name(const std::string& name)
 {
     auto iterator = m_output_streams.find(name);
     if (m_output_streams.end() == iterator) {
@@ -471,7 +639,7 @@ ExpectedRef<OutputStream> CoreOp::get_output_stream_by_name(const std::string& n
         return make_unexpected(HAILO_NOT_FOUND);
     }
 
-    return std::ref<OutputStream>(*iterator->second);
+    return std::ref<OutputStreamBase>(*iterator->second);
 }
 
 std::vector<std::reference_wrapper<InputStream>> CoreOp::get_input_streams_by_interface(
@@ -480,7 +648,7 @@ std::vector<std::reference_wrapper<InputStream>> CoreOp::get_input_streams_by_in
     std::vector<std::reference_wrapper<InputStream>> results;
     for (auto &name_pair : m_input_streams) {
         if (stream_interface == name_pair.second->get_interface()) {
-            results.push_back(std::ref(*name_pair.second));
+            results.emplace_back(std::ref(*name_pair.second));
         }
     }
     return results;
@@ -492,7 +660,7 @@ std::vector<std::reference_wrapper<OutputStream>> CoreOp::get_output_streams_by_
     std::vector<std::reference_wrapper<OutputStream>> results;
     for (auto &name_pair : m_output_streams) {
         if (stream_interface == name_pair.second->get_interface()) {
-            results.push_back(std::ref(*name_pair.second));
+            results.emplace_back(std::ref(*name_pair.second));
         }
     }
     return results;
@@ -519,16 +687,16 @@ AccumulatorPtr CoreOp::get_deactivation_time_accumulator() const
     return m_deactivation_time_accumulator;
 }
 
-Expected<std::shared_ptr<InputStream>> CoreOp::get_shared_input_stream_by_name(const std::string &stream_name)
+Expected<std::shared_ptr<InputStreamBase>> CoreOp::get_shared_input_stream_by_name(const std::string &stream_name)
 {
-    CHECK_AS_EXPECTED(contains(m_input_streams, stream_name), HAILO_NOT_FOUND, "Input stream {} not found.");
+    CHECK_AS_EXPECTED(contains(m_input_streams, stream_name), HAILO_NOT_FOUND, "Input stream {} not found.", stream_name);
     auto stream_ptr = m_input_streams.at(stream_name);
     return stream_ptr;
 }
 
-Expected<std::shared_ptr<OutputStream>> CoreOp::get_shared_output_stream_by_name(const std::string &stream_name)
+Expected<std::shared_ptr<OutputStreamBase>> CoreOp::get_shared_output_stream_by_name(const std::string &stream_name)
 {
-    CHECK_AS_EXPECTED(contains(m_output_streams, stream_name), HAILO_NOT_FOUND, "Output stream {} not found.");
+    CHECK_AS_EXPECTED(contains(m_output_streams, stream_name), HAILO_NOT_FOUND, "Output stream {} not found.", stream_name);
     auto stream_ptr = m_output_streams.at(stream_name);
     return stream_ptr;
 }
index 3c00c19c0008bd1c9f57a821db29faf2c9a1c2cf..00f046ac4c1d2ad2574bafeb7d7792342fc4d3b4 100644 (file)
  *        |                  /                                |                                \                       |
  *        |         VdmaConfigCoreOp                     VDeviceCoreOp                   HcpConfigCoreOp               |  (Actual implementations)
  *        |                                                   |                                                        |
- *        |                                                   |                                                        |  
- *        |                                        vector of VdmaConfigCoreOp                                          |
- *        -------------------------------------------------------------------------------------------------------------|
- *        |                                             ActivatedCoreOp                                                |  (Base classes)
- *        |                 __________________________________|_____________________________________                   |
- *        |                /                                  |                                     \                  |
- *        |    VdmaConfigActivatedCoreOp            VDeviceActivatedCoreOp                 HcpConfigActivatedCoreOp    |  (Actual implementations)
  *        |                                                   |                                                        |
- *        |                                                   |                                                        |  
- *        |                                  vector of VdmaConfigActivatedCoreOp                                       |
+ *        |                                        vector of VdmaConfigCoreOp                                          |
  *        --------------------------------------------------------------------------------------------------------------
  **/
 
 #include "control_protocol.h"
 #include "vdma/channel/boundary_channel.hpp"
 #include "core_op/active_core_op_holder.hpp"
+#include "stream_common/stream_internal.hpp"
 
 
 namespace hailort
 {
 /** Represents a vector of InputStream ptrs */
-using InputStreamPtrVector = std::vector<std::shared_ptr<InputStream>>;
+using InputStreamPtrVector = std::vector<std::shared_ptr<InputStreamBase>>;
 
 /** Represents a vector of OutputStream ptrs */
-using OutputStreamPtrVector = std::vector<std::shared_ptr<OutputStream>>;
-
-// ActivatedCoreOp is created with `hailo_activate_network_group_params_t` for legacy reasons.
-// Currently hailo_activate_network_group_params_t is an empty struct holder,
-// when adding parameters to it, consider `hailo_activate_network_group_params_t` should hold one core op in this case.
-class ActivatedCoreOp : public ActivatedNetworkGroup
-{
-public:
-    virtual ~ActivatedCoreOp() = default;
-    ActivatedCoreOp(const ActivatedCoreOp &other) = delete;
-    ActivatedCoreOp &operator=(const ActivatedCoreOp &other) = delete;
-    ActivatedCoreOp &operator=(ActivatedCoreOp &&other) = delete;
-    ActivatedCoreOp(ActivatedCoreOp &&other) = default;
-
-    virtual uint32_t get_invalid_frames_count() override;
-
-protected:
-    hailo_activate_network_group_params_t m_network_group_params;
-
-    ActivatedCoreOp(const hailo_activate_network_group_params_t &network_group_params,
-        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,         
-        EventPtr &&core_op_activated_event, hailo_status &status);
-
-    EventPtr m_core_op_activated_event;
-    std::map<std::string, std::shared_ptr<InputStream>> &m_input_streams;
-    std::map<std::string, std::shared_ptr<OutputStream>> &m_output_streams;
-
-private:
-    hailo_status validate_network_group_params(const hailo_activate_network_group_params_t &network_group_params);
-};
-
+using OutputStreamPtrVector = std::vector<std::shared_ptr<OutputStreamBase>>;
 
 class CoreOp
 {
@@ -90,10 +52,6 @@ public:
         return m_metadata;
     }
 
-    Expected<std::unique_ptr<ActivatedNetworkGroup>> activate_with_batch(
-        uint16_t dynamic_batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE,
-        bool resume_pending_stream_transfers = false);
-    virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> activate(const hailo_activate_network_group_params_t &network_group_params);
     virtual hailo_status wait_for_activation(const std::chrono::milliseconds &timeout);
 
     virtual const std::string &name() const;
@@ -110,13 +68,15 @@ public:
     virtual OutputStreamRefVector get_output_streams();
     virtual std::vector<std::reference_wrapper<InputStream>> get_input_streams_by_interface(hailo_stream_interface_t stream_interface);
     virtual std::vector<std::reference_wrapper<OutputStream>> get_output_streams_by_interface(hailo_stream_interface_t stream_interface);
-    virtual ExpectedRef<InputStream> get_input_stream_by_name(const std::string& name);
-    virtual ExpectedRef<OutputStream> get_output_stream_by_name(const std::string& name);
+    virtual ExpectedRef<InputStreamBase> get_input_stream_by_name(const std::string& name);
+    virtual ExpectedRef<OutputStreamBase> get_output_stream_by_name(const std::string& name);
     virtual Expected<LatencyMeasurementResult> get_latency_measurement(const std::string &network_name="");
 
+    hailo_status activate(uint16_t dynamic_batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE);
+    hailo_status deactivate();
 
-    virtual hailo_status activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers = false) = 0;
-    virtual hailo_status deactivate_impl(bool keep_nn_config_during_reset = false) = 0;
+    virtual hailo_status activate_impl(uint16_t dynamic_batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE) = 0;
+    virtual hailo_status deactivate_impl() = 0;
 
     virtual Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &network_name="") const;
 
@@ -130,9 +90,17 @@ public:
 
     const SupportedFeatures &get_supported_features();
     Expected<uint16_t> get_stream_batch_size(const std::string &stream_name);
+    bool is_default_batch_size() const;
+
+    virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key);
+
+    hailo_status wrap_streams_for_remote_process();
+
+    void set_vdevice_core_op_handle(vdevice_core_op_handle_t handle) { m_vdevice_core_op_handle = handle;}
+    vdevice_core_op_handle_t vdevice_core_op_handle() { return m_vdevice_core_op_handle;}
 
-    std::map<std::string, std::shared_ptr<InputStream>> m_input_streams;
-    std::map<std::string, std::shared_ptr<OutputStream>> m_output_streams;
+    std::map<std::string, std::shared_ptr<InputStreamBase>> m_input_streams;
+    std::map<std::string, std::shared_ptr<OutputStreamBase>> m_output_streams;
 
     // This function is called when a user is creating VStreams and is only relevant for VDeviceCoreOp.
     // In case a user is using VdmaConfigCoreOp or HcpConfigCoreOp this function should do nothing.
@@ -142,42 +110,47 @@ public:
     }
 
 protected:
-    CoreOp(const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> metadata, hailo_status &status);
+    CoreOp(const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> metadata,
+        ActiveCoreOpHolder &active_core_op_holder, hailo_status &status);
 
-    virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
-        const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) = 0;
-
-    hailo_status create_output_stream_from_config_params(Device &device,
+    Expected<std::shared_ptr<OutputStreamBase>> create_output_stream_from_config_params(Device &device,
         const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
-    hailo_status create_input_stream_from_config_params(Device &device,
+    Expected<std::shared_ptr<InputStreamBase>> create_input_stream_from_config_params(Device &device,
         const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
 
-    hailo_status activate_low_level_streams(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers);
+    hailo_status activate_low_level_streams();
     hailo_status deactivate_low_level_streams();
 
     Expected<LayerInfo> get_layer_info(const std::string &stream_name);
+    bool is_nms();
+
+    hailo_status add_input_stream(std::shared_ptr<InputStreamBase> &&stream,
+        const hailo_stream_parameters_t &stream_params);
+    hailo_status add_output_stream(std::shared_ptr<OutputStreamBase> &&stream,
+        const hailo_stream_parameters_t &stream_params);
 
     virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() = 0;
     virtual Expected<vdma::BoundaryChannelPtr> get_boundary_vdma_channel_by_stream_name(const std::string &stream_name) = 0;
+    static uint16_t get_smallest_configured_batch_size(const ConfigureNetworkParams &config_params);
 
+private:
     const ConfigureNetworkParams m_config_params;
+    ActiveCoreOpHolder &m_active_core_op_holder;
     const uint16_t m_min_configured_batch_size; // TODO: remove after HRT-6535
     EventPtr m_core_op_activated_event;
     AccumulatorPtr m_activation_time_accumulator;
     AccumulatorPtr m_deactivation_time_accumulator;
     std::shared_ptr<CoreOpMetadata> m_metadata;
+    vdevice_core_op_handle_t m_vdevice_core_op_handle;
 
-private:
-    static uint16_t get_smallest_configured_batch_size(const ConfigureNetworkParams &config_params);
-    hailo_status create_vdma_input_stream(Device &device, const std::string &stream_name,
+    Expected<std::shared_ptr<InputStreamBase>> create_vdma_input_stream(Device &device, const std::string &stream_name,
         const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params);
-    hailo_status create_vdma_output_stream(Device &device, const std::string &stream_name,
+    Expected<std::shared_ptr<OutputStreamBase>> create_vdma_output_stream(Device &device, const std::string &stream_name,
         const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params);
-    Expected<std::shared_ptr<InputStream>> get_shared_input_stream_by_name(const std::string &stream_name);
-    Expected<std::shared_ptr<OutputStream>> get_shared_output_stream_by_name(const std::string &stream_name);
+    Expected<std::shared_ptr<InputStreamBase>> get_shared_input_stream_by_name(const std::string &stream_name);
+    Expected<std::shared_ptr<OutputStreamBase>> get_shared_output_stream_by_name(const std::string &stream_name);
 
     friend class VDeviceCoreOp; // VDeviceCoreOp is using protected members and functions from other CoreOps objects
-    friend class VDeviceActivatedCoreOp; // VDeviceActivatedCoreOp is calling CoreOp's protected function `create_activated_network_group`
     friend class ConfiguredNetworkGroupBase;
 };
 
index 2f80a4855e7ade3c06b83aaf9f717da1865f060d..89dea84744ef15e7d76b6bef0288684bfe63132e 100644 (file)
@@ -14,7 +14,6 @@
 
 #include "vdma/memory/descriptor_list.hpp"
 #include "vdma/channel/channel_id.hpp"
-#include "vdma/channel/boundary_channel.hpp"
 #include "hef/layer_info.hpp"
 
 #include <array>
index 62569115173b06064356c3b7ccc40567b2821fc9..12385b5182bcbe02e9442a69dfa1d6356796de42 100644 (file)
@@ -142,8 +142,9 @@ Expected<std::unique_ptr<vdma::VdmaBuffer>> ConfigBuffer::create_sg_buffer(Hailo
     // For config channels (In Hailo15), the page size must be a multiplication of host default page size.
     // Therefore we use the flag force_default_page_size for those types of buffers.
     auto const FORCE_DEFAULT_PAGE_SIZE = true;
+    auto const FORCE_BATCH_SIZE = true;
     auto buffer_size_requirements = vdma::BufferSizesRequirements::get_sg_buffer_requirements_multiple_transfers(
-        driver.desc_max_page_size(), 1, cfg_sizes, NOT_CIRCULAR, FORCE_DEFAULT_PAGE_SIZE);
+        driver.desc_max_page_size(), 1, cfg_sizes, NOT_CIRCULAR, FORCE_DEFAULT_PAGE_SIZE, FORCE_BATCH_SIZE);
     CHECK_EXPECTED(buffer_size_requirements);
     const auto page_size = buffer_size_requirements->desc_page_size();
     const auto descs_count = buffer_size_requirements->descs_count();
index 2023de1fcf742ed7e169259175162b3ef49ac77c..3329ca56d3eb9c0637773cd7e85c9e3e838dc8cf 100644 (file)
@@ -59,34 +59,7 @@ Expected<IntermediateBuffer> IntermediateBuffer::create(HailoRTDriver &driver, u
         CHECK_EXPECTED(desc_count_local);
     }
 
-    return IntermediateBuffer(std::move(buffer_ptr), transfer_size, max_batch_size, streaming_type);
-}
-
-hailo_status IntermediateBuffer::set_dynamic_batch_size(uint16_t batch_size)
-{
-    if (m_streaming_type == StreamingType::CIRCULAR_CONTINUOS) {
-        // The buffer pattern does not depend on the batch for circular continuous buffers.
-        return HAILO_SUCCESS;
-    }
-
-    CHECK(batch_size <= m_max_batch_size, HAILO_INVALID_ARGUMENT,
-        "batch_size ({}) must be <= than m_max_batch_size ({})",
-        batch_size, m_max_batch_size);
-
-    LOGGER__TRACE("Setting intermediate buffer's batch_size to {}", batch_size);
-    const auto prev_batch_size = m_dynamic_batch_size;
-    m_dynamic_batch_size = batch_size;
-
-    auto status = m_buffer->reprogram_device_interrupts_for_end_of_batch(m_transfer_size, prev_batch_size,
-        vdma::InterruptsDomain::NONE);
-    CHECK_SUCCESS(status, "Failed reprogramming device interrupts for the end of the previous batch (size {})",
-        prev_batch_size);
-    status = m_buffer->reprogram_device_interrupts_for_end_of_batch(m_transfer_size, m_dynamic_batch_size,
-        vdma::InterruptsDomain::DEVICE);
-    CHECK_SUCCESS(status, "Failed reprogramming device interrupts for the end of the current batch (size {})",
-        m_dynamic_batch_size);
-
-    return HAILO_SUCCESS;
+    return IntermediateBuffer(std::move(buffer_ptr), transfer_size, max_batch_size);
 }
 
 Expected<Buffer> IntermediateBuffer::read()
@@ -109,11 +82,9 @@ CONTROL_PROTOCOL__host_buffer_info_t IntermediateBuffer::get_host_buffer_info()
 }
 
 IntermediateBuffer::IntermediateBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer, uint32_t transfer_size,
-                                       uint16_t batch_size, StreamingType streaming_type) :
+                                       uint16_t batch_size) :
     m_buffer(std::move(buffer)),
     m_transfer_size(transfer_size),
-    m_max_batch_size(batch_size),
-    m_streaming_type(streaming_type),
     m_dynamic_batch_size(batch_size)
 {}
 
@@ -121,8 +92,10 @@ Expected<std::unique_ptr<vdma::VdmaBuffer>> IntermediateBuffer::create_sg_buffer
     uint32_t transfer_size, uint16_t batch_size, vdma::ChannelId d2h_channel_id, bool is_circular)
 {
     auto const DONT_FORCE_DEFAULT_PAGE_SIZE = false;
+    auto const FORCE_BATCH_SIZE = true;
     auto buffer_requirements = vdma::BufferSizesRequirements::get_sg_buffer_requirements_single_transfer(
-        driver.desc_max_page_size(), batch_size, batch_size, transfer_size, is_circular, DONT_FORCE_DEFAULT_PAGE_SIZE);
+        driver.desc_max_page_size(), batch_size, batch_size, transfer_size, is_circular, DONT_FORCE_DEFAULT_PAGE_SIZE,
+        FORCE_BATCH_SIZE);
     CHECK_EXPECTED(buffer_requirements);
     const auto desc_page_size = buffer_requirements->desc_page_size();
     const auto descs_count = buffer_requirements->descs_count();
index 0d4decadc6d25d060ed23cccac7e9acb0cf8442b..aebf2ab511f1a72beda8d0a7b2b76a471b563e97 100644 (file)
@@ -36,13 +36,11 @@ public:
     static Expected<IntermediateBuffer> create(HailoRTDriver &driver, uint32_t transfer_size,
         uint16_t max_batch_size, vdma::ChannelId d2h_channel_id, StreamingType streaming_type);
 
-    hailo_status set_dynamic_batch_size(uint16_t batch_size);
     Expected<Buffer> read();
     CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info() const;
 
 private:
-    IntermediateBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer, uint32_t transfer_size, uint16_t batch_size,
-        StreamingType streaming_type);
+    IntermediateBuffer(std::unique_ptr<vdma::VdmaBuffer> &&buffer, uint32_t transfer_size, uint16_t batch_size);
 
     static Expected<std::unique_ptr<vdma::VdmaBuffer>> create_sg_buffer(HailoRTDriver &driver,
         uint32_t transfer_size, uint16_t batch_size, vdma::ChannelId d2h_channel_id, bool is_circular);
@@ -55,8 +53,6 @@ private:
 
     std::unique_ptr<vdma::VdmaBuffer> m_buffer;
     const uint32_t m_transfer_size;
-    const uint16_t m_max_batch_size;
-    const StreamingType m_streaming_type;
     uint16_t m_dynamic_batch_size;
 };
 
index be3ac4c57c15c758cf9c04d19f9698f60c6d53e6..db8bc7e4dcf761e9458f3e49366e2fdd5b9c87c2 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <numeric>
 
+#define HAILO15H_NMS_MAX_CLASSES (1024)
 
 namespace hailort
 {
@@ -104,6 +105,18 @@ Expected<EdgeLayer> ContextResources::get_edge_layer_by_stream_index(const uint8
     return make_unexpected(HAILO_INTERNAL_FAILURE);
 }
 
+Expected<EdgeLayer> ContextResources::get_edge_layer_by_channel_id(const vdma::ChannelId channel_id) const
+{
+    for (const auto &edge_layer : m_edge_layers) {
+        if (channel_id == edge_layer.channel_id) {
+            return EdgeLayer(edge_layer);
+        }
+    }
+
+    LOGGER__ERROR("Edge layer does not exists for channel id {}", channel_id);
+    return make_unexpected(HAILO_INTERNAL_FAILURE);
+}
+
 Expected<DdrChannelsInfo> ContextResources::get_ddr_channels_info(uint8_t d2h_stream_index) const
 {
     for (const auto &ddr_channels_info : m_ddr_channels_infos) {
@@ -183,7 +196,10 @@ static Expected<LatencyMeterPtr> create_hw_latency_meter(const std::vector<Layer
         return make_unexpected(HAILO_INVALID_OPERATION);
     }
 
-    return make_shared_nothrow<LatencyMeter>(d2h_channel_names, MAX_IRQ_TIMESTAMPS_SIZE);
+    auto res = make_shared_nothrow<LatencyMeter>(d2h_channel_names, MAX_IRQ_TIMESTAMPS_SIZE);
+    CHECK_NOT_NULL_AS_EXPECTED(res, HAILO_OUT_OF_HOST_MEMORY);
+
+    return res;
 }
 
 static Expected<LatencyMetersMap> create_latency_meters_from_config_params( 
@@ -363,22 +379,21 @@ void ResourcesManager::process_interrupts(IrqData &&irq_data)
     }
 }
 
-// TODO: after adding NMS single int, we can create an async channel for async nms output stream (HRT-10553)
-Expected<vdma::BoundaryChannel::Type> ResourcesManager::get_boundary_vdma_channel_type(const LayerInfo &layer_info)
+Expected<uint16_t> ResourcesManager::get_batch_size() const
 {
-    CHECK_AS_EXPECTED(contains(m_config_params.stream_params_by_name, layer_info.name), HAILO_INVALID_ARGUMENT,
-        "Can't find stream params for layer {}", layer_info.name);
-    const auto async_stream = (0 != (m_config_params.stream_params_by_name.at(layer_info.name).flags & HAILO_STREAM_FLAGS_ASYNC));
-    if (async_stream) {
-        // NMS async streams use buffered channels
-        if (layer_info.format.order == HAILO_FORMAT_ORDER_HAILO_NMS) {
-            return vdma::BoundaryChannel::Type::BUFFERED;
+    uint16_t batch_size = UINT16_MAX;
+    for (auto const &network_map : m_config_params.network_params_by_name) {
+        auto const network_name_from_params = network_map.first;
+
+        if (UINT16_MAX == batch_size) {
+            batch_size = network_map.second.batch_size;
+        } else {
+            CHECK_AS_EXPECTED(batch_size == network_map.second.batch_size, HAILO_INVALID_OPERATION,
+                "The same batch size must be applied to all networks inside the network group");
         }
-        // Non-nms async streams use async channels
-        return vdma::BoundaryChannel::Type::ASYNC;
     }
-    // Buffered streams => buffered channels
-    return vdma::BoundaryChannel::Type::BUFFERED;
+    return batch_size;
+
 }
 
 hailo_status ResourcesManager::create_boundary_vdma_channel(const LayerInfo &layer_info)
@@ -399,6 +414,16 @@ hailo_status ResourcesManager::create_boundary_vdma_channel(const LayerInfo &lay
     const auto max_active_transfers_scale = (layer_info.format.order == HAILO_FORMAT_ORDER_HAILO_NMS) ?
         (nms_max_detections_per_frame * MAX_ACTIVE_TRANSFERS_SCALE) : MAX_ACTIVE_TRANSFERS_SCALE;
 
+    auto device_arch = m_vdma_device.get_architecture();
+    CHECK_EXPECTED_AS_STATUS(device_arch);
+    /* Add error in configure phase for invalid NMS parameters */
+    if (layer_info.format.order == HAILO_FORMAT_ORDER_HAILO_NMS && 
+        (device_arch.value() == HAILO_ARCH_HAILO15H || device_arch.value() == HAILO_ARCH_PLUTO)) {
+        CHECK(layer_info.nms_info.number_of_classes * layer_info.nms_info.chunks_per_frame * network_batch_size.value() < HAILO15H_NMS_MAX_CLASSES, 
+        HAILO_INVALID_ARGUMENT, "Invalid NMS parameters. Number of classes ({}) * division factor ({}) * batch size ({}) must be under {}",
+        layer_info.nms_info.number_of_classes, layer_info.nms_info.chunks_per_frame, network_batch_size.value(), HAILO15H_NMS_MAX_CLASSES);
+    }
+
     const auto min_active_trans = MIN_ACTIVE_TRANSFERS_SCALE * network_batch_size.value();
     const auto max_active_trans = (layer_info.format.order == HAILO_FORMAT_ORDER_HAILO_NMS) ?
         /* NMS Case - Value be be higher than UINT16_MAX. in this case we only limit to UART16_MAX with no error */
@@ -415,21 +440,25 @@ hailo_status ResourcesManager::create_boundary_vdma_channel(const LayerInfo &lay
     /* TODO - HRT-6829- page_size should be calculated inside the vDMA channel class create function */
     static const bool IS_CIRCULAR = true;
     const auto transfer_size = LayerInfoUtils::get_layer_transfer_size(layer_info);
-    
-    auto const DONT_FORCE_DEFAULT_PAGE_SIZE = false;
+
+    const auto DONT_FORCE_DEFAULT_PAGE_SIZE = false;
+    const auto DONT_FORCE_BATCH_SIZE = false;
+    // Hack to reduce the max page size if the driver page size is equal to the stream size.
+    // In this case, page size == stream size is an invalid solution.
+    // TODO - remove this WA after HRT-11747
+    const uint16_t max_page_size = (m_driver.desc_max_page_size() == layer_info.max_shmifo_size) ?
+        (m_driver.desc_max_page_size() / 2) : m_driver.desc_max_page_size();
     auto buffer_sizes_requirements = vdma::BufferSizesRequirements::get_sg_buffer_requirements_single_transfer(
-        m_driver.desc_max_page_size(), static_cast<uint16_t>(min_active_trans), static_cast<uint16_t>(max_active_trans),
-        transfer_size, IS_CIRCULAR, DONT_FORCE_DEFAULT_PAGE_SIZE);
+        max_page_size, static_cast<uint16_t>(min_active_trans), static_cast<uint16_t>(max_active_trans),
+        transfer_size, IS_CIRCULAR, DONT_FORCE_DEFAULT_PAGE_SIZE, DONT_FORCE_BATCH_SIZE);
     CHECK_EXPECTED_AS_STATUS(buffer_sizes_requirements);
 
     const auto page_size = buffer_sizes_requirements->desc_page_size();
     const auto descs_count = (nullptr != std::getenv("HAILO_CONFIGURE_FOR_HW_INFER")) ?
         MAX_DESCS_COUNT : buffer_sizes_requirements->descs_count();
 
-    auto channel_type = get_boundary_vdma_channel_type(layer_info);
-    CHECK_EXPECTED_AS_STATUS(channel_type);
     auto channel = vdma::BoundaryChannel::create(channel_id.value(), channel_direction, m_driver, descs_count, page_size,
-        layer_info.name, latency_meter, network_batch_size.value(), channel_type.release());
+        layer_info.name, latency_meter);
     CHECK_EXPECTED_AS_STATUS(channel);
 
     m_boundary_channels.emplace(channel_id.value(), channel.release());
@@ -541,21 +570,6 @@ Expected<hailo_stream_interface_t> ResourcesManager::get_default_streams_interfa
     return m_vdma_device.get_default_streams_interface();
 }
 
-hailo_status ResourcesManager::set_dynamic_batch_size(uint16_t dynamic_batch_size)
-{
-    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size) {
-        LOGGER__TRACE("Received CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == batch_size");
-        return HAILO_SUCCESS;
-    }
-
-    for (auto &key_buff_pair : m_intermediate_buffers) {
-        const auto status = key_buff_pair.second.set_dynamic_batch_size(dynamic_batch_size);
-        CHECK_SUCCESS(status);
-    }
-
-    return HAILO_SUCCESS;
-}
-
 Expected<uint16_t> ResourcesManager::get_network_batch_size(const std::string &network_name) const
 {
     for (auto const &network_map : m_config_params.network_params_by_name) {
@@ -607,12 +621,12 @@ hailo_status ResourcesManager::enable_state_machine(uint16_t dynamic_batch_size,
     return Control::enable_core_op(m_vdma_device, m_core_op_index, dynamic_batch_size, batch_count);
 }
 
-hailo_status ResourcesManager::reset_state_machine(bool keep_nn_config_during_reset)
+hailo_status ResourcesManager::reset_state_machine()
 {
-    auto status = Control::reset_context_switch_state_machine(m_vdma_device, keep_nn_config_during_reset);
+    auto status = Control::reset_context_switch_state_machine(m_vdma_device);
     CHECK_SUCCESS(status);
 
-    if (!keep_nn_config_during_reset && (Device::Type::INTEGRATED == m_vdma_device.get_type())) {
+    if (Device::Type::INTEGRATED == m_vdma_device.get_type()) {
         // On core device, the nn_manager is not responsible to reset the nn-core so
         // we use the SCU control for that.
         status = m_vdma_device.reset(HAILO_RESET_DEVICE_MODE_NN_CORE);
@@ -622,18 +636,10 @@ hailo_status ResourcesManager::reset_state_machine(bool keep_nn_config_during_re
     return HAILO_SUCCESS;
 }
 
-hailo_status ResourcesManager::cancel_pending_async_transfers()
+hailo_status ResourcesManager::cancel_pending_transfers()
 {
     for (const auto &boundary_channel : m_boundary_channels) {
-        if (boundary_channel.second->type() != vdma::BoundaryChannel::Type::ASYNC) {
-            continue;
-        }
-
-        // Best effort
-        const auto status = boundary_channel.second->cancel_pending_transfers();
-        if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed cancellation of pending transfers on async channel {}", boundary_channel.second->stream_name());
-        }
+        boundary_channel.second->cancel_pending_transfers();
     }
     return HAILO_SUCCESS;
 }
@@ -685,11 +691,11 @@ Expected<std::pair<vdma::ChannelId, uint16_t>> ResourcesManager::create_mapped_b
     vdma::BoundaryChannelPtr boundary_channel_ptr, const HailoRTDriver::DmaDirection direction,
     const uint32_t single_transfer_size, const uint16_t dynamic_batch_size, const uint16_t batch_count)
 {
-    auto total_frames_per_run = dynamic_batch_size * batch_count;
-    auto total_run_transfer_size = total_frames_per_run * single_transfer_size;
+    const auto total_frames_per_run = dynamic_batch_size * batch_count;
 
     auto desc_list = boundary_channel_ptr->get_desc_list();
-    auto total_desc_count = desc_list->descriptors_in_buffer(total_run_transfer_size);
+    const auto descs_per_transfer = desc_list->descriptors_in_buffer(single_transfer_size);
+    const auto total_desc_count = total_frames_per_run * descs_per_transfer;
 
     CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(total_desc_count), HAILO_INVALID_ARGUMENT,
         "calculated total_desc_count for vdma descriptor list is out of UINT16 range");
@@ -704,6 +710,7 @@ Expected<std::pair<vdma::ChannelId, uint16_t>> ResourcesManager::create_mapped_b
 
     auto desc_programed = program_desc_for_hw_only_flow(desc_list, single_transfer_size, dynamic_batch_size, batch_count);
     CHECK_EXPECTED(desc_programed);
+    assert(static_cast<uint16_t>(total_desc_count) == desc_programed.value());
 
     auto channel_info_pair = std::make_pair(boundary_channel_ptr->get_channel_id(), desc_programed.release());
 
@@ -747,14 +754,16 @@ Expected<uint16_t> ResourcesManager::calc_hw_infer_batch_count(uint16_t dynamic_
 {
     uint16_t batch_count = UINT16_MAX;
     for (const auto &layer_info : m_core_op_metadata->get_all_layer_infos()) {
-        const auto stream_info = LayerInfoUtils::get_stream_info_from_layer_info(layer_info);
-        uint32_t single_transfer_size = LayerInfoUtils::get_stream_transfer_size(stream_info, layer_info);
-        auto boundary_channel_ptr_exp = get_boundary_vdma_channel_by_stream_name(layer_info.name);
-        CHECK_EXPECTED(boundary_channel_ptr_exp);
-        auto boundary_channel_ptr = boundary_channel_ptr_exp.release();
-        const auto max_batch_transfers = boundary_channel_ptr->get_desc_list()->max_transfers(single_transfer_size * dynamic_batch_size);
-        // infer batch count is the lowest number of "Max transfers" per descriptor list that for all given boundary channels.
-        batch_count = MIN(batch_count, max_batch_transfers);
+        const auto &stream_infos = LayerInfoUtils::get_stream_infos_from_layer_info(layer_info);
+        for (auto &stream_info : stream_infos) {
+            uint32_t single_transfer_size = LayerInfoUtils::get_stream_transfer_size(stream_info, layer_info);
+            auto boundary_channel_ptr_exp = get_boundary_vdma_channel_by_stream_name(layer_info.name);
+            CHECK_EXPECTED(boundary_channel_ptr_exp);
+            auto boundary_channel_ptr = boundary_channel_ptr_exp.release();
+            const auto max_batch_transfers = boundary_channel_ptr->get_desc_list()->max_transfers(single_transfer_size * dynamic_batch_size);
+            // infer batch count is the lowest number of "Max transfers" per descriptor list across all given boundary channels.
+            batch_count = MIN(batch_count, max_batch_transfers);
+        }
     }
     return batch_count;
 }
@@ -792,23 +801,28 @@ Expected<HwInferResults> ResourcesManager::run_hw_only_infer()
     channels_info.channel_count = 0;
     static constexpr auto INFER_TIMEOUT = std::chrono::milliseconds(120000);
 
-    auto batch_count = calc_hw_infer_batch_count(m_config_params.batch_size);
+    auto batch_size = get_batch_size();
+    CHECK_EXPECTED(batch_size);
+
+    auto batch_count = calc_hw_infer_batch_count(*batch_size);
     CHECK_EXPECTED(batch_count);
 
     for (const auto &layer_info : m_core_op_metadata->get_all_layer_infos()) {
         auto boundary_channel_ptr = get_boundary_vdma_channel_by_stream_name(layer_info.name);
         CHECK_EXPECTED(boundary_channel_ptr);
-        auto stream_info = LayerInfoUtils::get_stream_info_from_layer_info(layer_info);
-        auto single_transfer_size = (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) ?
-            stream_info.nms_info.bbox_size : stream_info.hw_frame_size;
-        const auto direction = (layer_info.direction == HAILO_H2D_STREAM) ?
-            HailoRTDriver::DmaDirection::H2D : HailoRTDriver::DmaDirection::D2H;
-
-        auto channel_info_pair = create_mapped_buffer_for_hw_only_infer(boundary_channel_ptr.release(), direction,
-            single_transfer_size, m_config_params.batch_size, batch_count.value());
-        CHECK_EXPECTED(channel_info_pair);
-
-        add_channel_to_hw_infer_channel_info(channel_info_pair.release(), channels_info);
+        const auto &stream_infos = LayerInfoUtils::get_stream_infos_from_layer_info(layer_info);
+        for (auto &stream_info : stream_infos) {
+            auto single_transfer_size = (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) ?
+                stream_info.nms_info.bbox_size : stream_info.hw_frame_size;
+            const auto direction = (layer_info.direction == HAILO_H2D_STREAM) ?
+                HailoRTDriver::DmaDirection::H2D : HailoRTDriver::DmaDirection::D2H;
+
+            auto channel_info_pair = create_mapped_buffer_for_hw_only_infer(boundary_channel_ptr.release(), direction,
+                single_transfer_size, *batch_size, batch_count.value());
+            CHECK_EXPECTED(channel_info_pair);
+
+            add_channel_to_hw_infer_channel_info(channel_info_pair.release(), channels_info);
+        }
     }
 
     std::condition_variable infer_done_cond;
@@ -818,7 +832,7 @@ Expected<HwInferResults> ResourcesManager::run_hw_only_infer()
     std::mutex mutex;
     std::unique_lock<std::mutex> lock(mutex);
 
-    status = Control::start_hw_only_infer(m_vdma_device, m_core_op_index, m_config_params.batch_size,
+    status = Control::start_hw_only_infer(m_vdma_device, m_core_op_index, *batch_size,
         batch_count.value(), &channels_info);
     CHECK_SUCCESS_AS_EXPECTED(status);
 
@@ -830,7 +844,7 @@ Expected<HwInferResults> ResourcesManager::run_hw_only_infer()
     auto single_frame_transfer_size = m_core_op_metadata->get_total_transfer_size();
     CHECK_EXPECTED(single_frame_transfer_size);
 
-    return hw_infer_calc_stats(batch_count.value(), m_config_params.batch_size, single_frame_transfer_size.release(),
+    return hw_infer_calc_stats(batch_count.value(), *batch_size, single_frame_transfer_size.release(),
         fw_infer_results.infer_cycles);
 }
 
index 3a6b4db06b41c0cdb6f601e426107f1e415f9be7..e18752941cd3afdcc9b30daa7a1863dd2a7721f5 100644 (file)
@@ -41,7 +41,6 @@ namespace hailort
 {
 
 #define DEFAULT_ACTUAL_BATCH_SIZE (1)
-#define MAX_NUMBER_DATA_STREAM_INDEX (20)
 
 
 struct EdgeLayer {
@@ -102,6 +101,7 @@ public:
 
     Expected<EdgeLayer> get_edge_layer_by_stream_index(const uint8_t stream_index,
         const hailo_stream_direction_t direction) const;
+    Expected<EdgeLayer> get_edge_layer_by_channel_id(const vdma::ChannelId channel_id) const;
 
     Expected<DdrChannelsInfo> get_ddr_channels_info(uint8_t d2h_stream_index) const;
     const std::vector<DdrChannelsInfo> &get_ddr_channels_infos() const;
@@ -145,7 +145,6 @@ public:
         uint8_t src_stream_index, uint8_t src_context_index, vdma::ChannelId d2h_channel_id,
         IntermediateBuffer::StreamingType streaming_type);
     ExpectedRef<IntermediateBuffer> get_intermediate_buffer(const IntermediateBufferKey &key);
-    Expected<vdma::BoundaryChannel::Type> get_boundary_vdma_channel_type(const LayerInfo &layer_info);
     hailo_status create_boundary_vdma_channel(const LayerInfo &layer_info);
 
     Expected<CONTROL_PROTOCOL__application_header_t> get_control_core_op_header();
@@ -186,12 +185,11 @@ public:
 
     Expected<Buffer> read_intermediate_buffer(const IntermediateBufferKey &key);
 
-    hailo_status set_dynamic_batch_size(uint16_t dynamic_batch_size);
     hailo_status configure();
     hailo_status enable_state_machine(uint16_t dynamic_batch_size, 
         uint16_t batch_count = CONTROL_PROTOCOL__INIFINITE_BATCH_COUNT);
-    hailo_status reset_state_machine(bool keep_nn_config_during_reset = false);
-    hailo_status cancel_pending_async_transfers();
+    hailo_status reset_state_machine();
+    hailo_status cancel_pending_transfers();
     hailo_status start_vdma_interrupts_dispatcher();
     hailo_status stop_vdma_interrupts_dispatcher();
     Expected<uint16_t> get_network_batch_size(const std::string &network_name) const;
@@ -217,6 +215,7 @@ private:
     hailo_status fill_network_batch_size(CONTROL_PROTOCOL__application_header_t &app_header);
     hailo_status fill_csm_buffer_size(CONTROL_PROTOCOL__application_header_t &app_header);
     void process_interrupts(IrqData &&irq_data);
+    Expected<uint16_t> get_batch_size() const;
 
     std::vector<ContextResources> m_contexts_resources;
     ChannelAllocator m_channel_allocator;
index b05b8333dd133f4699a4e0bf427db9a28f5492c3..bff561325b59dff2c9d71765422fba71007c40ef 100644 (file)
@@ -81,6 +81,10 @@ static bool is_logical_periph_bytes_per_buffer(const uint32_t periph_bytes_per_b
     const uint32_t max_shmifo_size, const uint32_t desc_page_size, const uint32_t max_periph_bytes_value,
     const uint16_t core_bytes_per_buffer)
 {
+    if (0 == periph_bytes_per_buffer) {
+        return false;
+    }
+
     if (is_ddr) {
         // In DDR there is no residue of descriptor - but has to divide with no remainder by core_bytes_per_buffer
         // Calculated by DFC
@@ -94,18 +98,28 @@ static bool is_logical_periph_bytes_per_buffer(const uint32_t periph_bytes_per_b
 static Expected<std::tuple<uint16_t, uint16_t>> calculate_periph_requirements(const LayerInfo &layer_info, const uint32_t desc_page_size,
     const bool is_periph_calculated_in_hailort, const uint32_t max_periph_bytes_value)
 {
-    // If extension for calculating periph values in hailort is false - copy values from core registers , otherwise 
-    // If extesnion is true - calculate them according to shape and other layer information
-    if (!is_periph_calculated_in_hailort) {
+    // If the extension for calculating periph values in hailort is false and hw padding is not supported - copy values
+    // from core registers; otherwise calculate them according to shape and other layer information
+    const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(layer_info, max_periph_bytes_value);
+    if (!is_periph_calculated_in_hailort && !hw_padding_supported) {
         return std::make_tuple(static_cast<uint16_t>(layer_info.nn_stream_config.core_bytes_per_buffer),
             static_cast<uint16_t>(layer_info.nn_stream_config.core_buffers_per_frame));
     }
 
     if (HAILO_FORMAT_ORDER_HAILO_NMS == layer_info.format.order) {
         CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(layer_info.nms_info.bbox_size * layer_info.nms_info.burst_size),
-            HAILO_INVALID_HEF, "Invalid burst size");
-        return std::make_tuple(static_cast<uint16_t>(layer_info.nms_info.bbox_size * layer_info.nms_info.burst_size),
-            static_cast<uint16_t>(1));
+            HAILO_INVALID_HEF, "Invalid NMS parameters");
+        const auto nms_periph_bytes = static_cast<uint16_t>(layer_info.nms_info.bbox_size * layer_info.nms_info.burst_size);
+
+        const auto transfer_size = LayerInfoUtils::get_nms_layer_transfer_size(layer_info);
+        CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(transfer_size / nms_periph_bytes), HAILO_INVALID_HEF, "Invalid NMS parameters");
+        // Will divide with no remainder, seeing as transfer size is a multiple of (bbox_size * burst_size)
+        assert(0 == (transfer_size % nms_periph_bytes));
+        const auto nms_periph_buffers = static_cast<uint16_t>(transfer_size / nms_periph_bytes);
+
+        // In NMS - update periph variables to represent size of frame in case of "interrupt per frame" (where we know frame size)
+        // Otherwise - size of burst / bbox (transfer size)
+        return std::make_tuple(nms_periph_bytes, nms_periph_buffers);
     }
 
     CHECK_AS_EXPECTED(IS_FIT_IN_UINT32(layer_info.hw_shape.width * layer_info.hw_shape.features *
@@ -120,19 +134,27 @@ static Expected<std::tuple<uint16_t, uint16_t>> calculate_periph_requirements(co
     // Currently takes the largest periph_bytes_per_buffer that is possible with shmifo size and desc page size
     // TODO HRT-10961 : calculate optimal periph size
     auto periph_bytes_per_buffer = HailoRTCommon::align_to(row_size, alignment);
-    while (!is_logical_periph_bytes_per_buffer(periph_bytes_per_buffer, core_frame_size, is_ddr, layer_info.max_shmifo_size,
-        desc_page_size, max_periph_bytes_value, layer_info.nn_stream_config.core_bytes_per_buffer) && (0 < periph_bytes_per_buffer)) {
+    while ((0 < periph_bytes_per_buffer) && !is_logical_periph_bytes_per_buffer(periph_bytes_per_buffer, core_frame_size,
+        is_ddr, layer_info.max_shmifo_size, desc_page_size, max_periph_bytes_value, layer_info.nn_stream_config.core_bytes_per_buffer)) {
         periph_bytes_per_buffer -= alignment;
     }
 
     CHECK_AS_EXPECTED(0 != periph_bytes_per_buffer, HAILO_INVALID_ARGUMENT, "Error, Could not find logical periph bytes per buffer value");
 
     uint32_t periph_buffers_per_frame = (core_frame_size / periph_bytes_per_buffer);
-    // In ddr if we get a periph bytes per buffer os small that the periph buffers per frame cant fit in uint16
-    // put uint16_t max - seeing as this value doesnt really affect anything and we should not fail in that case.
-    if (is_ddr && !IS_FIT_IN_UINT16(periph_buffers_per_frame)) {
-        LOGGER__DEBUG("periph buffers per frame in ddr too large for 16 bit register - putting uint16_t max");
-        periph_buffers_per_frame = UINT16_MAX;
+    // In ddr - the core makes sure that row size is aligned to PERIPH_BYTES_PER_BUFFER_DDR_ALIGNMENT_SIZE, but if a row
+    // is too large to fit in core bytes per buffer - it will be divided and put into multiple buffers - so in order to
+    // get the exact size in periph buffers per frame - we must multiply core registers and divide by periph bytes per buffer
+    if (is_ddr) {
+        periph_buffers_per_frame = layer_info.nn_stream_config.core_bytes_per_buffer *
+            layer_info.nn_stream_config.core_buffers_per_frame / periph_bytes_per_buffer;
+
+        // if we get a periph bytes per buffer so small that the periph buffers per frame can't fit in uint16
+        // put uint16_t max - seeing as this value doesn't really affect anything and we should not fail in that case.
+        if (!IS_FIT_IN_UINT16(periph_buffers_per_frame)) {
+            LOGGER__WARNING("periph buffers per frame in DDR too large - putting uint16_t max (This may affect HW infer estimator results");
+            periph_buffers_per_frame = UINT16_MAX;
+        }
     }
     CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(periph_buffers_per_frame), HAILO_INVALID_ARGUMENT);
 
@@ -150,6 +172,8 @@ static Expected<LayerInfo> update_layer_info(const LayerInfo &original_layer_inf
         local_layer_info.max_shmifo_size = hw_consts.default_initial_credit_size;
     }
 
+    local_layer_info.nn_stream_config.is_periph_calculated_in_hailort = is_periph_calculated_in_hailort;
+
     // If Hw padding supported dont update periph registers because they were updated in get_hw_padding
     // TODO HRT-11006 : currently check is_hw_padding_supported and the feature_padding_payload because in MIPI Input stream
     // Even if is_hw_padding_supported is true we will not use hw padding.
@@ -157,18 +181,13 @@ static Expected<LayerInfo> update_layer_info(const LayerInfo &original_layer_inf
     CHECK_EXPECTED(max_periph_bytes_from_hef);
     const auto max_periph_bytes = MIN(max_periph_bytes_from_hef.value(), local_layer_info.max_shmifo_size);
 
-    const bool hw_padding_supported = HefConfigurator::is_hw_padding_supported(local_layer_info,
-        max_periph_bytes) && (0 != original_layer_info.nn_stream_config.feature_padding_payload);
-    if (!hw_padding_supported) {
-        // Update periph values
-        const auto periph_requirements = calculate_periph_requirements(local_layer_info, buffer_info.desc_page_size,
-            is_periph_calculated_in_hailort, max_periph_bytes);
-        CHECK_EXPECTED(periph_requirements);
+    const auto periph_requirements = calculate_periph_requirements(local_layer_info, buffer_info.desc_page_size,
+        is_periph_calculated_in_hailort, max_periph_bytes);
+    CHECK_EXPECTED(periph_requirements);
 
-        // Calculate and update value of periph bytes per buffer and periph buffers per frame
-        local_layer_info.nn_stream_config.periph_bytes_per_buffer = std::get<0>(periph_requirements.value());
-        local_layer_info.nn_stream_config.periph_buffers_per_frame = std::get<1>(periph_requirements.value());
-    }
+    // Calculate and update value of periph bytes per buffer and periph buffers per frame
+    local_layer_info.nn_stream_config.periph_bytes_per_buffer = std::get<0>(periph_requirements.value());
+    local_layer_info.nn_stream_config.periph_buffers_per_frame = std::get<1>(periph_requirements.value());
 
     auto updated_local_layer_info = calculate_credit_params(hw_consts, buffer_info.desc_page_size, should_optimize_credits,
         local_layer_info);
@@ -177,7 +196,7 @@ static Expected<LayerInfo> update_layer_info(const LayerInfo &original_layer_inf
     return updated_local_layer_info;
 }
 
-static hailo_status fill_boundary_input_layer(ContextResources &context_resources, 
+static hailo_status fill_boundary_input_layer_impl(ContextResources &context_resources,
     ResourcesManager &resources_manager, const LayerInfo layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
     const ProtoHEFHwArch &hw_arch, bool should_optimize_credits)
 {
@@ -201,6 +220,21 @@ static hailo_status fill_boundary_input_layer(ContextResources &context_resource
     return HAILO_SUCCESS;
 }
 
+static hailo_status fill_boundary_input_layer(ContextResources &context_resources,
+    ResourcesManager &resources_manager, const LayerInfo layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
+    const ProtoHEFHwArch &hw_arch, bool should_optimize_credits)
+{
+    if (layer_info.is_multi_planar) {
+        for (auto &plane : layer_info.planes) {
+            auto status = fill_boundary_input_layer_impl(context_resources, resources_manager, plane, hw_consts, hw_arch, should_optimize_credits);
+            CHECK_SUCCESS(status);
+        }
+        return HAILO_SUCCESS;
+    }
+
+    return fill_boundary_input_layer_impl(context_resources, resources_manager, layer_info, hw_consts, hw_arch, should_optimize_credits);
+}
+
 static hailo_status fill_inter_context_input_layer(ContextResources &context_resources,
     ResourcesManager &resources_manager, const LayerInfo &layer_info, const CONTROL_PROTOCOL__hw_consts_t &hw_consts,
     const ProtoHEFHwArch &hw_arch, bool should_optimize_credits)
@@ -314,21 +348,10 @@ static hailo_status fill_ddr_output_layer(ContextResources &context_resources,
         HailoRTDriver::DmaDirection::D2H, layer_info.dma_engine_index);
     CHECK_EXPECTED_AS_STATUS(d2h_channel_id);
 
-    // In DDR layer there is no residue - so can ignore descriptor size
-    const auto IGNORE_DESCRIPTOR_SIZE = 0;
-    // Send layer info with updated shmifo size
-    auto layer_info_updated_shmifo = layer_info;
-    if (layer_info_updated_shmifo.max_shmifo_size == 0) {
-        layer_info_updated_shmifo.max_shmifo_size = hw_consts.default_initial_credit_size;
-    }
-
-    auto max_periph_bytes = HefConfigurator::max_periph_bytes_value(DeviceBase::hef_arch_to_device_arch(hw_arch));
-    CHECK_EXPECTED_AS_STATUS(max_periph_bytes, "Error calculating max periph bytes per buffer");
-    const auto periph_values = calculate_periph_requirements(layer_info_updated_shmifo, IGNORE_DESCRIPTOR_SIZE,
-        resources_manager.get_supported_features().periph_calculation_in_hailort, max_periph_bytes.value());
-    CHECK_EXPECTED_AS_STATUS(periph_values);
-
-    const auto row_size = std::get<0>(periph_values.value());
+    // In DDR - always use core bytes per buffer as row size
+    const auto row_size = static_cast<uint16_t>(layer_info.nn_stream_config.core_bytes_per_buffer);
+    CHECK(0 == (row_size % PERIPH_BYTES_PER_BUFFER_DDR_ALIGNMENT_SIZE), HAILO_INVALID_ARGUMENT,
+        "DDR Row size ({}) must be aligned to {}", row_size, PERIPH_BYTES_PER_BUFFER_DDR_ALIGNMENT_SIZE);
     const auto min_buffered_rows = layer_info.ddr_info.min_buffered_rows;
 
     // Allocate the ddr buffer
@@ -655,10 +678,10 @@ static hailo_status proccess_write_ccw_action(const ContextSwitchConfigActionPtr
 }
 
 // TODO HRT-10073: change to supported features list
-static bool is_hailo15_device_type(const hailo_device_architecture_t dev_arch)
+static bool is_hailo1x_device_type(const hailo_device_architecture_t dev_arch)
 {
-    // Compare with HAILO15 device arch
-    return (HAILO_ARCH_HAILO15 == dev_arch);
+    // Compare with HAILO1X device archs
+    return (HAILO_ARCH_HAILO15H == dev_arch) || (HAILO_ARCH_PLUTO == dev_arch);
 }
 
 static Expected<uint8_t> find_dummy_stream(const LayerInfo &layer_info, const ContextResources &context_resources,
@@ -675,28 +698,48 @@ static Expected<uint8_t> find_dummy_stream(const LayerInfo &layer_info, const Co
     }
 }
 
+static hailo_status add_change_vdma_to_stream_mapping_impl(const ProtoHEFHwArch &hw_arch,
+    const LayerInfo &layer_info, const ResourcesManager &resources_manager,
+    ContextResources &context_resources, uint8_t context_index,
+    std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions)
+{
+    auto vdma_channel = resources_manager.get_boundary_vdma_channel_by_stream_name(layer_info.name);
+    CHECK_EXPECTED_AS_STATUS(vdma_channel);
+
+    const auto channel_id = vdma_channel.value()->get_channel_id();
+    const bool is_dummy_stream = layer_info.context_index != context_index;
+    uint8_t stream_index = layer_info.stream_index;
+    if (is_dummy_stream) {
+        auto dummy_stream_index = find_dummy_stream(layer_info, context_resources,
+            is_hailo1x_device_type(DeviceBase::hef_arch_to_device_arch(hw_arch)));
+        CHECK_EXPECTED_AS_STATUS(dummy_stream_index);
+        stream_index = *dummy_stream_index;
+    }
+
+    auto action = ChangeVdmaToStreamMapping::create(channel_id, stream_index, is_dummy_stream);
+    CHECK_EXPECTED_AS_STATUS(action);
+    processed_configuration_actions.emplace_back(action.release());
+
+    return HAILO_SUCCESS;
+}
+
 static hailo_status add_change_vdma_to_stream_mapping(const ProtoHEFHwArch &hw_arch,
     const CoreOpMetadata &core_op_metadata, const ResourcesManager &resources_manager,
     ContextResources &context_resources, uint8_t context_index,
     std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions)
 {
     for (const auto &layer_info : core_op_metadata.get_all_layer_infos()) {
-        auto vdma_channel = resources_manager.get_boundary_vdma_channel_by_stream_name(layer_info.name);
-        CHECK_EXPECTED_AS_STATUS(vdma_channel);
-
-        const auto channel_id = vdma_channel.value()->get_channel_id();
-        const bool is_dummy_stream = layer_info.context_index != context_index;
-        uint8_t stream_index = layer_info.stream_index;
-        if (is_dummy_stream) {
-            auto dummy_stream_index = find_dummy_stream(layer_info, context_resources,
-                is_hailo15_device_type(DeviceBase::hef_arch_to_device_arch(hw_arch)));
-            CHECK_EXPECTED_AS_STATUS(dummy_stream_index);
-            stream_index = *dummy_stream_index;
+        if (layer_info.is_multi_planar) {
+            for (const auto &plane : layer_info.planes) {
+                auto status = add_change_vdma_to_stream_mapping_impl(hw_arch, plane, resources_manager,
+                    context_resources, context_index, processed_configuration_actions);
+                CHECK_SUCCESS(status);
+            }
+        } else {
+                auto status = add_change_vdma_to_stream_mapping_impl(hw_arch, layer_info, resources_manager,
+                    context_resources, context_index, processed_configuration_actions);
+                CHECK_SUCCESS(status);
         }
-
-        auto action = ChangeVdmaToStreamMapping::create(channel_id, stream_index, is_dummy_stream);
-        CHECK_EXPECTED_AS_STATUS(action);
-        processed_configuration_actions.emplace_back(action.release());
     }
 
     return HAILO_SUCCESS;
@@ -704,7 +747,8 @@ static hailo_status add_change_vdma_to_stream_mapping(const ProtoHEFHwArch &hw_a
 
 static hailo_status push_edge_layer_activation_actions(
     const ContextResources &context_resources,
-    std::vector<ContextSwitchConfigActionPtr> &actions)
+    std::vector<ContextSwitchConfigActionPtr> &actions,
+    bool push_internal_only)
 {
     // Activate the edge layer by order - first output edge layers, then ddr inputs and only then the input edge layers
     // In order to insure that input data can enter the chip only after all other elements are configured.
@@ -718,11 +762,14 @@ static hailo_status push_edge_layer_activation_actions(
         actions.emplace_back(activate_action.release());
     }
 
-    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY, HAILO_D2H_STREAM)) {
-        auto activate_action = ActivateBoundaryOutputChannelAction::create(edge_layer.channel_id,
-            edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info);
-        CHECK_EXPECTED_AS_STATUS(activate_action);
-        actions.emplace_back(activate_action.release());
+    if (!push_internal_only) {
+        for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY, HAILO_D2H_STREAM)) {
+            auto activate_action = ActivateBoundaryOutputChannelAction::create(edge_layer.channel_id,
+                edge_layer.layer_info.stream_index, edge_layer.layer_info.network_index,
+                edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info);
+            CHECK_EXPECTED_AS_STATUS(activate_action);
+            actions.emplace_back(activate_action.release());
+        }
     }
 
     for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::INTER_CONTEXT, HAILO_D2H_STREAM)) {
@@ -746,12 +793,14 @@ static hailo_status push_edge_layer_activation_actions(
         actions.emplace_back(activate_action.release());
     }
 
-    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY, HAILO_H2D_STREAM)) {
-        auto activate_action = ActivateBoundaryInputChannelAction::create(edge_layer.channel_id,
-            edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info,
-            edge_layer.layer_info.max_shmifo_size);
-        CHECK_EXPECTED_AS_STATUS(activate_action);
-        actions.emplace_back(activate_action.release());
+    if (!push_internal_only) {
+        for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY, HAILO_H2D_STREAM)) {
+            auto activate_action = ActivateBoundaryInputChannelAction::create(edge_layer.channel_id,
+                edge_layer.layer_info.stream_index, edge_layer.layer_info.nn_stream_config, edge_layer.buffer_info,
+                edge_layer.layer_info.max_shmifo_size);
+            CHECK_EXPECTED_AS_STATUS(activate_action);
+            actions.emplace_back(activate_action.release());
+        }
     }
 
     for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::INTER_CONTEXT, HAILO_H2D_STREAM)) {
@@ -776,8 +825,10 @@ static hailo_status proccess_trigger_new_data_input_action(const ProtoHEFHwArch
     uint8_t context_index,
     std::vector<ContextSwitchConfigActionPtr> &processed_configuration_actions, bool is_single_context)
 {
+    const bool PUSH_ALL_EDGE_LAYERS = false;
     if (trigger_new_data_from_input_group_start == action_index) {
-        auto status = push_edge_layer_activation_actions(context_resources, processed_configuration_actions);
+        auto status = push_edge_layer_activation_actions(context_resources, processed_configuration_actions,
+            PUSH_ALL_EDGE_LAYERS);
         CHECK_SUCCESS(status);
 
         if (!is_single_context) {
@@ -789,6 +840,13 @@ static hailo_status proccess_trigger_new_data_input_action(const ProtoHEFHwArch
         // DDR buffer info actions need to happen after the edge layer activation actions.
         status = add_ddr_buffers_info(processed_configuration_actions, context_resources);
         CHECK_SUCCESS(status);
+
+        /* Open the boundary input channel */
+        for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY, HAILO_H2D_STREAM)) {
+            auto activate_action = ResumeVdmaChannel::create(edge_layer);
+            CHECK_EXPECTED_AS_STATUS(activate_action);
+            processed_configuration_actions.emplace_back(activate_action.release());
+        }
     }
 
     // Add the current action
@@ -797,9 +855,12 @@ static hailo_status proccess_trigger_new_data_input_action(const ProtoHEFHwArch
     // At the end of a consecutive group of TriggerNewDataFromDataInput actions, we can trigger the BurstCreditsTask
     // in the FW, via StartBurstCreditsTaskAction.
     if (trigger_new_data_from_input_group_end == action_index) {
-        auto start_burst_credits_task_action = StartBurstCreditsTaskAction::create();
-        CHECK_EXPECTED_AS_STATUS(start_burst_credits_task_action);
-        processed_configuration_actions.emplace_back(start_burst_credits_task_action.release());
+        auto boundary_input_edge_layers = context_resources.get_edge_layers(LayerType::BOUNDARY, HAILO_H2D_STREAM);
+        if (boundary_input_edge_layers.size() > 0) {
+            auto start_burst_credits_task_action = StartBurstCreditsTaskAction::create();
+            CHECK_EXPECTED_AS_STATUS(start_burst_credits_task_action);
+            processed_configuration_actions.emplace_back(start_burst_credits_task_action.release());
+        }
     }
 
     return HAILO_SUCCESS;
@@ -958,17 +1019,24 @@ static hailo_status write_action_list(const ContextResources & context_resources
 }
 
 static hailo_status add_edge_layer_end_of_context_actions(const ContextResources &context_resources,
-    std::vector<ContextSwitchConfigActionPtr> &actions)
+    std::vector<ContextSwitchConfigActionPtr> &actions, const bool is_batch_switch_context)
 {
     for (const auto &edge_layer : context_resources.get_edge_layers()) {
         const bool should_validate = (edge_layer.layer_info.type == LayerType::BOUNDARY);
         auto action = should_validate ?
-            ValidateChannelAction::create(edge_layer) :
-            DeactivateChannelAction::create(edge_layer);
+            ValidateChannelAction::create(edge_layer, is_batch_switch_context) :
+            DeactivateChannelAction::create(edge_layer, is_batch_switch_context);
         CHECK_EXPECTED_AS_STATUS(action);
         actions.emplace_back(action.release());
     }
 
+    /* Pause the boundary input channel */
+    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY, HAILO_H2D_STREAM)) {
+        auto activate_action = PauseVdmaChannel::create(edge_layer);
+        CHECK_EXPECTED_AS_STATUS(activate_action);
+        actions.emplace_back(activate_action.release());
+    }
+
     return HAILO_SUCCESS;
 }
 
@@ -986,7 +1054,7 @@ static hailo_status fill_context_recipes_for_multi_context(const ProtoHEFHwArch
     // Parse context
     std::vector<ContextSwitchConfigActionPtr> actions = context_metadata.get_actions();
 
-    const auto support_pre_fetch = is_hailo15_device_type(DeviceBase::hef_arch_to_device_arch(hw_arch));
+    const auto support_pre_fetch = is_hailo1x_device_type(DeviceBase::hef_arch_to_device_arch(hw_arch));
     status = add_fetch_config_actions(actions, context_resources.get_config_buffers(), support_pre_fetch);
     CHECK_SUCCESS(status);
 
@@ -1004,7 +1072,8 @@ static hailo_status fill_context_recipes_for_multi_context(const ProtoHEFHwArch
         actions.emplace_back(wait_action.release());
     }
     else {
-        status = add_edge_layer_end_of_context_actions(context_resources, actions);
+        const bool NOT_BATCH_SWITCH_CONTEXT = false;
+        status = add_edge_layer_end_of_context_actions(context_resources, actions, NOT_BATCH_SWITCH_CONTEXT);
     }
 
     status = handle_repeated_actions(actions);
@@ -1017,8 +1086,15 @@ static hailo_status create_boundary_channels(ResourcesManager &resources_manager
     CoreOpMetadata &core_op_metadata)
 {
     for (const auto &layer_info : core_op_metadata.get_all_layer_infos()) {
-        auto status = resources_manager.create_boundary_vdma_channel(layer_info);
-        CHECK_SUCCESS(status);
+        if (layer_info.is_multi_planar) {
+            for (const auto &plane : layer_info.planes) {
+                auto status = resources_manager.create_boundary_vdma_channel(plane);
+                CHECK_SUCCESS(status);
+            }
+        } else {
+            auto status = resources_manager.create_boundary_vdma_channel(layer_info);
+            CHECK_SUCCESS(status);
+        }
     }
     return HAILO_SUCCESS;
 }
@@ -1045,6 +1121,10 @@ static hailo_status fill_activation_config_recepies_for_multi_context(
     }
 
     std::vector<ContextSwitchConfigActionPtr> actions;
+    auto reset_burst_task_action = ResetBurstCreditsTaskAction::create();
+    CHECK_EXPECTED_AS_STATUS(reset_burst_task_action);
+    actions.emplace_back(reset_burst_task_action.release());
+
     for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY)) {
         auto action = edge_layer.layer_info.direction == HAILO_H2D_STREAM ?
             OpenBoundaryInputChannelAction::create(edge_layer.channel_id, edge_layer.buffer_info) :
@@ -1088,31 +1168,134 @@ static Expected<ContextSwitchConfigActionPtr> create_switch_lcu_batch_action(con
     return SwitchLcuBatchAction::create(cluster_index, lcu_index, network_index, kernel_done_count);
 }
 
-static hailo_status fill_batch_switching_context_config_recepies_for_multi_context(
-    ContextResources &context_resources, const CoreOpMetadata &core_op_metadata)
+static hailo_status fill_batch_switching_context_edge_layers(ContextResources &context_resources, const CoreOpMetadata &core_op_metadata, ResourcesManager &resources_manager,
+    const ProtoHEFHwArch &hw_arch)
 {
-    std::vector<ContextSwitchConfigActionPtr> actions;
+    auto hw_consts = Control::get_hw_consts(resources_manager.get_device());
+    CHECK_EXPECTED_AS_STATUS(hw_consts);
+    const bool should_optimize_credits = hw_consts->should_optimize_credits &&
+        (HAILO_POWER_MODE_PERFORMANCE == resources_manager.get_power_mode());
 
-    // We need to reset the ddr buffering task when we change the batch_size (since it depends on the batch_size param)
-    auto reset_ddr_action = ResetDdrBufferingTaskAction::create();
-    CHECK_EXPECTED_AS_STATUS(reset_ddr_action);
-    actions.emplace_back(reset_ddr_action.release());
+    for (const auto &output_layer_info : core_op_metadata.dynamic_contexts()[0].get_ddr_output_layers()) {
+        auto status = fill_ddr_output_layer(context_resources, resources_manager, output_layer_info, *hw_consts, hw_arch);
+        CHECK_SUCCESS(status);
+    }
+
+    for (const auto &output_layer_info : core_op_metadata.dynamic_contexts()[0].get_boundary_output_layers()) {
+        auto status = fill_boundary_output_layer(context_resources, resources_manager, output_layer_info,
+            *hw_consts, hw_arch, should_optimize_credits);
+        CHECK_SUCCESS(status);
+    }
+
+    for (const auto &output_layer_info : core_op_metadata.dynamic_contexts()[0].get_inter_context_output_layers()) {
+        auto status = fill_inter_context_output_layer(context_resources, resources_manager, output_layer_info,
+            *hw_consts, hw_arch, should_optimize_credits);
+        CHECK_SUCCESS(status);
+    }
+
+    for (const auto &input_layer_info : core_op_metadata.dynamic_contexts()[0].get_ddr_input_layers()) {
+        auto status = fill_ddr_input_layer(context_resources, resources_manager, input_layer_info, *hw_consts, hw_arch);
+        CHECK_SUCCESS(status);
+    }
+
+    for (const auto &input_layer_info : core_op_metadata.dynamic_contexts()[0].get_boundary_input_layers()) {
+        auto status = fill_boundary_input_layer(context_resources, resources_manager, input_layer_info,
+            *hw_consts, hw_arch, should_optimize_credits);
+        CHECK_SUCCESS(status);
+    }
+
+    // Batch switching context is not supported for networks where in the first dynamic context there is inter context input.
+    assert(core_op_metadata.dynamic_contexts()[0].get_inter_context_input_layers().size() == 0);
 
+    return HAILO_SUCCESS;
+}
+
+
+static hailo_status add_lcu_actions_to_batch_switch_context(ContextResources &context_resources, const CoreOpMetadata &core_op_metadata,
+    std::vector<ContextSwitchConfigActionPtr> &actions)
+{
     // Find all the enabled lcus from the preliminary context in order to create coresponding switch lcu batch actions to run
-    // In the batch switch context 
+    // In the batch switch context
     static const std::set<ContextSwitchConfigAction::Type> ENABLE_LCU_ACTIONS = {
         ContextSwitchConfigAction::Type::EnableLcuDefault,
         ContextSwitchConfigAction::Type::EnableLcuNonDefault
     };
 
-    const auto batch_switch_actions = core_op_metadata.preliminary_context().get_actions_of_type(ENABLE_LCU_ACTIONS);
-    for (const auto &action : batch_switch_actions) {
+    const auto lcu_batch_switch_actions = core_op_metadata.preliminary_context().get_actions_of_type(ENABLE_LCU_ACTIONS);
+    for (const auto &action : lcu_batch_switch_actions) {
         auto switch_lcu_batch_action = create_switch_lcu_batch_action(action, context_resources);
         CHECK_EXPECTED_AS_STATUS(switch_lcu_batch_action);
         actions.insert(actions.end(), switch_lcu_batch_action.release());
     }
 
-    auto status = handle_repeated_actions(actions);
+    return HAILO_SUCCESS;
+}
+
+static hailo_status create_change_boundary_input_batch_actions(const ContextResources &context_resources,
+    std::vector<ContextSwitchConfigActionPtr> &batch_switch_context_actions)
+{
+    for (const auto &edge_layer : context_resources.get_edge_layers(LayerType::BOUNDARY, HAILO_H2D_STREAM)) {
+        auto change_boundary_input_batch_action = ChangeBoundaryInputBatchAction::create(edge_layer.channel_id);
+        CHECK_EXPECTED_AS_STATUS(change_boundary_input_batch_action);
+        batch_switch_context_actions.emplace_back(change_boundary_input_batch_action.release());
+    }
+
+    auto start_burst_credits_task_action = StartBurstCreditsTaskAction::create();
+    CHECK_EXPECTED_AS_STATUS(start_burst_credits_task_action);
+    batch_switch_context_actions.emplace_back(start_burst_credits_task_action.release());
+
+
+    return HAILO_SUCCESS;
+}
+
+static hailo_status add_edge_layers_actions_to_batch_switch_context(ContextResources &context_resources, const CoreOpMetadata &core_op_metadata,
+    ResourcesManager &resources_manager, const ProtoHEFHwArch &hw_arch, std::vector<ContextSwitchConfigActionPtr> &actions)
+{
+    auto status = fill_batch_switching_context_edge_layers(context_resources, core_op_metadata, resources_manager, hw_arch);
+    CHECK_SUCCESS(status);
+
+    // Close all internal channels
+    const auto BATCH_SWITCHING_CONTEXT = true;
+    for (const auto &edge_layer : context_resources.get_edge_layers()) {
+        if (edge_layer.layer_info.type != LayerType::BOUNDARY) {
+            auto action = DeactivateChannelAction::create(edge_layer, BATCH_SWITCHING_CONTEXT);
+            CHECK_EXPECTED_AS_STATUS(action);
+            actions.emplace_back(action.release());
+        }
+    }
+
+    // We need to reset the ddr buffering task when we change the batch_size (since it depends on the batch_size param)
+    auto reset_ddr_action = ResetDdrBufferingTaskAction::create();
+    CHECK_EXPECTED_AS_STATUS(reset_ddr_action);
+    actions.emplace_back(reset_ddr_action.release());
+
+    // Now re-open all the internal channels
+    const bool PUSH_INTERNAL_EDGE_LAYERS = true;
+    status = push_edge_layer_activation_actions(context_resources, actions, PUSH_INTERNAL_EDGE_LAYERS);
+    CHECK_SUCCESS(status);
+
+    status = add_ddr_buffers_info(actions, context_resources);
+    CHECK_SUCCESS(status);
+
+    status = create_change_boundary_input_batch_actions(context_resources, actions);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+static hailo_status fill_batch_switching_context_config_recepies_for_multi_context(
+    ContextResources &context_resources, const CoreOpMetadata &core_op_metadata, ResourcesManager &resources_manager,
+    const ProtoHEFHwArch &hw_arch)
+{
+    std::vector<ContextSwitchConfigActionPtr> actions;
+
+    auto status = add_lcu_actions_to_batch_switch_context(context_resources, core_op_metadata, actions);
+    CHECK_SUCCESS(status);
+
+    status = add_edge_layers_actions_to_batch_switch_context(context_resources, core_op_metadata, resources_manager, hw_arch, actions);
+    CHECK_SUCCESS(status);
+
+    status = handle_repeated_actions(actions);
     CHECK_SUCCESS(status);
 
     return write_action_list(context_resources, context_resources.builder(), actions);
@@ -1136,7 +1319,7 @@ static hailo_status fill_preliminary_config_recepies_for_multi_context(const Pro
     // Parse preliminary config
     std::vector<ContextSwitchConfigActionPtr> actions = preliminary_context.get_actions();
 
-    const auto support_pre_fetch = is_hailo15_device_type(DeviceBase::hef_arch_to_device_arch(hw_arch));
+    const auto support_pre_fetch = is_hailo1x_device_type(DeviceBase::hef_arch_to_device_arch(hw_arch));
     auto status = add_fetch_config_actions(actions, context_resources.get_config_buffers(), support_pre_fetch);
     CHECK_SUCCESS(status);
 
@@ -1192,7 +1375,7 @@ Expected<std::shared_ptr<ResourcesManager>> ResourcesManagerBuilder::build(uint8
     auto batch_switching_context = resources_manager->add_new_context(CONTROL_PROTOCOL__CONTEXT_SWITCH_CONTEXT_TYPE_BATCH_SWITCHING);
     CHECK_EXPECTED(batch_switching_context);
     status = fill_batch_switching_context_config_recepies_for_multi_context(batch_switching_context.value().get(),
-        *core_op_metadata);
+        *core_op_metadata, resources_manager.value(), hw_arch);
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     const bool is_single_context = core_op_metadata->dynamic_contexts().size() == 1;
index eeb33183bfb75bc0f6e60c10a23548d448e06a1b..af1fd9fd69ea3a06e8319734b3a6e090170bbd4b 100644 (file)
@@ -4,6 +4,7 @@ set(SRC_FILES
     ${CMAKE_CURRENT_SOURCE_DIR}/device.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/device_internal.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/d2h_events_parser.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/d2h_event_queue.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/control.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/control_protocol.cpp
 )
index 9c72ebba9cea29a038bd29d71a5a91eb20fb88e7..f4a5558b7edc9655e60d904705408ea65ab76fb5 100644 (file)
@@ -2645,8 +2645,7 @@ hailo_status Control::download_context_action_list(Device &device, uint32_t netw
 
 hailo_status Control::change_context_switch_status(Device &device,
         CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status,
-        uint8_t network_group_index, uint16_t dynamic_batch_size, uint16_t batch_count,
-        bool keep_nn_config_during_reset)
+        uint8_t network_group_index, uint16_t dynamic_batch_size, uint16_t batch_count)
 {
     hailo_status status = HAILO_UNINITIALIZED;
     HAILO_COMMON_STATUS_t common_status = HAILO_COMMON_STATUS__UNINITIALIZED;
@@ -2658,8 +2657,7 @@ hailo_status Control::change_context_switch_status(Device &device,
     CONTROL_PROTOCOL__payload_t *payload = NULL;
 
     common_status = CONTROL_PROTOCOL__pack_change_context_switch_status_request(&request, &request_size,
-            device.get_control_sequence(), state_machine_status, network_group_index, dynamic_batch_size, 
-            batch_count, keep_nn_config_during_reset);
+            device.get_control_sequence(), state_machine_status, network_group_index, dynamic_batch_size, batch_count);
     status = (HAILO_COMMON_STATUS__SUCCESS == common_status) ? HAILO_SUCCESS : HAILO_INTERNAL_FAILURE;
     if (HAILO_SUCCESS != status) {
         goto exit;
@@ -2689,13 +2687,13 @@ hailo_status Control::enable_core_op(Device &device, uint8_t network_group_index
         network_group_index, dynamic_batch_size, batch_count);
 }
 
-hailo_status Control::reset_context_switch_state_machine(Device &device, bool keep_nn_config_during_reset)
+hailo_status Control::reset_context_switch_state_machine(Device &device)
 {
-    static const auto IGNORE_NETWORK_GROUP_INDEX = 0;
+    static const auto IGNORE_NETWORK_GROUP_INDEX = 255;
     static const auto IGNORE_DYNAMIC_BATCH_SIZE = 0;
     static const auto DEFAULT_BATCH_COUNT = 0;
     return Control::change_context_switch_status(device, CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_RESET,
-        IGNORE_NETWORK_GROUP_INDEX, IGNORE_DYNAMIC_BATCH_SIZE, DEFAULT_BATCH_COUNT, keep_nn_config_during_reset);
+        IGNORE_NETWORK_GROUP_INDEX, IGNORE_DYNAMIC_BATCH_SIZE, DEFAULT_BATCH_COUNT);
 }
 
 hailo_status Control::wd_enable(Device &device, uint8_t cpu_id, bool should_enable)
index aa65dd5c497bd381ef75abbb6f40565f5e11a3ce..01a180fc880d3017b61a9c264a2d0dca5a003812 100644 (file)
@@ -300,13 +300,10 @@ public:
      *  reset context switch state machine
      * 
      * @param[in]     device - The Hailo device.
-     * @param[in]     keep_nn_config_during_reset - 
-     *                Use if in the reset flow, user wise to remain in the same network group. 
-     *                this reset flow keep most of the configuration on the network group for faster batch switching. 
      *
      * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
      */
-    static hailo_status reset_context_switch_state_machine(Device &device, bool keep_nn_config_during_reset);
+    static hailo_status reset_context_switch_state_machine(Device &device);
     /**
      *  set dataflow interrupt by control
      * 
@@ -337,7 +334,7 @@ public:
      *  Enable/disable halt transmition following Rx pause frame
      * 
      * @param[in]     device - The Hailo device.
-     * @param[in]     rx_pause_frames_enable - Bool indicating weather to enable or disable rx pause frames
+     * @param[in]     rx_pause_frames_enable - Bool indicating whether to enable or disable rx pause frames
      * @return Upon success, returns @a HAILO_SUCCESS. Otherwise, returns an @a static hailo_status error.
      */
     static hailo_status set_pause_frames(Device &device, uint8_t rx_pause_frames_enable);
@@ -408,8 +405,7 @@ private:
         const CONTROL_PROTOCOL__context_switch_context_info_single_control_t &context_info);
     static hailo_status change_context_switch_status(Device &device,
             CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status,
-            uint8_t network_group_index, uint16_t dynamic_batch_size, uint16_t batch_count,
-            bool keep_nn_config_during_reset = false);
+            uint8_t network_group_index, uint16_t dynamic_batch_size, uint16_t batch_count);
     static Expected<CONTROL_PROTOCOL__get_extended_device_information_response_t> get_extended_device_info_response(Device &device);
 };
 
index 1c5b38cf69c4465f436b8b43733f5c6b3ce66e32..aad1a2b7160fdd06d210ad0d6579e7080cb932e6 100644 (file)
@@ -1810,11 +1810,11 @@ exit:
     return status;
 }
 
-#define CONTEXT_SWITCH_SWITCH_STATUS_REQUEST_PARAMS (5)
+#define CONTEXT_SWITCH_SWITCH_STATUS_REQUEST_PARAMS (4)
 HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_change_context_switch_status_request(
         CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence, 
         CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status, uint8_t application_index,
-        uint16_t dynamic_batch_size, uint16_t batch_count, bool keep_nn_config_during_reset)
+        uint16_t dynamic_batch_size, uint16_t batch_count)
 {
     HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
     size_t local_request_size = 0;
@@ -1852,11 +1852,6 @@ HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_change_context_switch_status_reques
         BYTE_ORDER__htonl(sizeof(request->parameters.change_context_switch_status_request.batch_count));
     request->parameters.change_context_switch_status_request.batch_count = batch_count;
 
-    /* keep_nn_config_during_reset */
-    request->parameters.change_context_switch_status_request.keep_nn_config_during_reset_length = 
-        BYTE_ORDER__htonl(sizeof(request->parameters.change_context_switch_status_request.keep_nn_config_during_reset));
-    request->parameters.change_context_switch_status_request.keep_nn_config_during_reset = keep_nn_config_during_reset;
-
     *request_size = local_request_size;
     status = HAILO_COMMON_STATUS__SUCCESS;
 exit:
index ade0260f97ba012720f0e66ffa866d76270a8f7a..ae0b967438e5c40e15fa9d38a9eee52cea1ea411 100644 (file)
@@ -106,7 +106,7 @@ HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_download_context_action_list_reques
 HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_change_context_switch_status_request(
         CONTROL_PROTOCOL__request_t *request, size_t *request_size, uint32_t sequence,
         CONTROL_PROTOCOL__CONTEXT_SWITCH_STATUS_t state_machine_status, uint8_t application_index,
-        uint16_t dynamic_batch_size, uint16_t batch_count, bool keep_nn_config_during_reset);
+        uint16_t dynamic_batch_size, uint16_t batch_count);
 HAILO_COMMON_STATUS_t CONTROL_PROTOCOL__pack_wd_enable(
     CONTROL_PROTOCOL__request_t *request,
     size_t *request_size,
diff --git a/hailort/libhailort/src/device_common/d2h_event_queue.cpp b/hailort/libhailort/src/device_common/d2h_event_queue.cpp
new file mode 100644 (file)
index 0000000..c20fe8f
--- /dev/null
@@ -0,0 +1,38 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file d2h_event_queue.cpp
+ **/
+
+#include "d2h_event_queue.hpp"
+
+namespace hailort
+{
+
+void D2hEventQueue::push(D2H_EVENT_MESSAGE_t t)
+{
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+        m_queue.push(t);
+    }
+    m_queue_not_empty.notify_one();
+}
+
+D2H_EVENT_MESSAGE_t D2hEventQueue::pop()
+{
+    std::unique_lock<std::mutex> lock(m_mutex);
+    m_queue_not_empty.wait(lock, [this](){ return !m_queue.empty(); });
+    D2H_EVENT_MESSAGE_t val = m_queue.front();
+    m_queue.pop();
+    return val;
+}
+
+void D2hEventQueue::clear()
+{
+    std::unique_lock<std::mutex> lock(m_mutex);
+    m_queue = std::queue<D2H_EVENT_MESSAGE_t>();
+}
+
+} /* namespace hailort */
index 80844be11db82a8175be05f891237d771aeb92fd..d08244e866a3bf1f9cc6d8b4edcca8bfe9f2cc09 100644 (file)
@@ -1,12 +1,10 @@
 /**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Copyright (c) 2020-2023 Hailo Technologies Ltd. All rights reserved.
  * Distributed under the MIT license (https://opensource.org/licenses/MIT)
  **/
 /**
  * @file d2h_event_queue.hpp
- * @brief TODO: brief
- *
- * TODO: doc
+ * @brief Queue for d2h events
  **/
 
 #ifndef HAILO_D2H_EVENT_QUEUE_HPP_
 namespace hailort
 {
 
-class D2hEventQueue : public SafeQueue<D2H_EVENT_MESSAGE_t> {
+
+class D2hEventQueue final {
 public:
-    void clear() {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        m_queue = std::queue<D2H_EVENT_MESSAGE_t>();
-    }
+    D2hEventQueue() = default;
+
+    // Add an element to the queue.
+    void push(D2H_EVENT_MESSAGE_t t);
+
+    // Get the "front"-element.
+    // If the queue is empty, wait until an element is available.
+    D2H_EVENT_MESSAGE_t pop();
+
+    void clear();
+
+protected:
+    std::queue<D2H_EVENT_MESSAGE_t> m_queue;
+    mutable std::mutex m_mutex;
+    std::condition_variable m_queue_not_empty;
 };
 
+
 } /* namespace hailort */
 
 #endif // HAILO_D2H_EVENT_QUEUE_HPP_
index 53384e2cb0a41c667664cdb76ea61f7c5a7152a2..5ac599faef3fd796fdd39e48c3a71b253bb02cad 100644 (file)
@@ -22,7 +22,8 @@
 #include "common/utils.hpp"
 #include "d2h_events.h"
 #include "byte_order.h"
-#include "common/logger_macros.hpp"
+#include "firmware_status.h"
+
 
 using namespace hailort;
 
@@ -44,6 +45,7 @@ static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_cpu_ecc_fatal_noti
 static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_context_switch_breakpoint_reached(D2H_EVENT_MESSAGE_t *d2h_notification_message);
 static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_health_monitor_clock_changed_event_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
 static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_hw_infer_manager_infer_done_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_context_switch_run_time_error_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message);
 
 /**********************************************************************
  * Globals
@@ -60,7 +62,8 @@ firmware_notifications_parser_t g_firmware_notifications_parser[D2H_EVENT_ID_COU
     D2H_EVENTS__parse_health_monitor_cpu_ecc_fatal_notification,
     D2H_EVENTS__parse_context_switch_breakpoint_reached,
     D2H_EVENTS__parse_health_monitor_clock_changed_event_notification,
-    D2H_EVENTS__parse_hw_infer_manager_infer_done_notification
+    D2H_EVENTS__parse_hw_infer_manager_infer_done_notification,
+    D2H_EVENTS__parse_context_switch_run_time_error_notification
 };
 /**********************************************************************
  * Internal Functions
@@ -391,6 +394,39 @@ static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_context_switch_breakpoint_reached
     return status;
 }
 
+static HAILO_COMMON_STATUS_t D2H_EVENTS__parse_context_switch_run_time_error_notification(D2H_EVENT_MESSAGE_t *d2h_notification_message)
+{
+    HAILO_COMMON_STATUS_t status = HAILO_COMMON_STATUS__UNINITIALIZED;
+    const char *run_time_error_status_text = NULL;
+    uint32_t run_time_error_status = 0;
+
+    CHECK(D2H_EVENT_CONTEXT_SWITCH_RUN_TIME_ERROR_EVENT_PARAMETER_COUNT == d2h_notification_message->header.parameter_count,
+            HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_COUNT,
+            "d2h event invalid parameter count: {}", d2h_notification_message->header.parameter_count);
+
+    CHECK(d2h_notification_message->header.payload_length == 
+            sizeof(d2h_notification_message->message_parameters.context_switch_run_time_error_event),
+            HAILO_STATUS__D2H_EVENTS__INCORRECT_PARAMETER_LENGTH,
+            "d2h event invalid payload_length: {}", d2h_notification_message->header.payload_length);
+
+    run_time_error_status = d2h_notification_message->message_parameters.context_switch_run_time_error_event.exit_status;
+    
+    status = FIRMWARE_STATUS__get_textual((FIRMWARE_STATUS_t)run_time_error_status, &run_time_error_status_text);
+    CHECK((HAILO_COMMON_STATUS__SUCCESS == status), status, 
+        "Cannot find textual address for run time status {:#x}, status = {}", (FIRMWARE_STATUS_t)run_time_error_status, status);
+
+    LOGGER__ERROR("Got Context switch run time error on net_group index {}, batch index {}, context index {}, action index {} with status {}",
+        d2h_notification_message->message_parameters.context_switch_run_time_error_event.application_index,
+        d2h_notification_message->message_parameters.context_switch_run_time_error_event.batch_index,
+        d2h_notification_message->message_parameters.context_switch_run_time_error_event.context_index,
+        d2h_notification_message->message_parameters.context_switch_run_time_error_event.action_index,
+        run_time_error_status_text);
+
+    status = HAILO_COMMON_STATUS__SUCCESS;
+
+    return status;
+}
+
 /**********************************************************************
  * Public Functions
  **********************************************************************/
index 3045dedee2654fd53efea1958dad569f639bdd1d..99c03f5de34e9c47866b52b999da8dad06e0edc5 100644 (file)
@@ -13,6 +13,7 @@
 
 #include "device_common/control.hpp"
 #include "device_common/device_internal.hpp"
+#include "network_group/network_group_internal.hpp"
 #include "utils/sensor_config_utils.hpp"
 
 
@@ -165,9 +166,12 @@ Expected<firmware_type_t> DeviceBase::get_fw_type()
     if ((architecture.value() == HAILO_ARCH_HAILO8) || (architecture.value() == HAILO_ARCH_HAILO8L)) {
         firmware_type = FIRMWARE_TYPE_HAILO8;
     }
-    else if (architecture.value() == HAILO_ARCH_HAILO15) {
+    else if (architecture.value() == HAILO_ARCH_HAILO15H) {
         firmware_type = FIRMWARE_TYPE_HAILO15;
     }
+    else if (architecture.value() == HAILO_ARCH_PLUTO) {
+        firmware_type = FIRMWARE_TYPE_PLUTO;
+    }
     else {
         LOGGER__ERROR("Invalid device arcitecture. {}", architecture.value());
         return make_unexpected(HAILO_INVALID_DEVICE_ARCHITECTURE);
@@ -666,6 +670,9 @@ hailo_status DeviceBase::fw_notification_id_to_hailo(D2H_EVENT_ID_t fw_notificat
         case HW_INFER_MANAGER_INFER_DONE:
             *hailo_notification_id = HAILO_NOTIFICATION_ID_HW_INFER_MANAGER_INFER_DONE;
             break;
+        case CONTEXT_SWITCH_RUN_TIME_ERROR:
+            *hailo_notification_id = HAILO_NOTIFICATION_ID_CONTEXT_SWITCH_RUN_TIME_ERROR_EVENT;
+            break;
         default:
             status = HAILO_INVALID_ARGUMENT;
             goto l_exit;
@@ -714,10 +721,12 @@ bool DeviceBase::is_hef_compatible(hailo_device_architecture_t device_arch, Prot
         return (hef_arch == PROTO__HW_ARCH__HAILO8P) || (hef_arch == PROTO__HW_ARCH__HAILO8R) || (hef_arch == PROTO__HW_ARCH__HAILO8L);
     case HAILO_ARCH_HAILO8L:
         return (hef_arch == PROTO__HW_ARCH__HAILO8L);
-    case HAILO_ARCH_HAILO15:
+    case HAILO_ARCH_HAILO15H:
         // Compare with HW_ARCH__LAVENDER and HW_ARCH__GINGER to support hefs compiled for them
         return (hef_arch == PROTO__HW_ARCH__GINGER) || (hef_arch == PROTO__HW_ARCH__LAVENDER) ||
             (hef_arch == PROTO__HW_ARCH__HAILO15H);
+    case HAILO_ARCH_PLUTO:
+        return (hef_arch == PROTO__HW_ARCH__PLUTO);
     default:
         return false;
     }
@@ -739,7 +748,9 @@ hailo_device_architecture_t DeviceBase::hef_arch_to_device_arch(ProtoHEFHwArch h
     case PROTO__HW_ARCH__HAILO15H:
     case PROTO__HW_ARCH__GINGER:
     case PROTO__HW_ARCH__LAVENDER:
-        return HAILO_ARCH_HAILO15;
+        return HAILO_ARCH_HAILO15H;
+    case PROTO__HW_ARCH__PLUTO:
+        return HAILO_ARCH_PLUTO;
 
     default:
         return HAILO_ARCH_MAX_ENUM;
index 86b410f0b804d26350d84f5047d1b8634c6f15f6..4bf7ebdc59b1cacfe3511d49716345ce99846286 100644 (file)
@@ -4,7 +4,6 @@ set(SRC_FILES
     ${CMAKE_CURRENT_SOURCE_DIR}/eth_device.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/eth_stream.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/hcp_config_core_op.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/hcp_config_activated_core_op.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/udp.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/network_rate_calculator.cpp
 )
index 9b4eeca10eb0837b50198b4843a973581b1e886f..764c87b8844a4a4680915cd8ed426d45c8b6ee3d 100644 (file)
@@ -383,9 +383,7 @@ hailo_status EthernetDevice::disable_notifications()
 
 Expected<ConfiguredNetworkGroupVector> EthernetDevice::add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params)
 {
-    // Reset FW state_machine status - can be removed?
-    static const auto REMOVE_NN_CONFIG_DURING_RESET = false;
-    auto status = Control::reset_context_switch_state_machine(*this, REMOVE_NN_CONFIG_DURING_RESET);
+    auto status = Control::reset_context_switch_state_machine(*this);
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     auto added_network_groups = create_networks_group_vector(hef, configure_params);
index 8b6cada6811eea9f5efd375f1c871cee161b1fd9..56bb9bbf4dc2844ce9e04ffd9e05e073b30ac56f 100644 (file)
@@ -72,19 +72,24 @@ hailo_status EthernetInputStream::deactivate_stream()
 {
     hailo_status status = HAILO_UNINITIALIZED;
 
-    ASSERT(m_is_stream_activated);
+    if (!m_is_stream_activated) {
+        return HAILO_SUCCESS;
+    }
+
+    m_is_stream_activated = false;
 
-    // TODO: Hold a ref not a pointer
     status = Control::close_stream(m_device, m_dataflow_manager_id, true);
     CHECK_SUCCESS(status);
 
-    m_is_stream_activated = false;
+    // Aborting the stream to make sure all read/writes will exit.
+    // Note - on ethernet stream there is no true "clear_abort" - once abort is called, the socket can't be reused.
+    status = abort();
+    CHECK_SUCCESS(status);
 
     return HAILO_SUCCESS;
 }
 
-// Note: Ethernet streams don't work with dynamic batch sizes
-hailo_status EthernetInputStream::activate_stream(uint16_t /* dynamic_batch_size */, bool /* resume_pending_stream_transfers */)
+hailo_status EthernetInputStream::activate_stream()
 {
     hailo_status status = HAILO_UNINITIALIZED;
     CONTROL_PROTOCOL__config_stream_params_t params = {};
@@ -430,9 +435,9 @@ std::chrono::milliseconds EthernetInputStream::get_timeout() const
     return std::chrono::milliseconds((MILLISECONDS_IN_SECOND * m_udp.m_timeout.tv_sec) + (m_udp.m_timeout.tv_usec / MICROSECONDS_IN_MILLISECOND));
 }
 
-uint16_t EthernetInputStream::get_remote_port()
+hailo_status EthernetInputStream::abort()
 {
-    return ntohs(m_udp.m_device_address.sin_port);
+    return m_udp.abort();
 }
 
 /** Output stream **/
@@ -450,18 +455,24 @@ hailo_status EthernetOutputStream::deactivate_stream()
 {
     hailo_status status = HAILO_UNINITIALIZED;
 
-    ASSERT(m_is_stream_activated);
+    if (!m_is_stream_activated) {
+        return HAILO_SUCCESS;
+    }
+
+    m_is_stream_activated = false;
 
     status = Control::close_stream(m_device, m_dataflow_manager_id, false);
     CHECK_SUCCESS(status);
 
-    m_is_stream_activated = false;
+    // Aborting the stream to make sure all read/writes will exit.
+    // Note - on ethernet stream there is no true "clear_abort" - once abort is called, the socket can't be reused.
+    status = abort();
+    CHECK_SUCCESS(status);
 
     return HAILO_SUCCESS;
 }
 
-// Note: Ethernet streams don't work with dynamic batch sizes
-hailo_status EthernetOutputStream::activate_stream(uint16_t /* dynamic_batch_size */, bool /* resume_pending_stream_transfers */)
+hailo_status EthernetOutputStream::activate_stream()
 {
     hailo_status status = HAILO_UNINITIALIZED;
     CONTROL_PROTOCOL__config_stream_params_t params = {};
@@ -634,7 +645,7 @@ bool EthernetOutputStream::is_sync_packet(const void* buffer, size_t offset, siz
             ((hailo_output_sync_packet_t*)((uint8_t*)buffer + offset))->barker == BYTE_ORDER__ntohl(SYNC_PACKET_BARKER));
 }
 
-hailo_status EthernetOutputStream::read_impl(MemoryView &buffer)
+hailo_status EthernetOutputStream::read_impl(MemoryView buffer)
 {
     if ((buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT) != 0) {
         LOGGER__ERROR("Size must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
@@ -676,8 +687,7 @@ Expected<size_t> EthernetOutputStream::sync_read_raw_buffer(MemoryView &buffer)
 
 hailo_status EthernetOutputStream::fill_output_stream_ptr_with_info(const hailo_eth_output_stream_params_t &params, EthernetOutputStream *stream)
 {
-    if ((HAILO_FORMAT_ORDER_HAILO_NMS == stream->m_stream_info.format.order)
-        && (params.is_sync_enabled)) {
+    if ((HailoRTCommon::is_nms(stream->m_stream_info)) && (params.is_sync_enabled)) {
         LOGGER__WARNING("NMS is not supported with sync enabled. Setting sync flag to false");
         stream->configuration.is_sync_enabled = false;
     } else {
@@ -728,9 +738,4 @@ hailo_status EthernetOutputStream::abort()
     return m_udp.abort();
 }
 
-hailo_status EthernetInputStream::abort()
-{
-    return m_udp.abort();
-}
-
 } /* namespace hailort */
index 7702f83bd965787b2a7084cbba9aba2b35739cd4..c726519be8d054b14b7a3369d863a37028843c6d 100644 (file)
@@ -72,8 +72,14 @@ public:
     static Expected<std::unique_ptr<EthernetInputStream>> create(Device &device,
         const LayerInfo &edge_layer, const hailo_eth_input_stream_params_t &params, EventPtr core_op_activated_event);
 
-    uint16_t get_remote_port();
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override
+    {
+        CHECK(buffer_mode == StreamBufferMode::OWNING, HAILO_INVALID_ARGUMENT,
+            "Ethernet streams supports only sync api");
+        return HAILO_SUCCESS;
+    }
+
+    virtual hailo_status activate_stream() override;
     virtual hailo_status deactivate_stream() override;
     virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_ETH; }
     virtual std::chrono::milliseconds get_timeout() const override;
@@ -151,7 +157,14 @@ private:
         m_device(device)
     {}
 
-    hailo_status read_impl(MemoryView &buffer) override;
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override
+    {
+        CHECK(buffer_mode == StreamBufferMode::OWNING, HAILO_INVALID_ARGUMENT,
+            "Ethernet streams supports only sync api");
+        return HAILO_SUCCESS;
+    }
+
+    hailo_status read_impl(MemoryView buffer) override;
     hailo_status read_all_with_sync(void *buffer, size_t offset, size_t size);
     hailo_status read_all_no_sync(void *buffer, size_t offset, size_t size);
 
@@ -171,7 +184,7 @@ public:
     static Expected<std::unique_ptr<EthernetOutputStream>> create(Device &device, const LayerInfo &edge_layer,
         const hailo_eth_output_stream_params_t &params, EventPtr core_op_activated_event);
 
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status activate_stream() override;
     virtual hailo_status deactivate_stream() override;
     virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_ETH; }
     virtual std::chrono::milliseconds get_timeout() const override;
diff --git a/hailort/libhailort/src/eth/hcp_config_activated_core_op.cpp b/hailort/libhailort/src/eth/hcp_config_activated_core_op.cpp
deleted file mode 100644 (file)
index 9dff027..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hcp_config_activated_core_op.cpp
- * @brief HcpConfigActivatedCoreOp implementation
- **/
-
-#include "eth/hcp_config_activated_core_op.hpp"
-#include "device_common/control.hpp"
-
-
-namespace hailort
-{
-
-Expected<HcpConfigActivatedCoreOp> HcpConfigActivatedCoreOp::create(Device &device, std::vector<WriteMemoryInfo> &config,
-        const std::string &core_op_name,
-        // hailo_activate_network_group_params_t is currently an empty holder, if anything will be added to it,
-        // it will require a check that these params will be relevant for this one core op only.
-        const hailo_activate_network_group_params_t &network_group_params,
-        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
-        ActiveCoreOpHolder &active_core_op_holder,
-        hailo_power_mode_t power_mode, EventPtr core_op_activated_event,
-        CoreOp &core_op)
-{
-    CHECK(!active_core_op_holder.is_any_active(), make_unexpected(HAILO_INVALID_OPERATION),
-        "core-op is currently active. You must deactivate before activating another core-op");
-
-    // Close older dataflows
-    auto status = Control::close_all_streams(device);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-
-    // Reset nn_core before writing configurations
-    status = device.reset(HAILO_RESET_DEVICE_MODE_NN_CORE);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-
-    for (auto &m : config) {
-        status = device.write_memory(m.address, MemoryView(m.data));
-        CHECK_SUCCESS_AS_EXPECTED(status);
-    }
-
-    HcpConfigActivatedCoreOp object(device, active_core_op_holder, core_op_name, network_group_params, input_streams, output_streams,
-        power_mode, std::move(core_op_activated_event), core_op, status);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return object;
-}
-
-HcpConfigActivatedCoreOp::HcpConfigActivatedCoreOp(
-        Device &device,
-        ActiveCoreOpHolder &active_core_op_holder,
-        const std::string &core_op_name,
-        const hailo_activate_network_group_params_t &network_group_params,
-        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,    
-        hailo_power_mode_t power_mode,
-        EventPtr &&core_op_activated_event,
-        CoreOp &core_op, hailo_status &status) :
-    ActivatedCoreOp(network_group_params, input_streams, output_streams,
-                              std::move(core_op_activated_event), status),
-    m_active_core_op_holder(active_core_op_holder),
-    m_is_active(true),
-    m_power_mode(power_mode),
-    m_device(device),
-    m_core_op_name(core_op_name)
-{
-    // Validate ActivatedCoreOp status
-    if (HAILO_SUCCESS != status) {
-        return;
-    }
-    status = core_op.activate_impl(CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE);
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Failed to activate core-op");
-        return;
-    }
-}
-
-HcpConfigActivatedCoreOp::~HcpConfigActivatedCoreOp()
-{
-    if (!m_is_active) {
-        return;
-    }
-
-    auto expected_config_network_ref = m_active_core_op_holder.get();
-    if (!expected_config_network_ref.has_value()) {
-        LOGGER__ERROR("Error getting configured core-op");
-        return;
-    }
-    const auto &config_core_op = expected_config_network_ref.value();
-
-    const auto status = config_core_op.get().deactivate_impl();
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Failed to deactivate core-op");
-    }
-}
-
-// TODO: add get_core_op_name() for better code readability?
-const std::string &HcpConfigActivatedCoreOp::get_network_group_name() const
-{
-    // network_group name is the same as core_op name in this case.
-    // HcpConfigActivatedCoreOp should be used only for single core ops network groups.
-    return m_core_op_name;
-}
-
-} /* namespace hailort */
diff --git a/hailort/libhailort/src/eth/hcp_config_activated_core_op.hpp b/hailort/libhailort/src/eth/hcp_config_activated_core_op.hpp
deleted file mode 100644 (file)
index 2b820d8..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file hcp_config_activated_core_op.hpp
- * @brief Represent activated core-op from HEF. 
- *
- * This core-op can be used for control-core-op only (for etherent or pcie)
-  **/
-
-#ifndef _HAILO_CONTEXT_SWITCH_HCP_CONFIG_ACTIVATED_CORE_OP_HPP_
-#define _HAILO_CONTEXT_SWITCH_HCP_CONFIG_ACTIVATED_CORE_OP_HPP_
-
-#include "hailo/device.hpp"
-
-#include "common/utils.hpp"
-
-#include "core_op/active_core_op_holder.hpp"
-
-#include <vector>
-#include <map>
-
-
-namespace hailort
-{
-
-struct WriteMemoryInfo
-{
-    uint32_t address;
-    Buffer data;
-};
-
-class HcpConfigActivatedCoreOp : public ActivatedCoreOp
-{
-  public:
-    static Expected<HcpConfigActivatedCoreOp> create(Device &device, std::vector<WriteMemoryInfo> &config,
-        const std::string &core_op_name,
-        const hailo_activate_network_group_params_t &network_group_params,
-        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
-        ActiveCoreOpHolder &active_core_op_holder,
-        hailo_power_mode_t power_mode, EventPtr core_op_activated_event,
-        CoreOp &core_op);
-
-    virtual ~HcpConfigActivatedCoreOp();
-    HcpConfigActivatedCoreOp(const HcpConfigActivatedCoreOp &) = delete;
-    HcpConfigActivatedCoreOp &operator=(const HcpConfigActivatedCoreOp &) = delete;
-    HcpConfigActivatedCoreOp &operator=(HcpConfigActivatedCoreOp &&) = delete;
-    HcpConfigActivatedCoreOp(HcpConfigActivatedCoreOp &&other) noexcept :
-      ActivatedCoreOp(std::move(other)), m_active_core_op_holder(other.m_active_core_op_holder),
-      m_is_active(std::exchange(other.m_is_active, false)), m_power_mode(other.m_power_mode),
-      m_device(other.m_device), m_core_op_name(std::move(other.m_core_op_name)) {};
-
-    virtual const std::string &get_network_group_name() const override;
-
-    virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &/*key*/) override
-    {
-        LOGGER__ERROR("get_intermediate_buffer() is not supported on single_context core_ops");
-        return make_unexpected(HAILO_INVALID_OPERATION);
-    }
-
-    virtual hailo_status set_keep_nn_config_during_reset(const bool /* keep_nn_config_during_reset */) override
-    {
-        LOGGER__ERROR("set_keep_nn_config_during_reset() is not supported on single_context core_ops");
-        return HAILO_INVALID_OPERATION;
-    }
-
-  private:
-      HcpConfigActivatedCoreOp(Device &device, ActiveCoreOpHolder &active_core_op_holder,
-        const std::string &core_op_name,
-        const hailo_activate_network_group_params_t &network_group_params,
-        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,        
-        hailo_power_mode_t power_mode, EventPtr &&core_op_activated_event,
-        CoreOp &core_op, hailo_status &status);
-
-    ActiveCoreOpHolder &m_active_core_op_holder;
-    bool m_is_active;
-    hailo_power_mode_t m_power_mode;
-    Device &m_device;
-    std::string m_core_op_name;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CONTEXT_SWITCH_HCP_CONFIG_ACTIVATED_CORE_OP_HPP_ */
index 39ad039f5f0bf5438a6d794fe31d382257efcbfb..abc2f857170dd5cddb1eb2caaccfa8adc406d5f8 100644 (file)
@@ -11,30 +11,10 @@ namespace hailort
 HcpConfigCoreOp::HcpConfigCoreOp(Device &device, ActiveCoreOpHolder &active_core_op_holder,
     std::vector<WriteMemoryInfo> &&config, const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> metadata,
     hailo_status &status)
-        : CoreOp(config_params, metadata, status),
-    m_config(std::move(config)), m_active_core_op_holder(active_core_op_holder), m_device(device)
+        : CoreOp(config_params, metadata, active_core_op_holder, status),
+    m_config(std::move(config)), m_device(device)
 {}
 
-Expected<std::unique_ptr<ActivatedNetworkGroup>> HcpConfigCoreOp::create_activated_network_group(
-    const hailo_activate_network_group_params_t &network_group_params, uint16_t /* dynamic_batch_size */,
-    bool /* resume_pending_stream_transfers */)
-{
-    auto start_time = std::chrono::steady_clock::now();
-
-    auto activated_net_group = HcpConfigActivatedCoreOp::create(m_device, m_config, name(), network_group_params,
-        m_input_streams, m_output_streams, m_active_core_op_holder, m_config_params.power_mode,
-        m_core_op_activated_event, (*this));
-    CHECK_EXPECTED(activated_net_group);
-
-    std::unique_ptr<ActivatedNetworkGroup> activated_net_group_ptr = make_unique_nothrow<HcpConfigActivatedCoreOp>(activated_net_group.release());
-    CHECK_AS_EXPECTED(nullptr != activated_net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
-    auto elapsed_time_ms = std::chrono::duration<double, std::milli>(std::chrono::steady_clock::now() - start_time).count();
-    LOGGER__INFO("Activating {} took {} milliseconds. Note that the function is asynchronous and thus the network is not fully activated yet.", name(), elapsed_time_ms);
-
-    return activated_net_group_ptr;
-}
-
 Expected<hailo_stream_interface_t> HcpConfigCoreOp::get_default_streams_interface()
 {
     return m_device.get_default_streams_interface();
@@ -69,7 +49,10 @@ Expected<std::shared_ptr<LatencyMetersMap>> HcpConfigCoreOp::get_latency_meters(
 {
     /* hcp does not support latnecy. return empty map */
     LatencyMetersMap empty_map; 
-    return make_shared_nothrow<LatencyMetersMap>(empty_map);
+    auto res = make_shared_nothrow<LatencyMetersMap>(empty_map);
+    CHECK_NOT_NULL_AS_EXPECTED(res, HAILO_OUT_OF_HOST_MEMORY);
+
+    return res;
 }
 
 Expected<vdma::BoundaryChannelPtr> HcpConfigCoreOp::get_boundary_vdma_channel_by_stream_name(
@@ -86,36 +69,29 @@ Expected<HwInferResults> HcpConfigCoreOp::run_hw_infer_estimator()
     return make_unexpected(HAILO_INVALID_OPERATION);
 }
 
-hailo_status HcpConfigCoreOp::activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+hailo_status HcpConfigCoreOp::activate_impl(uint16_t /* dynamic_batch_size */)
 {
-    m_active_core_op_holder.set(*this);
+    // Close older dataflows
+    auto status = Control::close_all_streams(m_device);
+    CHECK_SUCCESS(status);
 
-    auto status = activate_low_level_streams(dynamic_batch_size, resume_pending_stream_transfers);
-    CHECK_SUCCESS(status, "Failed activating low level streams");
+    // Reset nn_core before writing configurations
+    status = m_device.reset(HAILO_RESET_DEVICE_MODE_NN_CORE);
+    CHECK_SUCCESS(status);
 
-    status = m_core_op_activated_event->signal();
-    CHECK_SUCCESS(status, "Failed to signal network activation event");
+    for (auto &m : m_config) {
+        status = m_device.write_memory(m.address, MemoryView(m.data));
+        CHECK_SUCCESS(status);
+    }
+
+    status = activate_low_level_streams();
+    CHECK_SUCCESS(status, "Failed activating low level streams");
 
     return HAILO_SUCCESS;
 }
-hailo_status HcpConfigCoreOp::deactivate_impl(bool /* keep_nn_config_during_reset */)
-{
-    auto expected_core_op_ref = m_active_core_op_holder.get();
-    CHECK(expected_core_op_ref.has_value(), HAILO_INTERNAL_FAILURE, "Error getting configured core-op");
-
-    const auto &core_op = expected_core_op_ref.value();
-    // Make sure the core-op we are deactivating is this object
-    CHECK(this == std::addressof(core_op.get()), HAILO_INTERNAL_FAILURE,
-        "Trying to deactivate different core-op");
-
-    m_active_core_op_holder.clear();
-
-    if (!m_core_op_activated_event) {
-        return HAILO_SUCCESS;
-    }
-
-    m_core_op_activated_event->reset();
 
+hailo_status HcpConfigCoreOp::deactivate_impl()
+{
     for (auto &name_pair : m_input_streams) {
         const auto status = name_pair.second->flush();
         CHECK_SUCCESS(status, "Failed to flush input stream {}", name_pair.first);
index 9ef18bd63732c7267c3dc79db88cfbf262018f42..a580a9064e0121fbbf42155881efb25852d4bfd3 100644 (file)
@@ -17,8 +17,6 @@
 
 #include "common/utils.hpp"
 
-#include "eth/hcp_config_activated_core_op.hpp"
-#include "core_op/active_core_op_holder.hpp"
 #include "core_op/core_op.hpp"
 
 #include <vector>
 namespace hailort
 {
 
+struct WriteMemoryInfo
+{
+    uint32_t address;
+    Buffer data;
+};
+
 class HcpConfigCoreOp : public CoreOp
 {
 public:
@@ -35,9 +39,6 @@ public:
         Device &device, ActiveCoreOpHolder &active_core_op_holder, std::vector<WriteMemoryInfo> &&config,
         const ConfigureNetworkParams &config_params, std::shared_ptr<CoreOpMetadata> metadata, hailo_status &status);
 
-    virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
-        const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size,
-        bool resume_pending_stream_transfers) override;
     virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override;
 
     virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() override;
@@ -48,8 +49,8 @@ public:
     virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
     virtual hailo_status set_scheduler_priority(uint8_t priority, const std::string &network_name) override;
 
-    virtual hailo_status activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
-    virtual hailo_status deactivate_impl(bool keep_nn_config_during_reset) override;
+    virtual hailo_status activate_impl(uint16_t dynamic_batch_size) override;
+    virtual hailo_status deactivate_impl() override;
     virtual Expected<HwInferResults> run_hw_infer_estimator() override;
 
     virtual ~HcpConfigCoreOp() = default;
@@ -57,13 +58,12 @@ public:
     HcpConfigCoreOp &operator=(const HcpConfigCoreOp &other) = delete;
     HcpConfigCoreOp &operator=(HcpConfigCoreOp &&other) = delete;
     HcpConfigCoreOp(HcpConfigCoreOp &&other) noexcept : CoreOp(std::move(other)),
-        m_config(std::move(other.m_config)), m_active_core_op_holder(other.m_active_core_op_holder),
+        m_config(std::move(other.m_config)),
         m_device(other.m_device) {}
 
 private:
-        std::vector<WriteMemoryInfo> m_config;
-        ActiveCoreOpHolder &m_active_core_op_holder;
-        Device &m_device;
+    std::vector<WriteMemoryInfo> m_config;
+    Device &m_device;
 };
 
 } /* namespace hailort */
index 5a2c450ec38039ff30618559b940a45453171f0c..2d7def5b5c74f9eafde444f6b0d7c460e369bb90 100644 (file)
@@ -140,8 +140,10 @@ Expected<std::map<uint16_t, uint32_t>> NetworkUdpRateCalculator::get_udp_ports_r
 
     std::map<uint16_t, uint32_t> results = {};
     for (const auto &input_stream : udp_input_streams) {
-        uint16_t remote_port = 0;
-        remote_port = reinterpret_cast<EthernetInputStream*>(&(input_stream.get()))->get_remote_port();
+        const auto stream_index = input_stream.get().get_info().index;
+        CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(stream_index + HailoRTCommon::ETH_INPUT_BASE_PORT), HAILO_INTERNAL_FAILURE,
+            "Invalid stream index {}", stream_index);
+        const uint16_t remote_port = static_cast<uint16_t>(stream_index + HailoRTCommon::ETH_INPUT_BASE_PORT);
         results.insert(std::make_pair(remote_port,
             rates_per_name->at(input_stream.get().name())));
     }
index 8704bed2bec9a3e2061e12e0af88dbb09f0ba812..68c2f583082ccfa178a749ee5f470da355fb8ab6 100644 (file)
@@ -21,6 +21,7 @@
 #include "hailo/event.hpp"
 #include "hailo/network_rate_calculator.hpp"
 #include "hailo/inference_pipeline.hpp"
+#include "hailo/quantization.hpp"
 
 #include "common/compiler_extensions_compat.hpp"
 #include "common/os_utils.hpp"
@@ -85,7 +86,7 @@ hailo_status hailo_get_library_version(hailo_version_t *version)
 }
 
 // TODO(oro): wrap with try/catch over C++
-// TODO: Fill eth_device_infos_length items into pcie_device_infos, 
+// TODO: Fill eth_device_infos_length items into pcie_device_infos,
 //       even if 'scan_results->size() > eth_device_infos_length' (HRT-3163)
 hailo_status hailo_scan_ethernet_devices(const char *interface_name, hailo_eth_device_info_t *eth_device_infos,
     size_t eth_device_infos_length, size_t *number_of_devices, uint32_t timeout_ms)
@@ -1489,6 +1490,12 @@ hailo_status hailo_create_input_transform_context(const hailo_stream_info_t *str
     CHECK_ARG_NOT_NULL(transform_params);
     CHECK_ARG_NOT_NULL(transform_context);
 
+    if (!Quantization::is_qp_valid(stream_info->quant_info)) {
+        LOGGER__ERROR("quant_info of stream_info is invalid as the model was compiled with multiple quant_infos. "
+                    "Please compile again or call hailo_create_input_transform_context_by_stream instead");
+        return HAILO_INVALID_ARGUMENT;
+    }
+
     auto local_transform_context = InputTransformContext::create(*stream_info, *transform_params);
     CHECK_EXPECTED_AS_STATUS(local_transform_context);
 
@@ -1496,6 +1503,21 @@ hailo_status hailo_create_input_transform_context(const hailo_stream_info_t *str
     return HAILO_SUCCESS;
 }
 
+hailo_status hailo_create_input_transform_context_by_stream(hailo_input_stream stream,
+    const hailo_transform_params_t *transform_params, hailo_input_transform_context *transform_context)
+{
+    CHECK_ARG_NOT_NULL(stream);
+    CHECK_ARG_NOT_NULL(transform_params);
+    CHECK_ARG_NOT_NULL(transform_context);
+
+    InputStream *input_stream = reinterpret_cast<InputStream*>(stream);
+    auto local_transform_context = InputTransformContext::create(*input_stream, *transform_params);
+    CHECK_EXPECTED_AS_STATUS(local_transform_context);
+
+    *transform_context = reinterpret_cast<hailo_input_transform_context>(local_transform_context.release().release());
+    return HAILO_SUCCESS;
+}
+
 hailo_status hailo_release_input_transform_context(hailo_input_transform_context transform_context)
 {
     CHECK_ARG_NOT_NULL(transform_context);
@@ -1507,6 +1529,7 @@ hailo_status hailo_is_input_transformation_required(const hailo_3d_image_shape_t
     const hailo_3d_image_shape_t *dst_image_shape, const hailo_format_t *dst_format, const hailo_quant_info_t *quant_info,
     bool *transformation_required)
 {
+    LOGGER__WARNING("Using a deprecated function. Use hailo_is_input_transformation_required2 instead");
     CHECK_ARG_NOT_NULL(src_image_shape);
     CHECK_ARG_NOT_NULL(src_format);
     CHECK_ARG_NOT_NULL(dst_image_shape);
@@ -1514,8 +1537,36 @@ hailo_status hailo_is_input_transformation_required(const hailo_3d_image_shape_t
     CHECK_ARG_NOT_NULL(quant_info);
     CHECK_ARG_NOT_NULL(transformation_required);
 
-    *transformation_required = InputTransformContext::is_transformation_required(*src_image_shape, *src_format, *dst_image_shape, *dst_format,
-        *quant_info);
+    auto exp = InputTransformContext::is_transformation_required(*src_image_shape, *src_format, *dst_image_shape, *dst_format,
+        std::vector<hailo_quant_info_t>{*quant_info}); // TODO: Get quant vector (HRT-11052)
+    CHECK_EXPECTED_AS_STATUS(exp);
+    *transformation_required  = exp.value();
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status hailo_is_input_transformation_required2(const hailo_3d_image_shape_t *src_image_shape, const hailo_format_t *src_format,
+    const hailo_3d_image_shape_t *dst_image_shape, const hailo_format_t *dst_format, const hailo_quant_info_t *quant_infos, 
+    size_t quant_infos_count, bool *transformation_required)
+{
+    CHECK_ARG_NOT_NULL(src_image_shape);
+    CHECK_ARG_NOT_NULL(src_format);
+    CHECK_ARG_NOT_NULL(dst_image_shape);
+    CHECK_ARG_NOT_NULL(dst_format);
+    CHECK_ARG_NOT_NULL(quant_infos);
+    CHECK_ARG_NOT_NULL(transformation_required);
+
+    std::vector<hailo_quant_info_t> quant_info_vector;
+    const hailo_quant_info_t* ptr = quant_infos;
+    size_t count = quant_infos_count;
+    for (size_t i = 0; i < count; ++i) {
+        const hailo_quant_info_t& quant_info = *(ptr + i);
+        quant_info_vector.push_back(quant_info);
+    }
+    auto exp = InputTransformContext::is_transformation_required(*src_image_shape, *src_format, *dst_image_shape, *dst_format, quant_info_vector);
+    CHECK_EXPECTED_AS_STATUS(exp);
+    *transformation_required  = exp.value();
+
     return HAILO_SUCCESS;
 }
 
@@ -1533,6 +1584,47 @@ hailo_status hailo_transform_frame_by_input_transform_context(hailo_input_transf
     return HAILO_SUCCESS;
 }
 
+static hailo_status convert_quant_infos_vector_to_array(std::vector<hailo_quant_info_t> quant_infos_vec, 
+    hailo_quant_info_t *quant_infos, size_t *quant_infos_count)
+{
+    size_t quant_infos_array_entries = *quant_infos_count;
+    *quant_infos_count = quant_infos_vec.size();
+
+    CHECK(quant_infos_vec.size() <= quant_infos_array_entries, HAILO_INSUFFICIENT_BUFFER,
+          "The given buffer is too small to contain all quant infos. there are {} quant infos in the given stream, given buffer size is {}",
+          quant_infos_vec.size(), quant_infos_array_entries);
+
+    std::copy(quant_infos_vec.begin(), quant_infos_vec.end(), quant_infos);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status hailo_get_input_stream_quant_infos(hailo_input_stream stream, hailo_quant_info_t *quant_infos, size_t *quant_infos_count)
+{
+    CHECK_ARG_NOT_NULL(stream);
+    CHECK_ARG_NOT_NULL(quant_infos);
+    CHECK_ARG_NOT_NULL(quant_infos_count);
+
+    const auto quant_infos_vector = (reinterpret_cast<const InputStream*>(stream))->get_quant_infos();
+    auto status = convert_quant_infos_vector_to_array(quant_infos_vector, quant_infos, quant_infos_count);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status hailo_get_input_vstream_quant_infos(hailo_input_vstream vstream, hailo_quant_info_t *quant_infos, size_t *quant_infos_count)
+{
+    CHECK_ARG_NOT_NULL(vstream);
+    CHECK_ARG_NOT_NULL(quant_infos);
+    CHECK_ARG_NOT_NULL(quant_infos_count);
+
+    const auto quant_infos_vector = (reinterpret_cast<const InputVStream*>(vstream))->get_quant_infos();
+    auto status = convert_quant_infos_vector_to_array(quant_infos_vector, quant_infos, quant_infos_count);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
 hailo_status hailo_create_output_transform_context(const hailo_stream_info_t *stream_info,
     const hailo_transform_params_t *transform_params, hailo_output_transform_context *transform_context)
 {
@@ -1540,6 +1632,12 @@ hailo_status hailo_create_output_transform_context(const hailo_stream_info_t *st
     CHECK_ARG_NOT_NULL(transform_params);
     CHECK_ARG_NOT_NULL(transform_context);
 
+    if (!Quantization::is_qp_valid(stream_info->quant_info)) {
+        LOGGER__ERROR("quant_info of stream_info is invalid as the model was compiled with multiple quant_infos. "
+                    "Please compile again or call hailo_create_output_transform_context_by_stream instead");
+        return HAILO_INVALID_ARGUMENT;
+    }
+
     auto local_transform_context = OutputTransformContext::create(*stream_info, *transform_params);
     CHECK_EXPECTED_AS_STATUS(local_transform_context);
 
@@ -1547,6 +1645,21 @@ hailo_status hailo_create_output_transform_context(const hailo_stream_info_t *st
     return HAILO_SUCCESS;
 }
 
+hailo_status hailo_create_output_transform_context_by_stream(hailo_output_stream stream,
+    const hailo_transform_params_t *transform_params, hailo_output_transform_context *transform_context)
+{
+    CHECK_ARG_NOT_NULL(stream);
+    CHECK_ARG_NOT_NULL(transform_params);
+    CHECK_ARG_NOT_NULL(transform_context);
+
+    OutputStream *output_stream = reinterpret_cast<OutputStream*>(stream);
+    auto local_transform_context = OutputTransformContext::create(*output_stream, *transform_params);
+    CHECK_EXPECTED_AS_STATUS(local_transform_context);
+
+    *transform_context = reinterpret_cast<hailo_output_transform_context>(local_transform_context.release().release());
+    return HAILO_SUCCESS;
+}
+
 hailo_status hailo_release_output_transform_context(hailo_output_transform_context transform_context)
 {
     CHECK_ARG_NOT_NULL(transform_context);
@@ -1565,8 +1678,37 @@ hailo_status hailo_is_output_transformation_required(const hailo_3d_image_shape_
     CHECK_ARG_NOT_NULL(quant_info);
     CHECK_ARG_NOT_NULL(transformation_required);
 
-    *transformation_required = OutputTransformContext::is_transformation_required(*src_image_shape, *src_format, *dst_image_shape, *dst_format,
-        *quant_info);
+    auto exp = OutputTransformContext::is_transformation_required(*src_image_shape, *src_format, *dst_image_shape, *dst_format,
+        std::vector<hailo_quant_info_t>{*quant_info}); // TODO: Get quant vector (HRT-11052)
+    CHECK_EXPECTED_AS_STATUS(exp);
+    *transformation_required = exp.value();
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status hailo_is_output_transformation_required2(
+    const hailo_3d_image_shape_t *src_image_shape, const hailo_format_t *src_format,
+    const hailo_3d_image_shape_t *dst_image_shape, const hailo_format_t *dst_format,
+    const hailo_quant_info_t *quant_infos, size_t quant_infos_count, bool *transformation_required)
+{
+    CHECK_ARG_NOT_NULL(src_image_shape);
+    CHECK_ARG_NOT_NULL(src_format);
+    CHECK_ARG_NOT_NULL(dst_image_shape);
+    CHECK_ARG_NOT_NULL(dst_format);
+    CHECK_ARG_NOT_NULL(quant_infos);
+    CHECK_ARG_NOT_NULL(transformation_required);
+
+    std::vector<hailo_quant_info_t> quant_info_vector;
+    const hailo_quant_info_t* ptr = quant_infos;
+    size_t count = quant_infos_count;
+    for (size_t i = 0; i < count; ++i) {
+        const hailo_quant_info_t& quant_info = *(ptr + i);
+        quant_info_vector.push_back(quant_info);
+    }
+    auto expected_tranformation_required = OutputTransformContext::is_transformation_required(*src_image_shape, *src_format, *dst_image_shape, *dst_format, quant_info_vector);
+    CHECK_EXPECTED_AS_STATUS(expected_tranformation_required);
+    *transformation_required = expected_tranformation_required.release();
+
     return HAILO_SUCCESS;
 }
 
@@ -1584,6 +1726,41 @@ hailo_status hailo_transform_frame_by_output_transform_context(hailo_output_tran
     return HAILO_SUCCESS;
 }
 
+hailo_status hailo_get_output_stream_quant_infos(hailo_output_stream stream, hailo_quant_info_t *quant_infos, size_t *quant_infos_count)
+{
+    CHECK_ARG_NOT_NULL(stream);
+    CHECK_ARG_NOT_NULL(quant_infos);
+    CHECK_ARG_NOT_NULL(quant_infos_count);
+
+    auto quant_infos_vector = (reinterpret_cast<OutputStream*>(stream))->get_quant_infos();
+    auto status = convert_quant_infos_vector_to_array(quant_infos_vector, quant_infos, quant_infos_count);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status hailo_get_output_vstream_quant_infos(hailo_output_vstream vstream, hailo_quant_info_t *quant_infos, size_t *quant_infos_count)
+{
+    CHECK_ARG_NOT_NULL(vstream);
+    CHECK_ARG_NOT_NULL(quant_infos);
+    CHECK_ARG_NOT_NULL(quant_infos_count);
+
+    const auto quant_infos_vector = (reinterpret_cast<const OutputVStream*>(vstream))->get_quant_infos();
+    auto status = convert_quant_infos_vector_to_array(quant_infos_vector, quant_infos, quant_infos_count);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status hailo_is_qp_valid(const hailo_quant_info_t quant_info, bool *is_qp_valid)
+{
+    CHECK_ARG_NOT_NULL(is_qp_valid);
+
+    *is_qp_valid = Quantization::is_qp_valid(quant_info);
+
+    return HAILO_SUCCESS;
+}
+
 hailo_status hailo_create_demuxer_by_stream(hailo_output_stream stream,
     const hailo_demux_params_t *demux_params, hailo_output_demuxer *demuxer)
 {
@@ -2082,12 +2259,22 @@ hailo_status hailo_vstream_write_raw_buffer(hailo_input_vstream input_vstream, c
 {
     CHECK_ARG_NOT_NULL(input_vstream);
     CHECK_ARG_NOT_NULL(buffer);
-    
+
     auto status = reinterpret_cast<InputVStream*>(input_vstream)->write(MemoryView::create_const(buffer, buffer_size));
     CHECK_SUCCESS(status);
     return HAILO_SUCCESS;
 }
 
+hailo_status hailo_vstream_write_pix_buffer(hailo_input_vstream input_vstream, const hailo_pix_buffer_t *buffer)
+{
+    CHECK_ARG_NOT_NULL(input_vstream);
+    CHECK_ARG_NOT_NULL(buffer);
+
+    auto status = reinterpret_cast<InputVStream*>(input_vstream)->write(*buffer);
+    CHECK_SUCCESS(status);
+    return HAILO_SUCCESS;
+}
+
 hailo_status hailo_vstream_read_raw_buffer(hailo_output_vstream output_vstream, void *dst, size_t dst_size)
 {
     CHECK_ARG_NOT_NULL(output_vstream);
@@ -2098,6 +2285,33 @@ hailo_status hailo_vstream_read_raw_buffer(hailo_output_vstream output_vstream,
     return HAILO_SUCCESS;
 }
 
+hailo_status hailo_vstream_set_nms_score_threshold(hailo_output_vstream output_vstream, float32_t threshold)
+{
+    CHECK_ARG_NOT_NULL(output_vstream);
+
+    auto status = reinterpret_cast<OutputVStream*>(output_vstream)->set_nms_score_threshold(threshold);
+    CHECK_SUCCESS(status);
+    return HAILO_SUCCESS;
+}
+
+hailo_status hailo_vstream_set_nms_iou_threshold(hailo_output_vstream output_vstream, float32_t threshold)
+{
+    CHECK_ARG_NOT_NULL(output_vstream);
+
+    auto status = reinterpret_cast<OutputVStream*>(output_vstream)->set_nms_iou_threshold(threshold);
+    CHECK_SUCCESS(status);
+    return HAILO_SUCCESS;
+}
+
+hailo_status hailo_vstream_set_nms_max_proposals_per_class(hailo_output_vstream output_vstream, uint32_t max_proposals_per_class)
+{
+    CHECK_ARG_NOT_NULL(output_vstream);
+
+    auto status = reinterpret_cast<OutputVStream*>(output_vstream)->set_nms_max_proposals_per_class(max_proposals_per_class);
+    CHECK_SUCCESS(status);
+    return HAILO_SUCCESS;
+}
+
 hailo_status hailo_release_input_vstreams(const hailo_input_vstream *input_vstreams, size_t inputs_count)
 {
     CHECK_ARG_NOT_NULL(input_vstreams);
index 527b95e498fd43986c4a33ae5a9cdc265b196fb1..9424b7c8318f28a3da07efa4be99566bd64fda28 100644 (file)
@@ -361,17 +361,9 @@ std::string HailoRTDefaults::get_network_name(const std::string &net_group_name)
 
 hailo_format_t HailoRTDefaults::expand_auto_format(const hailo_format_t &host_format, const hailo_format_t &hw_format)
 {
-    if (HAILO_FORMAT_ORDER_HAILO_NMS == hw_format.order) {
-        assert(HAILO_FORMAT_TYPE_UINT16 == hw_format.type);
-        // TODO (HRT-11082): On NMS, change meaning of auto to float
-        if (HAILO_FORMAT_TYPE_AUTO == host_format.type) {
-            LOGGER__WARNING("Received 'HAILO_FORMAT_TYPE_AUTO' for NMS output, which is currently translated as HAILO_FORMAT_TYPE_UINT16. "\
-                "Starting HailoRT version 4.15, this will change to HAILO_FORMAT_TYPE_FLOAT32");
-        }
-    }
     auto host_format_copy = host_format;
     if (HAILO_FORMAT_TYPE_AUTO == host_format_copy.type) {
-        host_format_copy.type = hw_format.type;
+        host_format_copy.type = (HAILO_FORMAT_ORDER_HAILO_NMS == hw_format.order) ? HAILO_FORMAT_TYPE_FLOAT32 : hw_format.type;
     }
     if (HAILO_FORMAT_ORDER_AUTO == host_format_copy.order) {
         host_format_copy.order = get_default_host_format_order(hw_format);
@@ -390,4 +382,4 @@ hailo_vdevice_params_t HailoRTDefaults::get_vdevice_params()
     return params;
 }
 
-} /* namespace hailort */
\ No newline at end of file
+} /* namespace hailort */
index 0279ef486ecdfdf1c14b7e1ddd3a110ca99f9365..7c8b0317d35d077962b04cc792e66a8f2119a271 100644 (file)
@@ -286,6 +286,29 @@ Expected<Buffer> StartBurstCreditsTaskAction::serialize_params(const ContextReso
     return Buffer::create(0);
 }
 
+Expected<ContextSwitchConfigActionPtr> ResetBurstCreditsTaskAction::create()
+{
+    auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ResetBurstCreditsTaskAction());
+    CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+    return result;
+}
+
+ResetBurstCreditsTaskAction::ResetBurstCreditsTaskAction() :
+    ContextSwitchConfigAction(Type::ResetBurstCreditsTask, CONTEXT_SWITCH_DEFS__ACTION_TYPE_BURST_CREDITS_TASK_RESET)
+{}
+
+bool ResetBurstCreditsTaskAction::supports_repeated_block() const
+{
+    // We don't support repeated blocks for this action, since only one is added per group of consecutive
+    // TriggerNewDataFromDataInput actions.
+    return false;
+}
+
+Expected<Buffer> ResetBurstCreditsTaskAction::serialize_params(const ContextResources &) const
+{
+    return Buffer::create(0);
+}
+
 Expected<ContextSwitchConfigActionPtr> WaitForNetworkGroupChangeAction::create()
 {
     auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitForNetworkGroupChangeAction());
@@ -579,7 +602,6 @@ Expected<Buffer> AllowInputDataflowAction::serialize_params(const ContextResourc
     params.stream_index = m_stream_index;
     params.network_index = edge_layer->layer_info.network_index;
     params.host_buffer_type = static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer->buffer_info.buffer_type);
-    params.periph_bytes_per_buffer = edge_layer->layer_info.nn_stream_config.periph_bytes_per_buffer;
 
     switch (edge_layer->layer_info.type) {
     case LayerType::BOUNDARY:
@@ -599,6 +621,35 @@ Expected<Buffer> AllowInputDataflowAction::serialize_params(const ContextResourc
     return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
 }
 
+Expected<ContextSwitchConfigActionPtr> ChangeBoundaryInputBatchAction::create(const vdma::ChannelId channel_id)
+{
+    auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ChangeBoundaryInputBatchAction(channel_id));
+    CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+    return result;
+}
+
+
+ChangeBoundaryInputBatchAction::ChangeBoundaryInputBatchAction(const vdma::ChannelId channel_id) :
+    ContextSwitchConfigAction(Type::ChangeBoundaryInputBatchAction,
+                              CONTEXT_SWITCH_DEFS__ACTION_TYPE_CHANGE_BOUNDARY_INPUT_BATCH),
+    m_channel_id(channel_id)
+{}
+
+bool ChangeBoundaryInputBatchAction::supports_repeated_block() const
+{
+    return false;
+}
+
+Expected<Buffer> ChangeBoundaryInputBatchAction::serialize_params(const ContextResources &) const
+{
+    // H2D direction, since these are input-stream actions
+
+    CONTEXT_SWITCH_DEFS__change_boundary_input_batch_t params{};
+    params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+
+    return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
 Expected<ContextSwitchConfigActionPtr> WaitForModuleConfigDoneAction::create(uint8_t module_index)
 {
     auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitForModuleConfigDoneAction(module_index));
@@ -667,7 +718,7 @@ Expected<ContextSwitchConfigActionPtr> StartDdrBufferingTaskAction::create()
 }
 
 StartDdrBufferingTaskAction::StartDdrBufferingTaskAction() :
-    ContextSwitchConfigAction(Type::StartDdrBufferingTask, CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_START)
+ContextSwitchConfigAction(Type::StartDdrBufferingTask, CONTEXT_SWITCH_DEFS__ACTION_TYPE_DDR_BUFFERING_START)
 {}
 
 bool StartDdrBufferingTaskAction::supports_repeated_block() const
@@ -763,10 +814,14 @@ Expected<Buffer> WaitOutputTransferDoneAction::serialize_params(const ContextRes
 
     CONTEXT_SWITCH_DEFS__vdma_dataflow_interrupt_data_t params{};
     params.packed_vdma_channel_id = pack_vdma_channel_id(edge_layer->channel_id);
+    params.stream_index = m_stream_index;
+    params.network_index = edge_layer->layer_info.network_index;
+    params.is_inter_context = static_cast<uint8_t>(LayerType::INTER_CONTEXT == edge_layer->layer_info.type);
+    params.host_buffer_type = static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer->buffer_info.buffer_type);
     return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
 }
 
-Expected<ContextSwitchConfigActionPtr> OpenBoundaryInputChannelAction::create(const vdma::ChannelId &channel_id,
+Expected<ContextSwitchConfigActionPtr> OpenBoundaryInputChannelAction::create(const vdma::ChannelId channel_id,
     const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
 {
     auto result = ContextSwitchConfigActionPtr(new (std::nothrow) OpenBoundaryInputChannelAction(channel_id,
@@ -775,7 +830,7 @@ Expected<ContextSwitchConfigActionPtr> OpenBoundaryInputChannelAction::create(co
     return result;
 }
 
-OpenBoundaryInputChannelAction::OpenBoundaryInputChannelAction(const vdma::ChannelId &channel_id,
+OpenBoundaryInputChannelAction::OpenBoundaryInputChannelAction(const vdma::ChannelId channel_id,
     const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
     ContextSwitchConfigAction(ContextSwitchConfigAction::Type::OpenBoundaryInputChannel,
                               CONTEXT_SWITCH_DEFS__ACTION_TYPE_OPEN_BOUNDARY_INPUT_CHANNEL),
@@ -789,11 +844,22 @@ bool OpenBoundaryInputChannelAction::supports_repeated_block() const
     return false;
 }
 
-Expected<Buffer> OpenBoundaryInputChannelAction::serialize_params(const ContextResources &) const
+Expected<Buffer> OpenBoundaryInputChannelAction::serialize_params(const ContextResources &context_resources) const
 {
     CONTEXT_SWITCH_DEFS__open_boundary_input_channel_data_t params{};
-    params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+
+    // H2D direction, since these are input-stream actions
+    const auto edge_layer = context_resources.get_edge_layer_by_channel_id(m_channel_id);
+    CHECK_EXPECTED(edge_layer);
+
+    params.packed_vdma_channel_id = pack_vdma_channel_id(edge_layer->channel_id);
     params.host_buffer_info = m_host_buffer_info;
+    params.stream_index = edge_layer->layer_info.stream_index;
+    params.network_index = edge_layer->layer_info.network_index;
+    params.periph_bytes_per_buffer = edge_layer->layer_info.nn_stream_config.periph_bytes_per_buffer;
+    params.frame_periph_size = edge_layer->layer_info.nn_stream_config.periph_bytes_per_buffer *
+        edge_layer->layer_info.nn_stream_config.periph_buffers_per_frame;
+
     return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
 }
 
@@ -839,6 +905,7 @@ static CONTEXT_SWITCH_DEFS__stream_reg_info_t parse_nn_config(const CONTROL_PROT
     reg_info.buffer_padding = nn_config.buffer_padding;
     reg_info.periph_bytes_per_buffer = nn_config.periph_bytes_per_buffer;
     reg_info.periph_buffers_per_frame = nn_config.periph_buffers_per_frame;
+    reg_info.is_periph_calculated_in_hailort = nn_config.is_periph_calculated_in_hailort;
     return reg_info;
 }
 
@@ -882,22 +949,23 @@ Expected<Buffer> ActivateBoundaryInputChannelAction::serialize_params(const Cont
 }
 
 Expected<ContextSwitchConfigActionPtr> ActivateBoundaryOutputChannelAction::create(const vdma::ChannelId &channel_id,
-    uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+    uint8_t stream_index, uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
     const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info)
 {
     auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ActivateBoundaryOutputChannelAction(channel_id,
-        stream_index, nn_stream_config, host_buffer_info));
+        stream_index, network_index, nn_stream_config, host_buffer_info));
     CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
     return result;
 }
 
 ActivateBoundaryOutputChannelAction::ActivateBoundaryOutputChannelAction(const vdma::ChannelId &channel_id,
-    uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+    uint8_t stream_index, uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
     const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info) :
     ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ActivateBoundaryOutputChannel,
                               CONTEXT_SWITCH_DEFS__ACTION_TYPE_ACTIVATE_BOUNDARY_OUTPUT),
     m_channel_id(channel_id),
     m_stream_index(stream_index),
+    m_network_index(network_index),
     m_nn_stream_config(nn_stream_config),
     m_host_buffer_info(host_buffer_info)
 {}
@@ -913,6 +981,7 @@ Expected<Buffer> ActivateBoundaryOutputChannelAction::serialize_params(const Con
     CONTEXT_SWITCH_DEFS__activate_boundary_output_data_t params{};
     params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
     params.stream_index = m_stream_index;
+    params.network_index = m_network_index;
     params.stream_reg_info = parse_nn_config(m_nn_stream_config);
     params.host_buffer_info = m_host_buffer_info;
     return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
@@ -1078,11 +1147,19 @@ Expected<Buffer> ActivateDdrOutputChannelAction::serialize_params(const ContextR
     return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
 }
 
-Expected<ContextSwitchConfigActionPtr> ValidateChannelAction::create(const EdgeLayer &edge_layer)
+Expected<ContextSwitchConfigActionPtr> ValidateChannelAction::create(const EdgeLayer &edge_layer,
+    const bool is_batch_switch_context)
 {
-    const bool is_inter_context = (LayerType::INTER_CONTEXT == edge_layer.layer_info.type);
+    const bool check_host_empty_num_available =
+        // In batch switch context we still have desc avail on the host side from both directions (from activate inter context input and output actions).
+        !is_batch_switch_context &&
+        // DDR and boundary channels always have host descriptors ready to be sent.
+        (LayerType::INTER_CONTEXT == edge_layer.layer_info.type) &&
+        // For inter context output in CCB mode, the C2C always sets new avail descriptors from the host side.
+        !(edge_layer.layer_info.direction == HAILO_D2H_STREAM &&
+            static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer.buffer_info.buffer_type) == CONTROL_PROTOCOL__HOST_BUFFER_TYPE_CCB);
     auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ValidateChannelAction(edge_layer.channel_id,
-        edge_layer.layer_info.direction, is_inter_context,
+        edge_layer.layer_info.direction, check_host_empty_num_available,
         static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer.buffer_info.buffer_type),
         edge_layer.layer_info.max_shmifo_size));
     CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
@@ -1090,13 +1167,13 @@ Expected<ContextSwitchConfigActionPtr> ValidateChannelAction::create(const EdgeL
 }
 
 ValidateChannelAction::ValidateChannelAction(const vdma::ChannelId &channel_id,
-    hailo_stream_direction_t stream_direction, bool is_inter_context,
+    hailo_stream_direction_t stream_direction, bool check_host_empty_num_available,
     CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size) :
     ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ValidateChannel,
                               CONTEXT_SWITCH_DEFS__ACTION_TYPE_VALIDATE_VDMA_CHANNEL),
     m_channel_id(channel_id),
     m_stream_direction(stream_direction),
-    m_is_inter_context(is_inter_context),
+    m_check_host_empty_num_available(check_host_empty_num_available),
     m_host_buffer_type(host_buffer_type),
     m_initial_credit_size(initial_credit_size)
 {}
@@ -1114,17 +1191,25 @@ Expected<Buffer> ValidateChannelAction::serialize_params(const ContextResources
     params.edge_layer_direction = m_stream_direction == HAILO_H2D_STREAM ?
         static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_HOST_TO_DEVICE) :
         static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_DEVICE_TO_HOST);
-    params.is_inter_context = m_is_inter_context;
+    params.check_host_empty_num_available = m_check_host_empty_num_available;
     params.host_buffer_type = static_cast<uint8_t>(m_host_buffer_type);
     params.initial_credit_size = m_initial_credit_size;
     return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
 }
 
-Expected<ContextSwitchConfigActionPtr> DeactivateChannelAction::create(const EdgeLayer &edge_layer)
+Expected<ContextSwitchConfigActionPtr> DeactivateChannelAction::create(const EdgeLayer &edge_layer,
+    const bool is_batch_switch_context)
 {
-    const bool is_inter_context = (LayerType::INTER_CONTEXT == edge_layer.layer_info.type);
+    const bool check_host_empty_num_available =
+        // In batch switch context we still have desc avail on the host side from both directions (from activate inter context input and output actions).
+        !is_batch_switch_context &&
+        // DDR and boundary channels always have host descriptors ready to be sent.
+        (LayerType::INTER_CONTEXT == edge_layer.layer_info.type) &&
+        // For inter context output in CCB mode, the C2C always sets new avail descriptors from the host side.
+        !(edge_layer.layer_info.direction == HAILO_D2H_STREAM &&
+            static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer.buffer_info.buffer_type) == CONTROL_PROTOCOL__HOST_BUFFER_TYPE_CCB);
     auto result = ContextSwitchConfigActionPtr(new (std::nothrow) DeactivateChannelAction(edge_layer.channel_id,
-        edge_layer.layer_info.direction, is_inter_context,
+        edge_layer.layer_info.direction, check_host_empty_num_available,
         static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer.buffer_info.buffer_type),
         edge_layer.layer_info.max_shmifo_size));
     CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
@@ -1132,13 +1217,13 @@ Expected<ContextSwitchConfigActionPtr> DeactivateChannelAction::create(const Edg
 }
 
 DeactivateChannelAction::DeactivateChannelAction(const vdma::ChannelId &channel_id,
-    hailo_stream_direction_t stream_direction, bool is_inter_context,
+    hailo_stream_direction_t stream_direction, bool check_host_empty_num_available,
     CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size) :
     ContextSwitchConfigAction(ContextSwitchConfigAction::Type::DeactivateChannel,
                               CONTEXT_SWITCH_DEFS__ACTION_TYPE_DEACTIVATE_VDMA_CHANNEL),
     m_channel_id(channel_id),
     m_stream_direction(stream_direction),
-    m_is_inter_context(is_inter_context),
+    m_check_host_empty_num_available(check_host_empty_num_available),
     m_host_buffer_type(host_buffer_type),
     m_initial_credit_size(initial_credit_size)
 {}
@@ -1156,12 +1241,76 @@ Expected<Buffer> DeactivateChannelAction::serialize_params(const ContextResource
     params.edge_layer_direction = m_stream_direction == HAILO_H2D_STREAM ? 
         static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_HOST_TO_DEVICE) : 
         static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_DEVICE_TO_HOST);
-    params.is_inter_context = m_is_inter_context;
+    params.check_host_empty_num_available = m_check_host_empty_num_available;
     params.host_buffer_type = static_cast<uint8_t>(m_host_buffer_type);
     params.initial_credit_size = m_initial_credit_size;
     return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
 }
 
+Expected<ContextSwitchConfigActionPtr> PauseVdmaChannel::create(const EdgeLayer &edge_layer)
+{
+    auto result = ContextSwitchConfigActionPtr(new (std::nothrow) PauseVdmaChannel(edge_layer.channel_id,
+        edge_layer.layer_info.direction));
+    CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+    return result;
+}
+
+PauseVdmaChannel::PauseVdmaChannel(const vdma::ChannelId &channel_id,
+    hailo_stream_direction_t stream_direction) :
+    ContextSwitchConfigAction(ContextSwitchConfigAction::Type::PauseVdmaChannel,
+                              CONTEXT_SWITCH_DEFS__ACTION_TYPE_PAUSE_VDMA_CHANNEL),
+    m_channel_id(channel_id),
+    m_stream_direction(stream_direction)
+{}
+
+bool PauseVdmaChannel::supports_repeated_block() const
+{
+    // Pause action shouldn't be repeated (for easier debugging).
+    return false;
+}
+
+Expected<Buffer> PauseVdmaChannel::serialize_params(const ContextResources &) const
+{
+    CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t params{};
+    params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+    params.edge_layer_direction = m_stream_direction == HAILO_H2D_STREAM ?
+        static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_HOST_TO_DEVICE) :
+        static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_DEVICE_TO_HOST);
+    return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
+Expected<ContextSwitchConfigActionPtr> ResumeVdmaChannel::create(const EdgeLayer &edge_layer)
+{
+    auto result = ContextSwitchConfigActionPtr(new (std::nothrow) ResumeVdmaChannel(edge_layer.channel_id,
+        edge_layer.layer_info.direction));
+    CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+    return result;
+}
+
+ResumeVdmaChannel::ResumeVdmaChannel(const vdma::ChannelId &channel_id,
+    hailo_stream_direction_t stream_direction) :
+    ContextSwitchConfigAction(ContextSwitchConfigAction::Type::ResumeVdmaChannel,
+                              CONTEXT_SWITCH_DEFS__ACTION_TYPE_RESUME_VDMA_CHANNEL),
+    m_channel_id(channel_id),
+    m_stream_direction(stream_direction)
+{}
+
+bool ResumeVdmaChannel::supports_repeated_block() const
+{
+    // Resume action shouldn't be repeated (for easier debugging).
+    return false;
+}
+
+Expected<Buffer> ResumeVdmaChannel::serialize_params(const ContextResources &) const
+{
+    CONTEXT_SWITCH_DEFS__pause_vdma_channel_action_data_t params{};
+    params.packed_vdma_channel_id = pack_vdma_channel_id(m_channel_id);
+    params.edge_layer_direction = m_stream_direction == HAILO_H2D_STREAM ?
+        static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_HOST_TO_DEVICE) :
+        static_cast<uint8_t>(CONTEXT_SWITCH_DEFS__EDGE_LAYER_DIRECTION_DEVICE_TO_HOST);
+    return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
+}
+
 Expected<ContextSwitchConfigActionPtr> WaitDmaIdleAction::create(uint8_t stream_index)
 {
     auto result = ContextSwitchConfigActionPtr(new (std::nothrow) WaitDmaIdleAction(stream_index));
@@ -1190,6 +1339,7 @@ Expected<Buffer> WaitDmaIdleAction::serialize_params(const ContextResources &con
     params.packed_vdma_channel_id = pack_vdma_channel_id(edge_layer->channel_id);
     params.is_inter_context = static_cast<uint8_t>(LayerType::INTER_CONTEXT == edge_layer->layer_info.type);
     params.stream_index = m_stream_index;
+    params.host_buffer_type = static_cast<CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t>(edge_layer->buffer_info.buffer_type);
     return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
 }
 
@@ -1233,20 +1383,23 @@ Expected<Buffer> WaitNmsIdleAction::serialize_params(const ContextResources &) c
     return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
 }
 
-Expected<ContextSwitchConfigActionPtr> EnableNmsAction::create(uint8_t nms_unit_index, uint8_t network_index, uint16_t number_of_classes,
-    uint16_t burst_size)
+Expected<ContextSwitchConfigActionPtr> EnableNmsAction::create(uint8_t nms_unit_index, uint8_t network_index,
+    uint16_t number_of_classes, uint16_t burst_size, uint8_t division_factor)
 {
-    auto result = ContextSwitchConfigActionPtr(new (std::nothrow) EnableNmsAction(nms_unit_index, network_index, number_of_classes, burst_size));
+    auto result = ContextSwitchConfigActionPtr(new (std::nothrow) EnableNmsAction(nms_unit_index, network_index,
+        number_of_classes, burst_size, division_factor));
     CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
     return result;
 }
 
-EnableNmsAction::EnableNmsAction(uint8_t nms_unit_index, uint8_t network_index, uint16_t number_of_classes, uint16_t burst_size) :
+EnableNmsAction::EnableNmsAction(uint8_t nms_unit_index, uint8_t network_index, uint16_t number_of_classes,
+    uint16_t burst_size, uint8_t division_factor) :
     ContextSwitchConfigAction(ContextSwitchConfigAction::Type::EnableNms, CONTEXT_SWITCH_DEFS__ACTION_TYPE_ENABLE_NMS),
     m_nms_unit_index(nms_unit_index),
     m_network_index(network_index),
     m_number_of_classes(number_of_classes),
-    m_burst_size(burst_size)
+    m_burst_size(burst_size),
+    m_division_factor(division_factor)
 {}
 
 Expected<Buffer> EnableNmsAction::serialize_params(const ContextResources &) const
@@ -1256,6 +1409,7 @@ Expected<Buffer> EnableNmsAction::serialize_params(const ContextResources &) con
     params.network_index = m_network_index;
     params.number_of_classes = m_number_of_classes;
     params.burst_size = m_burst_size;
+    params.division_factor = m_division_factor;
     return Buffer::create(reinterpret_cast<uint8_t*>(&params), sizeof(params));
 }
 
index defe31aa44c027d3f1c4fa43bd2e00930d4288df..63170949c03dfeb303bd937e4b620e112d070afa 100644 (file)
@@ -55,6 +55,7 @@ public:
         ResetDdrBufferingTask,
         AddRepeated,
         StartBurstCreditsTask,
+        ResetBurstCreditsTask,
         WaitForNetworkGroupChange,
         ChangeVdmaToStreamMapping,
         WaitOutputTransferDone,
@@ -73,6 +74,9 @@ public:
         EnableNms,
         WriteDataByType,
         SwitchLcuBatch,
+        ChangeBoundaryInputBatchAction,
+        PauseVdmaChannel,
+        ResumeVdmaChannel,
     };
 
     ContextSwitchConfigAction(ContextSwitchConfigAction &&) = default;
@@ -230,6 +234,23 @@ private:
     StartBurstCreditsTaskAction();
 };
 
+class ResetBurstCreditsTaskAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create();
+
+    ResetBurstCreditsTaskAction(ResetBurstCreditsTaskAction &&) = default;
+    ResetBurstCreditsTaskAction(const ResetBurstCreditsTaskAction &) = delete;
+    ResetBurstCreditsTaskAction &operator=(ResetBurstCreditsTaskAction &&) = delete;
+    ResetBurstCreditsTaskAction &operator=(const ResetBurstCreditsTaskAction &) = delete;
+    virtual ~ResetBurstCreditsTaskAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    ResetBurstCreditsTaskAction();
+};
+
 class WaitForNetworkGroupChangeAction : public ContextSwitchConfigAction
 {
 public:
@@ -391,6 +412,24 @@ private:
     const uint8_t m_stream_index;
 };
 
+class ChangeBoundaryInputBatchAction : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId channel_id);
+    ChangeBoundaryInputBatchAction(ChangeBoundaryInputBatchAction &&) = default;
+    ChangeBoundaryInputBatchAction(const ChangeBoundaryInputBatchAction &) = delete;
+    ChangeBoundaryInputBatchAction &operator=(ChangeBoundaryInputBatchAction &&) = delete;
+    ChangeBoundaryInputBatchAction &operator=(const ChangeBoundaryInputBatchAction &) = delete;
+    virtual ~ChangeBoundaryInputBatchAction() = default;
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    explicit ChangeBoundaryInputBatchAction(const vdma::ChannelId channel_id);
+
+    const vdma::ChannelId m_channel_id;
+};
+
 class WaitForModuleConfigDoneAction : public ContextSwitchConfigAction
 {
 public:
@@ -502,14 +541,14 @@ private:
 class OpenBoundaryInputChannelAction : public ContextSwitchConfigAction
 {
 public:
-    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
+    static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId channel_id,
         const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
 
     virtual bool supports_repeated_block() const override;
     virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
 
 private:
-    OpenBoundaryInputChannelAction(const vdma::ChannelId &channel_id,
+    OpenBoundaryInputChannelAction(const vdma::ChannelId channel_id,
         const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
 
     const vdma::ChannelId m_channel_id;
@@ -560,7 +599,7 @@ class ActivateBoundaryOutputChannelAction : public ContextSwitchConfigAction
 {
 public:
     static Expected<ContextSwitchConfigActionPtr> create(const vdma::ChannelId &channel_id,
-        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        uint8_t stream_index, uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
         const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
 
     virtual bool supports_repeated_block() const override;
@@ -568,11 +607,12 @@ public:
 
 private:
     ActivateBoundaryOutputChannelAction(const vdma::ChannelId &channel_id,
-        uint8_t stream_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
+        uint8_t stream_index, uint8_t network_index, const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config,
         const CONTROL_PROTOCOL__host_buffer_info_t &host_buffer_info);
 
     const vdma::ChannelId m_channel_id;
     const uint8_t m_stream_index;
+    const uint8_t m_network_index;
     const CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
     const CONTROL_PROTOCOL__host_buffer_info_t m_host_buffer_info;
 };
@@ -672,18 +712,19 @@ private:
 class ValidateChannelAction : public ContextSwitchConfigAction
 {
 public:
-    static Expected<ContextSwitchConfigActionPtr> create(const EdgeLayer &edge_layer);
+    static Expected<ContextSwitchConfigActionPtr> create(const EdgeLayer &edge_layer,
+        const bool is_batch_switch_context);
 
     virtual bool supports_repeated_block() const override;
     virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
 
 private:
     ValidateChannelAction(const vdma::ChannelId &channel_id, hailo_stream_direction_t stream_direction,
-        bool is_inter_context, CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size);
+        bool check_host_empty_num_available, CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size);
 
     const vdma::ChannelId m_channel_id;
     const hailo_stream_direction_t m_stream_direction;
-    const bool m_is_inter_context;
+    const bool m_check_host_empty_num_available;
     const CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t m_host_buffer_type;
     const uint32_t m_initial_credit_size;
 };
@@ -691,22 +732,52 @@ private:
 class DeactivateChannelAction : public ContextSwitchConfigAction
 {
 public:
-    static Expected<ContextSwitchConfigActionPtr> create(const EdgeLayer &edge_layer);
+    static Expected<ContextSwitchConfigActionPtr> create(const EdgeLayer &edge_layer, const bool is_batch_switch_context);
 
     virtual bool supports_repeated_block() const override;
     virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
 
 private:
     DeactivateChannelAction(const vdma::ChannelId &channel_id, hailo_stream_direction_t stream_direction,
-        bool is_inter_context, CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size);
+        bool check_host_empty_num_available, CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t host_buffer_type, uint32_t initial_credit_size);
 
     const vdma::ChannelId m_channel_id;
     const hailo_stream_direction_t m_stream_direction;
-    const bool m_is_inter_context;
+    const bool m_check_host_empty_num_available;
     const CONTROL_PROTOCOL__HOST_BUFFER_TYPE_t m_host_buffer_type;
     const uint32_t m_initial_credit_size;
 };
 
+class PauseVdmaChannel : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const EdgeLayer &edge_layer);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    PauseVdmaChannel(const vdma::ChannelId &channel_id, hailo_stream_direction_t stream_direction);
+
+    const vdma::ChannelId m_channel_id;
+    const hailo_stream_direction_t m_stream_direction;
+};
+
+class ResumeVdmaChannel : public ContextSwitchConfigAction
+{
+public:
+    static Expected<ContextSwitchConfigActionPtr> create(const EdgeLayer &edge_layer);
+
+    virtual bool supports_repeated_block() const override;
+    virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
+
+private:
+    ResumeVdmaChannel(const vdma::ChannelId &channel_id, hailo_stream_direction_t stream_direction);
+
+    const vdma::ChannelId m_channel_id;
+    const hailo_stream_direction_t m_stream_direction;
+};
+
 class WaitDmaIdleAction : public ContextSwitchConfigAction
 {
 public:
@@ -747,7 +818,7 @@ class EnableNmsAction : public ContextSwitchConfigAction
 {
 public:
     static Expected<ContextSwitchConfigActionPtr> create(uint8_t nms_unit_index, uint8_t network_index, uint16_t number_of_classes,
-        uint16_t burst_size);
+        uint16_t burst_size, uint8_t division_factor);
     EnableNmsAction(EnableNmsAction &&) = default;
     EnableNmsAction(const EnableNmsAction &) = delete;
     EnableNmsAction &operator=(EnableNmsAction &&) = delete;
@@ -757,12 +828,13 @@ public:
     virtual Expected<Buffer> serialize_params(const ContextResources &context_resources) const override;
 
 private:
-    EnableNmsAction(uint8_t nms_unit_index, uint8_t network_index, uint16_t number_of_classes, uint16_t burst_size);
+    EnableNmsAction(uint8_t nms_unit_index, uint8_t network_index, uint16_t number_of_classes, uint16_t burst_size, uint8_t division_factor);
 
     const uint8_t m_nms_unit_index;
     const uint8_t m_network_index;
     const uint16_t m_number_of_classes;
     const uint16_t m_burst_size;
+    const uint8_t m_division_factor;
 };
 
 class WriteDataByTypeAction : public ContextSwitchConfigAction
index e10f4155dedf040fadf9dda4cba192cf5d7ada7e..c7ef727c65d4bc5fc02c04f4799c3ff401625a3f 100644 (file)
@@ -294,7 +294,8 @@ Expected<std::vector<hailo_stream_info_t>> CoreOpMetadata::get_input_stream_info
     auto input_layers = get_input_layer_infos(network_name);
     CHECK_EXPECTED(input_layers);
     for (auto &layer_info : input_layers.value()) {
-        res.push_back(LayerInfoUtils::get_stream_info_from_layer_info(layer_info));
+        const auto &stream_infos = LayerInfoUtils::get_stream_infos_from_layer_info(layer_info);
+        res.insert(res.end(), stream_infos.begin(), stream_infos.end());
     }
     return res;
 }
@@ -305,7 +306,8 @@ Expected<std::vector<hailo_stream_info_t>> CoreOpMetadata::get_output_stream_inf
     auto output_layers = get_output_layer_infos(network_name);
     CHECK_EXPECTED(output_layers);
     for (auto &layer_info : output_layers.value()) {
-        res.push_back(LayerInfoUtils::get_stream_info_from_layer_info(layer_info));
+        const auto &stream_infos = LayerInfoUtils::get_stream_infos_from_layer_info(layer_info);
+        res.insert(res.end(), stream_infos.begin(), stream_infos.end());
     }
     return res;
 }
@@ -332,6 +334,11 @@ size_t CoreOpMetadata::get_contexts_count()
     return (m_dynamic_contexts.size() + CONTROL_PROTOCOL__CONTEXT_SWITCH_NUMBER_OF_NON_DYNAMIC_CONTEXTS);
 }
 
+size_t CoreOpMetadata::get_dynamic_contexts_count()
+{
+    return m_dynamic_contexts.size();
+}
+
 Expected<size_t> CoreOpMetadata::get_total_transfer_size()
 {
     size_t total_transfer_size = 0;
@@ -367,7 +374,7 @@ void CoreOpMetadataPerArch::add_metadata(const CoreOpMetadataPtr &metadata, uint
 Expected<NetworkGroupMetadata> NetworkGroupMetadata::create(const std::string &network_group_name,
     std::map<std::string, CoreOpMetadataPerArch> &&core_ops_metadata_per_arch, std::vector<std::string> &sorted_output_names,
     SupportedFeatures &supported_features, const std::vector<std::string> &sorted_network_names,
-    std::vector<std::shared_ptr<NetFlowElement>> &net_flow_ops)
+    std::vector<hailort::net_flow::PostProcessOpMetadataPtr> &ops_metadata)
 {
     auto all_layers_infos = get_all_layer_infos(core_ops_metadata_per_arch);
     CHECK_EXPECTED(all_layers_infos);
@@ -375,8 +382,8 @@ Expected<NetworkGroupMetadata> NetworkGroupMetadata::create(const std::string &n
     std::vector<hailo_vstream_info_t> input_vstream_infos;
     std::vector<hailo_vstream_info_t> output_vstream_infos;
     for (auto &layer_info : all_layers_infos.value()) {
-        if (std::any_of(net_flow_ops.begin(), net_flow_ops.end(),
-            [&layer_info](auto &op) { return contains(op->input_streams, layer_info.name); })) {
+        if (std::any_of(ops_metadata.begin(), ops_metadata.end(),
+            [&layer_info](auto &op_metadata) { return contains(op_metadata->get_input_names(), layer_info.name); })) {
             continue; // all output_vstream_infos that relates to the op are coming from the op itself instead of layer_infos
         }
         auto vstreams_info = LayerInfoUtils::get_vstream_infos_from_layer_info(layer_info);
@@ -392,8 +399,10 @@ Expected<NetworkGroupMetadata> NetworkGroupMetadata::create(const std::string &n
                 std::make_move_iterator(vstreams_info.begin()), std::make_move_iterator(vstreams_info.end()));
         }
     }
-    for (auto &op : net_flow_ops) {
-        output_vstream_infos.push_back(op->output_vstream_info);
+    for (auto &metadata : ops_metadata) {
+        auto vstream_info = metadata->get_output_vstream_info();
+        CHECK_EXPECTED(vstream_info);
+        output_vstream_infos.push_back(vstream_info.release());
     }
 
     // Sort vstream infos by sorted_output_names
@@ -421,7 +430,7 @@ Expected<NetworkGroupMetadata> NetworkGroupMetadata::create(const std::string &n
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     return NetworkGroupMetadata(network_group_name, std::move(core_ops_metadata_per_arch), sorted_output_names, supported_features, sorted_network_names,
-        input_vstream_infos, output_vstream_infos, net_flow_ops);
+        input_vstream_infos, output_vstream_infos, ops_metadata);
 }
 
 Expected<std::vector<hailo_vstream_info_t>> NetworkGroupMetadata::get_input_vstream_infos(const std::string &network_name) const
@@ -469,9 +478,9 @@ Expected<std::vector<hailo_vstream_info_t>> NetworkGroupMetadata::get_all_vstrea
 Expected<std::vector<std::string>> NetworkGroupMetadata::get_vstream_names_from_stream_name(const std::string &stream_name)
 {
     std::vector<std::string> results;
-    for (auto &pp : m_net_flow_ops) {
-        if (contains(pp->input_streams, stream_name)) {
-            for (auto &output_metadata : pp->op->outputs_metadata()) {
+    for (auto &pp : m_ops_metadata) {
+        if (contains(pp->get_input_names(), stream_name)) {
+            for (auto &output_metadata : pp->outputs_metadata()) {
                 results.push_back(output_metadata.first);
             }
             return results;
@@ -481,6 +490,13 @@ Expected<std::vector<std::string>> NetworkGroupMetadata::get_vstream_names_from_
     auto all_layers_infos = get_all_layer_infos(m_core_ops_metadata_per_arch);
     CHECK_EXPECTED(all_layers_infos);
     for (auto &layer_info : all_layers_infos.release()) {
+        if (layer_info.is_multi_planar) {
+            for (auto &plane : layer_info.planes) {
+                if (stream_name == plane.name) {
+                    return std::vector<std::string> (1, layer_info.name);
+                }
+            }
+        }
         if (stream_name == layer_info.name) {
             if (layer_info.is_defused_nms) {
                 return std::vector<std::string> (1, layer_info.fused_nms_layer[0].name);
@@ -497,9 +513,9 @@ Expected<std::vector<std::string>> NetworkGroupMetadata::get_vstream_names_from_
 Expected<std::vector<std::string>> NetworkGroupMetadata::get_stream_names_from_vstream_name(const std::string &vstream_name)
 {
     std::vector<std::string> results;
-    for (auto &pp : m_net_flow_ops) {
-        if (contains(pp->op->outputs_metadata(), vstream_name)) {
-            for (auto &input_name : pp->input_streams) {
+    for (auto &pp : m_ops_metadata) {
+        if (contains(pp->outputs_metadata(), vstream_name)) {
+            for (auto &input_name : pp->get_input_names()) {
                 results.push_back(input_name);
             }
             return results;
@@ -519,11 +535,16 @@ Expected<std::vector<std::string>> NetworkGroupMetadata::get_stream_names_from_v
                 // vstream_name is the fused-layer of the layer info
                 results.push_back(layer_info.name);
             }
-        } else if (m_supported_features.hailo_net_flow && layer_info.direction == HAILO_D2H_STREAM) {
-            results.push_back(layer_info.name);
         } else if (vstream_name == layer_info.name) {
-            // vstream_name is a regular stream
-            results.push_back(layer_info.name);
+            // Multi planar case
+            if (layer_info.is_multi_planar) {
+                for (auto &plane : layer_info.planes) {
+                    results.push_back(plane.name);
+                }
+            } else {
+                // vstream_name is a regular stream
+                results.push_back(layer_info.name);
+            }
         }
     }
     CHECK_AS_EXPECTED(0 < results.size(), HAILO_NOT_FOUND, "Did not found vstream {}", vstream_name);
@@ -537,7 +558,7 @@ Expected<std::vector<hailo_network_info_t>> NetworkGroupMetadata::get_network_in
     for (auto const &network_name : m_sorted_network_names) {
         hailo_network_info_t network_info = {};
         CHECK_AS_EXPECTED(HAILO_MAX_NETWORK_NAME_SIZE >= (network_name.length() + 1), HAILO_INTERNAL_FAILURE,
-            "The network '{}' has a too long name (max is HAILO_MAX_NETWORK_NAME_SIZE)", network_name);  
+            "The network '{}' has a too long name (max is HAILO_MAX_NETWORK_NAME_SIZE)", network_name);
         memcpy(network_info.name, network_name.c_str(), network_name.length() + 1);
 
         network_infos.push_back(network_info);
index b449679dc8934510d01a4dabab71a688b0d57fb3..365164f9dc17a472838e88284b54b78f579ce622 100644 (file)
@@ -12,6 +12,7 @@
 
 #include "hef/layer_info.hpp"
 #include "hef/context_switch_actions.hpp"
+#include "net_flow/ops/op_metadata.hpp"
 
 
 namespace hailort
@@ -103,6 +104,7 @@ public:
     Expected<std::vector<hailo_stream_info_t>> get_all_stream_infos(const std::string &network_name = "") const;
 
     size_t get_contexts_count();
+    size_t get_dynamic_contexts_count();
 
     const std::string &core_op_name() const
     {
@@ -151,8 +153,6 @@ private:
     std::map<uint32_t, CoreOpMetadataPtr> m_metadata_per_arch;
 };
 
-struct NetFlowElement;
-
 class NetworkGroupMetadata final {
 public:
     static Expected<NetworkGroupMetadata> create(const std::string &network_group_name,
@@ -160,7 +160,7 @@ public:
         std::vector<std::string> &sorted_output_names,
         SupportedFeatures &supported_features,
         const std::vector<std::string> &sorted_network_names,
-        std::vector<std::shared_ptr<hailort::NetFlowElement>> &net_flow_ops);
+        std::vector<net_flow::PostProcessOpMetadataPtr> &ops_metadata);
 
     NetworkGroupMetadata(const std::string &network_group_name,
         std::map<std::string, CoreOpMetadataPerArch> &&core_ops_metadata_per_arch,
@@ -169,7 +169,7 @@ public:
         const std::vector<std::string> &sorted_network_names,
         std::vector<hailo_vstream_info_t> &input_vstreams_infos,
         std::vector<hailo_vstream_info_t> &output_vstreams_infos,
-        std::vector<std::shared_ptr<hailort::NetFlowElement>> &net_flow_ops) :
+        std::vector<net_flow::PostProcessOpMetadataPtr> &ops_metadata) :
             m_network_group_name(network_group_name),
             m_sorted_output_names(sorted_output_names),
             m_supported_features(supported_features),
@@ -177,7 +177,7 @@ public:
             m_input_vstreams_infos(input_vstreams_infos),
             m_output_vstreams_infos(output_vstreams_infos),
             m_core_ops_metadata_per_arch(std::move(core_ops_metadata_per_arch)),
-            m_net_flow_ops(net_flow_ops)
+            m_ops_metadata(ops_metadata)
         {};
 
     Expected<std::vector<hailo_vstream_info_t>> get_input_vstream_infos(const std::string &network_name = "") const;
@@ -235,7 +235,7 @@ private:
     std::vector<hailo_vstream_info_t> m_output_vstreams_infos;
 
     std::map<std::string, CoreOpMetadataPerArch> m_core_ops_metadata_per_arch; // Key is core_op_name
-    std::vector<std::shared_ptr<NetFlowElement>> m_net_flow_ops;
+    std::vector<net_flow::PostProcessOpMetadataPtr> m_ops_metadata;
 
     friend class Hef;
     friend class ConfiguredNetworkGroupBase;
index f719c72ce904cdda5b2dc3968097e0d40e47d377..38130ef7d25a7f7fa70f0abaa8405f2d36275859 100644 (file)
@@ -15,6 +15,7 @@
 #include "hailo/device.hpp"
 #include "hailo/hailort_common.hpp"
 #include "hailo/hailort_defaults.hpp"
+#include "hailo/quantization.hpp"
 
 #include "common/string_utils.hpp"
 #include "common/utils.hpp"
 #include "common/file_utils.hpp"
 
 #include "net_flow/ops/nms_post_process.hpp"
-#include "net_flow/ops/yolo_post_process.hpp"
+#include "net_flow/ops/yolov5_post_process.hpp"
 #include "net_flow/ops/yolox_post_process.hpp"
 #include "net_flow/ops/ssd_post_process.hpp"
 #include "net_flow/ops/argmax_post_process.hpp"
 #include "net_flow/ops/softmax_post_process.hpp"
+#include "net_flow/ops/yolov5_seg_post_process.hpp"
 #include "hef/hef_internal.hpp"
 #include "vdma/pcie/pcie_device.hpp"
 #include "vdma/vdma_config_manager.hpp"
 #include "eth/hcp_config_core_op.hpp"
 #include "hef/layer_info.hpp"
 #include "device_common/control.hpp"
-#include "stream_common/nms_stream_reader.hpp"
 
 #include "byte_order.h"
 #include "context_switch_defs.h"
@@ -56,9 +57,10 @@ namespace hailort
 #define DEFAULT_BATCH_SIZE (1)
 #define SKIP_SPACE_COMMA_CHARACTERS (2)
 #define ALIGNED_TO_4_BYTES (4)
-#define DEFAULT_NMS_NO_BURST_SIZE (1)
+constexpr uint8_t DEFAULT_DIVISION_FACTOR = 1;
 
 static const uint8_t ENABLE_LCU_CONTROL_WORD[4] = {1, 0, 0, 0};
+static const hailo_quant_info_t INVALID_QP = {INVALID_QP_VALUE, INVALID_QP_VALUE, INVALID_QP_VALUE, INVALID_QP_VALUE};
 
 #define TAB ("    ")
 
@@ -78,8 +80,8 @@ static std::string get_shape_str(const hailo_stream_info_t &stream_info)
     {
     case HAILO_FORMAT_ORDER_HAILO_NMS:
         return HailoRTCommon::get_format_type_str(stream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(stream_info.format.order) +
-            "(number of classes: " + std::to_string(stream_info.nms_info.number_of_classes) +
-            ", max_bboxes_per_class: "+ std::to_string(stream_info.nms_info.max_bboxes_per_class) + ")";
+            "(maximum frame size: " + std::to_string(HailoRTCommon::get_nms_hw_frame_size(stream_info.nms_info)) + ")";
+
     case HAILO_FORMAT_ORDER_NC:
         return HailoRTCommon::get_format_type_str(stream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(stream_info.format.order) +
             "(" + std::to_string(stream_info.hw_shape.features) + ")";
@@ -98,9 +100,11 @@ static std::string get_shape_str(const hailo_vstream_info_t &vstream_info)
     switch (vstream_info.format.order)
     {
     case HAILO_FORMAT_ORDER_HAILO_NMS:
+    case HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK:
         return HailoRTCommon::get_format_type_str(vstream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(vstream_info.format.order) +
             "(number of classes: " + std::to_string(vstream_info.nms_shape.number_of_classes) +
-            ", max_bboxes_per_class: " + std::to_string(vstream_info.nms_shape.max_bboxes_per_class) + ")";
+            ", maximum bounding boxes per class: " + std::to_string(vstream_info.nms_shape.max_bboxes_per_class) +
+            ", maximum frame size: " + std::to_string(HailoRTCommon::get_nms_host_frame_size(vstream_info.nms_shape, vstream_info.format)) + ")";
     case HAILO_FORMAT_ORDER_NC:
         return HailoRTCommon::get_format_type_str(vstream_info.format.type) + ", " + HailoRTCommon::get_format_order_str(vstream_info.format.order) +
             "(" + std::to_string(vstream_info.shape.features) + ")";
@@ -533,9 +537,9 @@ hailo_status Hef::Impl::fill_networks_metadata()
                     CHECK_EXPECTED_AS_STATUS(metadata_per_arch_exp);
                     auto metadata_per_arch = metadata_per_arch_exp.release();
 
-                    auto expected_net_flow_ops = create_net_flow_ops(*network_group, *metadata_per_arch, get_device_arch());
-                    CHECK_EXPECTED_AS_STATUS(expected_net_flow_ops);
-                    m_post_process_ops_per_group.insert({metadata_per_arch->core_op_name(), expected_net_flow_ops.value()});
+                    auto expected_ops_metadata = create_ops_metadata(*network_group, *metadata_per_arch, get_device_arch());
+                    CHECK_EXPECTED_AS_STATUS(expected_ops_metadata);
+                    m_post_process_ops_metadata_per_group.insert({metadata_per_arch->core_op_name(), expected_ops_metadata.value()});
                     core_op_metadata.add_metadata(metadata_per_arch, partial_clusters_layout_bitmap);
                 }
             } else {
@@ -555,8 +559,8 @@ hailo_status Hef::Impl::fill_networks_metadata()
                     CHECK_EXPECTED_AS_STATUS(metadata_per_arch_exp);
                     auto metadata_per_arch = metadata_per_arch_exp.release();
 
-                    std::vector<std::shared_ptr<NetFlowElement>> empty_ops;
-                    m_post_process_ops_per_group.insert({metadata_per_arch->core_op_name(), empty_ops});
+                    std::vector<net_flow::PostProcessOpMetadataPtr> empty_metadata_ops;
+                    m_post_process_ops_metadata_per_group.insert({metadata_per_arch->core_op_name(), empty_metadata_ops});
                     core_op_metadata.add_metadata(metadata_per_arch, partial_clusters_layout_bitmap);
                 }
             }
@@ -566,9 +570,9 @@ hailo_status Hef::Impl::fill_networks_metadata()
             CHECK_EXPECTED_AS_STATUS(metadata_per_arch_exp);
             auto metadata_per_arch = metadata_per_arch_exp.release();
 
-            auto expected_net_flow_ops = create_net_flow_ops(*network_group, *metadata_per_arch, get_device_arch());
-            CHECK_EXPECTED_AS_STATUS(expected_net_flow_ops);
-            m_post_process_ops_per_group.insert({metadata_per_arch->core_op_name(), expected_net_flow_ops.value()});
+            auto expected_ops_metadata = create_ops_metadata(*network_group, *metadata_per_arch, get_device_arch());
+            CHECK_EXPECTED_AS_STATUS(expected_ops_metadata);
+            m_post_process_ops_metadata_per_group.insert({metadata_per_arch->core_op_name(), expected_ops_metadata.value()});
             core_op_metadata.add_metadata(metadata_per_arch, partial_clusters_layout_bitmap);
         }
 
@@ -610,9 +614,8 @@ hailo_status Hef::Impl::fill_networks_metadata()
                 sorted_output_names.push_back(name);
             }
         }
-
         auto network_group_metadata = NetworkGroupMetadata::create(network_group_name, std::move(core_op_metadata_map),
-            sorted_output_names, m_supported_features, sorted_network_names, m_post_process_ops_per_group.at(network_group_name));
+            sorted_output_names, m_supported_features, sorted_network_names, m_post_process_ops_metadata_per_group.at(network_group_name));
 
         CHECK_EXPECTED_AS_STATUS(network_group_metadata);
         m_network_group_metadata.emplace(network_group_name, network_group_metadata.release());
@@ -845,7 +848,7 @@ SupportedFeatures Hef::Impl::get_supported_features(const ProtoHEFHeader &header
     return supported_features;
 }
 
-net_flow::NmsPostProcessConfig create_nms_config(const ProtoHEFOp &op_proto)
+net_flow::NmsPostProcessConfig create_post_process_nms_config(const ProtoHEFOp &op_proto)
 {
     net_flow::NmsPostProcessConfig nms_config{};
     nms_config.nms_score_th = (float32_t)op_proto.nms_op().nms_score_th();
@@ -858,14 +861,13 @@ net_flow::NmsPostProcessConfig create_nms_config(const ProtoHEFOp &op_proto)
     return nms_config;
 }
 
-Expected<std::shared_ptr<net_flow::Op>> create_yolov5_op(const ProtoHEFOp &op_proto, hailo_format_t output_format,
-    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads)
+Expected<net_flow::YoloPostProcessConfig> create_yolov5_config(const google::protobuf::RepeatedPtrField<ProtoHEFYoloBboxDecoder> &bbox_decoders,
+    double image_height, double image_width, const std::map<size_t, LayerInfo> &pad_index_to_streams_info)
 {
-    auto nms_config = create_nms_config(op_proto);
     net_flow::YoloPostProcessConfig yolo_config{};
-    yolo_config.image_height = (float32_t)op_proto.nms_op().yolo_nms_op().image_height();
-    yolo_config.image_width = (float32_t)op_proto.nms_op().yolo_nms_op().image_width();
-    for (auto &bbox_proto : op_proto.nms_op().yolo_nms_op().bbox_decoders()) {
+    yolo_config.image_height = static_cast<float32_t>(image_height);
+    yolo_config.image_width = static_cast<float32_t>(image_width);
+    for (auto &bbox_proto : bbox_decoders) {
         std::vector<int> bbox_anchors;
         CHECK_AS_EXPECTED((bbox_proto.h().size() == bbox_proto.w().size()), HAILO_INVALID_HEF,
             "YOLOv5 height anchors count {} doesn't mach the width anchors count {}", bbox_proto.h().size(), bbox_proto.w().size());
@@ -877,12 +879,13 @@ Expected<std::shared_ptr<net_flow::Op>> create_yolov5_op(const ProtoHEFOp &op_pr
         yolo_config.anchors.insert({pad_index_to_streams_info.at(bbox_proto.pad_index()).name, bbox_anchors});
     }
 
-    std::map<std::string, net_flow::BufferMetaData> inputs_metadata;
-    std::map<std::string, net_flow::BufferMetaData> outputs_metadata;
-    net_flow::BufferMetaData output_metadata{};
-    output_metadata.format = output_format;
-    outputs_metadata.insert({op_proto.output_pads()[0].name(), output_metadata});
+    return yolo_config;
+}
 
+Expected<std::unordered_map<std::string, net_flow::BufferMetaData>> create_inputs_metadata(const ProtoHEFOp &op_proto,
+    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads)
+{
+    std::unordered_map<std::string, net_flow::BufferMetaData> inputs_metadata;
     for (auto &input_pad : op_proto.input_pads()) {
         CHECK_AS_EXPECTED(contains(input_to_output_pads, static_cast<size_t>(input_pad.index())), HAILO_INVALID_HEF,
             "NMS op is not connected to core op");
@@ -898,23 +901,79 @@ Expected<std::shared_ptr<net_flow::Op>> create_yolov5_op(const ProtoHEFOp &op_pr
         input_metadata.padded_shape = op_input_stream.hw_shape;
         inputs_metadata.insert({op_input_stream.name, input_metadata});
     }
-    return net_flow::YOLOv5PostProcessOp::create(inputs_metadata, outputs_metadata, nms_config, yolo_config);
+
+    return inputs_metadata;
 }
 
-Expected<std::shared_ptr<net_flow::Op>> create_yolox_op(const ProtoHEFOp &op_proto, hailo_format_t output_format,
-    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads)
+Expected<net_flow::PostProcessOpMetadataPtr> create_yolov5_op_metadata(const ProtoHEFOp &op_proto,
+    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads,
+    const std::string &network_name)
+{
+    auto nms_config = create_post_process_nms_config(op_proto);
+
+    auto yolo_config = create_yolov5_config(op_proto.nms_op().yolo_nms_op().bbox_decoders(),
+        op_proto.nms_op().yolo_nms_op().image_height(), op_proto.nms_op().yolo_nms_op().image_width(), pad_index_to_streams_info);
+    CHECK_EXPECTED(yolo_config);
+
+    auto inputs_metadata = create_inputs_metadata(op_proto, pad_index_to_streams_info, input_to_output_pads);
+    CHECK_EXPECTED(inputs_metadata);
+
+    std::unordered_map<std::string, net_flow::BufferMetaData> outputs_metadata;
+    net_flow::BufferMetaData output_metadata{};
+    output_metadata.format = net_flow::NmsOpMetadata::expand_output_format_autos_by_op_type(
+        { HAILO_FORMAT_TYPE_AUTO, HAILO_FORMAT_ORDER_AUTO, HAILO_FORMAT_FLAGS_NONE }, net_flow::OperationType::YOLOV5);
+    outputs_metadata.insert({op_proto.output_pads()[0].name(), output_metadata});
+
+    return net_flow::Yolov5OpMetadata::create(inputs_metadata.release(), outputs_metadata, nms_config, yolo_config.release(),
+        network_name);
+}
+
+Expected<net_flow::PostProcessOpMetadataPtr> create_yolov5_seg_op_metadata(const ProtoHEFOp &op_proto,
+    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads,
+    const std::string &network_name)
+{
+    auto nms_config = create_post_process_nms_config(op_proto);
+    auto yolov5_config = create_yolov5_config(op_proto.nms_op().yolo_seg_op().bbox_decoders(),
+        op_proto.nms_op().yolo_seg_op().image_height(), op_proto.nms_op().yolo_seg_op().image_width(), pad_index_to_streams_info);
+    CHECK_EXPECTED(yolov5_config);
+
+    auto inputs_metadata = create_inputs_metadata(op_proto, pad_index_to_streams_info, input_to_output_pads);
+    CHECK_EXPECTED(inputs_metadata);
+
+    auto proto_layer_name = op_proto.nms_op().yolo_seg_op().proto_info().proto_layer();
+    CHECK_AS_EXPECTED(contains(inputs_metadata.value(), proto_layer_name), HAILO_INVALID_HEF);
+    net_flow::YoloV5SegPostProcessConfig yolov5_seg_config =
+    {static_cast<float32_t>(op_proto.nms_op().yolo_seg_op().mask_threshold()),
+    op_proto.nms_op().yolo_seg_op().proto_info().proto_layer()};
+
+    std::unordered_map<std::string, net_flow::BufferMetaData> outputs_metadata;
+    net_flow::BufferMetaData output_metadata{};
+    output_metadata.format = net_flow::NmsOpMetadata::expand_output_format_autos_by_op_type
+        ({ HAILO_FORMAT_TYPE_AUTO, HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK, HAILO_FORMAT_FLAGS_NONE },
+        net_flow::OperationType::YOLOV5SEG);
+    outputs_metadata.insert({op_proto.output_pads()[0].name(), output_metadata});
+
+    return net_flow::Yolov5SegOpMetadata::create(inputs_metadata.release(), outputs_metadata, nms_config, yolov5_config.release(),
+        yolov5_seg_config, network_name);
+}
+
+Expected<net_flow::PostProcessOpMetadataPtr> create_yolox_op_metadata(const ProtoHEFOp &op_proto,
+    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads,
+    const std::string &network_name)
 {
-    auto nms_config = create_nms_config(op_proto);
+    auto nms_config = create_post_process_nms_config(op_proto);
+
     net_flow::YoloxPostProcessConfig yolox_config{};
     yolox_config.image_height = (float32_t)op_proto.nms_op().yolox_nms_op().image_height();
     yolox_config.image_width = (float32_t)op_proto.nms_op().yolox_nms_op().image_width();
 
-    std::map<std::string, net_flow::BufferMetaData> inputs_metadata;
-    std::map<std::string, net_flow::BufferMetaData> outputs_metadata;
+    std::unordered_map<std::string, net_flow::BufferMetaData> inputs_metadata;
+    std::unordered_map<std::string, net_flow::BufferMetaData> outputs_metadata;
     net_flow::BufferMetaData output_metadata{};
-    output_metadata.format = output_format;
+    output_metadata.format = net_flow::NmsOpMetadata::expand_output_format_autos_by_op_type(
+        { HAILO_FORMAT_TYPE_AUTO, HAILO_FORMAT_ORDER_AUTO, HAILO_FORMAT_FLAGS_NONE }, net_flow::OperationType::YOLOX);
     outputs_metadata.insert({op_proto.output_pads()[0].name(), output_metadata});
-    
+
     for (auto &bbox_proto : op_proto.nms_op().yolox_nms_op().bbox_decoders()) {
         assert(contains(pad_index_to_streams_info, static_cast<size_t>(bbox_proto.reg_pad_index())));
         auto reg_name = pad_index_to_streams_info.at(bbox_proto.reg_pad_index()).name;
@@ -940,13 +999,17 @@ Expected<std::shared_ptr<net_flow::Op>> create_yolox_op(const ProtoHEFOp &op_pro
         input_metadata.padded_shape = op_input_stream.hw_shape;
         inputs_metadata.insert({op_input_stream.name, input_metadata});
     }
-    return net_flow::YOLOXPostProcessOp::create(inputs_metadata, outputs_metadata, nms_config, yolox_config);
+
+    return net_flow::YoloxOpMetadata::create(inputs_metadata, outputs_metadata, nms_config, yolox_config,
+        network_name);
 }
 
-Expected<std::shared_ptr<net_flow::Op>> create_ssd_op(const ProtoHEFOp &op_proto, hailo_format_t output_format,
-    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads)
+Expected<net_flow::PostProcessOpMetadataPtr> create_ssd_op_metadata(const ProtoHEFOp &op_proto,
+    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads,
+    const std::string &network_name)
 {
-    auto nms_config = create_nms_config(op_proto);
+    auto nms_config = create_post_process_nms_config(op_proto);
+
     net_flow::SSDPostProcessConfig ssd_config{};
     ssd_config.image_height = (float32_t)op_proto.nms_op().ssd_nms_op().image_height();
     ssd_config.image_width = (float32_t)op_proto.nms_op().ssd_nms_op().image_width();
@@ -980,10 +1043,11 @@ Expected<std::shared_ptr<net_flow::Op>> create_ssd_op(const ProtoHEFOp &op_proto
         ssd_config.reg_to_cls_inputs.insert({reg_name, cls_name});
     }
 
-    std::map<std::string, net_flow::BufferMetaData> inputs_metadata;
-    std::map<std::string, net_flow::BufferMetaData> outputs_metadata;
+    std::unordered_map<std::string, net_flow::BufferMetaData> inputs_metadata;
+    std::unordered_map<std::string, net_flow::BufferMetaData> outputs_metadata;
     net_flow::BufferMetaData output_metadata{};
-    output_metadata.format = output_format;
+    output_metadata.format = net_flow::NmsOpMetadata::expand_output_format_autos_by_op_type(
+        { HAILO_FORMAT_TYPE_AUTO, HAILO_FORMAT_ORDER_AUTO, HAILO_FORMAT_FLAGS_NONE }, net_flow::OperationType::SSD);
     outputs_metadata.insert({op_proto.output_pads()[0].name(), output_metadata});
 
     for (auto &input_pad : op_proto.input_pads()) {
@@ -1001,37 +1065,32 @@ Expected<std::shared_ptr<net_flow::Op>> create_ssd_op(const ProtoHEFOp &op_proto
         input_metadata.padded_shape = op_input_stream.hw_shape;
         inputs_metadata.insert({op_input_stream.name, input_metadata});
     }
-    return net_flow::SSDPostProcessOp::create(inputs_metadata, outputs_metadata, nms_config, ssd_config);
+
+    return net_flow::SSDOpMetadata::create(inputs_metadata, outputs_metadata, nms_config, ssd_config, network_name);
 }
 
-Expected<std::shared_ptr<net_flow::Op>> create_argmax_op(const ProtoHEFPad &input_pad, const ProtoHEFPad &output_pad,
-    const std::string &input_name, const std::string &output_name, const bool &is_hw_padding_supported)
+Expected<std::shared_ptr<net_flow::OpMetadata>> create_argmax_op_metadata(const LayerInfo &op_input_layer_info, const ProtoHEFPad &output_pad,
+    const std::string &output_name, const bool &is_hw_padding_supported, const std::string &network_name)
 {
     // create input meta
-    std::map<std::string, hailort::net_flow::BufferMetaData> inputs_metadata;
+    std::unordered_map<std::string, hailort::net_flow::BufferMetaData> inputs_metadata;
     hailort::net_flow::BufferMetaData input_metadata{};
-    input_metadata.shape = {input_pad.tensor_shape().height(), input_pad.tensor_shape().width(), input_pad.tensor_shape().features()};
+    input_metadata.shape = op_input_layer_info.shape;
     // If padding is done in HW, the padded shape is as the shape (TODO: Remove once HRT support hw_padding from DFC)
     if (is_hw_padding_supported) {
         input_metadata.padded_shape = input_metadata.shape;
     } else {
-        input_metadata.padded_shape = {input_pad.tensor_shape().padded_height(), input_pad.tensor_shape().padded_width(),
-            input_pad.tensor_shape().padded_features()};
+        input_metadata.padded_shape = op_input_layer_info.hw_shape;
     }
 
-    input_metadata.format.type = static_cast<hailo_format_type_t>(input_pad.format_type());
-    input_metadata.format.order = static_cast<hailo_format_order_t>(input_pad.format_order());
-    input_metadata.format.flags = HAILO_FORMAT_FLAGS_NONE;
-    input_metadata.quant_info.qp_zp = input_pad.numeric_info().qp_zp();
-    input_metadata.quant_info.qp_scale = input_pad.numeric_info().qp_scale();
-    input_metadata.quant_info.limvals_min = input_pad.numeric_info().limvals_min();
-    input_metadata.quant_info.limvals_max = input_pad.numeric_info().limvals_max();
-    inputs_metadata.insert({input_name, input_metadata});
+    input_metadata.format = op_input_layer_info.format;
+    input_metadata.quant_info = op_input_layer_info.quant_info;
+    inputs_metadata.insert({op_input_layer_info.name, input_metadata});
 
     // create output meta
-    std::map<std::string, hailort::net_flow::BufferMetaData> outputs_metadata;
+    std::unordered_map<std::string, hailort::net_flow::BufferMetaData> outputs_metadata;
     hailort::net_flow::BufferMetaData output_metadata{};
-    output_metadata.shape = {input_pad.tensor_shape().height(), input_pad.tensor_shape().width(), hailort::net_flow::ARGMAX_OUTPUT_FEATURES_SIZE};
+    output_metadata.shape = {input_metadata.shape.height, input_metadata.shape.width, hailort::net_flow::ARGMAX_OUTPUT_FEATURES_SIZE};
     output_metadata.padded_shape = output_metadata.shape;   // padded_shape is the same as the output_shape in argmax op
     output_metadata.format.order = static_cast<hailo_format_order_t>(output_pad.format_order());
     output_metadata.format.type = static_cast<hailo_format_type_t>(output_pad.format_type());
@@ -1041,32 +1100,61 @@ Expected<std::shared_ptr<net_flow::Op>> create_argmax_op(const ProtoHEFPad &inpu
     output_metadata.quant_info.limvals_max = output_pad.numeric_info().limvals_max();
     output_metadata.format.flags = HAILO_FORMAT_FLAGS_NONE;
     outputs_metadata.insert({output_name, output_metadata});
-    return net_flow::ArgmaxPostProcessOp::create(inputs_metadata, outputs_metadata);
+
+    return net_flow::ArgmaxOpMetadata::create(inputs_metadata, outputs_metadata, network_name);
+}
+
+Expected<net_flow::PostProcessOpMetadataPtr> create_iou_op_metadata(const ProtoHEFOp &op_proto,
+    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads,
+    const std::string &network_name)
+{
+    auto op_type = net_flow::OperationType::IOU;
+    auto nms_config = create_post_process_nms_config(op_proto);
+
+    std::unordered_map<std::string, net_flow::BufferMetaData> inputs_metadata;
+    std::unordered_map<std::string, net_flow::BufferMetaData> outputs_metadata;
+    net_flow::BufferMetaData output_metadata{};
+    output_metadata.format = net_flow::NmsOpMetadata::expand_output_format_autos_by_op_type(
+        { HAILO_FORMAT_TYPE_AUTO, HAILO_FORMAT_ORDER_AUTO, HAILO_FORMAT_FLAGS_NONE }, op_type);
+    outputs_metadata.insert({op_proto.output_pads()[0].name(), output_metadata});
+
+    for (auto &input_pad : op_proto.input_pads()) {
+        CHECK_AS_EXPECTED(contains(input_to_output_pads, static_cast<size_t>(input_pad.index())), HAILO_INVALID_HEF,
+            "NMS op is not connected to core op");
+        auto output_pad_index = input_to_output_pads.at(input_pad.index());
+        CHECK_AS_EXPECTED(contains(pad_index_to_streams_info, output_pad_index), HAILO_INVALID_HEF,
+            "Pad {} of post-process {} is not connected to any core output stream",
+                input_pad.index(), op_proto.name());
+        const auto &op_input_stream = pad_index_to_streams_info.at(output_pad_index);
+        net_flow::BufferMetaData input_metadata{};
+        input_metadata.format = op_input_stream.format;
+        input_metadata.quant_info = op_input_stream.quant_info;
+        input_metadata.shape = op_input_stream.shape;
+        input_metadata.padded_shape = op_input_stream.hw_shape;
+        inputs_metadata.insert({op_input_stream.name, input_metadata});
+    }
+
+    return net_flow::NmsOpMetadata::create(inputs_metadata, outputs_metadata, nms_config,network_name, op_type, "IoU-Post-Process");
 }
 
-Expected<std::shared_ptr<net_flow::Op>> create_softmax_op(const ProtoHEFPad &input_pad, const ProtoHEFPad &output_pad,
-    const std::string &input_name, const std::string &output_name)
+Expected<std::shared_ptr<net_flow::OpMetadata>> create_softmax_op_metadata(const LayerInfo &op_input_layer_info, const ProtoHEFPad &output_pad,
+    const std::string &output_name, const std::string &network_name)
 {
     // create input meta
-    std::map<std::string, hailort::net_flow::BufferMetaData> inputs_metadata;
+    std::unordered_map<std::string, hailort::net_flow::BufferMetaData> inputs_metadata;
     hailort::net_flow::BufferMetaData input_metadata{};
-    input_metadata.shape = {input_pad.tensor_shape().height(), input_pad.tensor_shape().width(), input_pad.tensor_shape().features()};
-    input_metadata.padded_shape = input_metadata.shape;     // since softmax is connected to transform context, shape and padded shape are the same
-
-    input_metadata.format.type = static_cast<hailo_format_type_t>(input_pad.format_type());
-    input_metadata.format.order = static_cast<hailo_format_order_t>(input_pad.format_order());
-    input_metadata.format.flags = HAILO_FORMAT_FLAGS_NONE;
-    input_metadata.quant_info.qp_zp = input_pad.numeric_info().qp_zp();
-    input_metadata.quant_info.qp_scale = input_pad.numeric_info().qp_scale();
-    input_metadata.quant_info.limvals_min = input_pad.numeric_info().limvals_min();
-    input_metadata.quant_info.limvals_max = input_pad.numeric_info().limvals_max();
-    inputs_metadata.insert({input_name, input_metadata});
+    input_metadata.shape = op_input_layer_info.shape;
+    input_metadata.padded_shape = input_metadata.shape; // Since softmax is connected to transform context, shape and padded shape are the same
+
+    input_metadata.format = op_input_layer_info.format;
+    input_metadata.quant_info = op_input_layer_info.quant_info;
+    inputs_metadata.insert({op_input_layer_info.name, input_metadata});
 
     // create output meta
-    std::map<std::string, hailort::net_flow::BufferMetaData> outputs_metadata;
+    std::unordered_map<std::string, hailort::net_flow::BufferMetaData> outputs_metadata;
     hailort::net_flow::BufferMetaData output_metadata{};
-    output_metadata.shape = {input_pad.tensor_shape().height(), input_pad.tensor_shape().width(), input_pad.tensor_shape().features()};
-    output_metadata.padded_shape = output_metadata.shape;   // padded_shape is the same as the output_shape in softmax op
+    output_metadata.shape = input_metadata.shape;
+    output_metadata.padded_shape = output_metadata.shape; // padded_shape is the same as the output_shape in softmax op
     output_metadata.format.order = static_cast<hailo_format_order_t>(output_pad.format_order());
     output_metadata.format.type = static_cast<hailo_format_type_t>(output_pad.format_type());
     output_metadata.quant_info.qp_zp = output_pad.numeric_info().qp_zp();
@@ -1075,44 +1163,41 @@ Expected<std::shared_ptr<net_flow::Op>> create_softmax_op(const ProtoHEFPad &inp
     output_metadata.quant_info.limvals_max = output_pad.numeric_info().limvals_max();
     output_metadata.format.flags = HAILO_FORMAT_FLAGS_NONE;
     outputs_metadata.insert({output_name, output_metadata});
-    return net_flow::SoftmaxPostProcessOp::create(inputs_metadata, outputs_metadata);
+
+    return net_flow::SoftmaxOpMetadata::create(inputs_metadata, outputs_metadata, network_name);
 }
 
-Expected<std::shared_ptr<net_flow::Op>> create_logits_op(const ProtoHEFOp &op_proto, const std::map<size_t, size_t> &input_to_output_pads,
-    const std::map<size_t, ProtoHEFPad> &pad_index_to_pad_data, NetFlowElement &net_flow_element,
-    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const ProtoHEFHwArch &hef_arch)
+Expected<std::shared_ptr<net_flow::OpMetadata>> create_logits_op_metadata(const ProtoHEFOp &op_proto,
+    const std::map<size_t, LayerInfo> &pad_index_to_streams_info, const std::map<size_t, size_t> &input_to_output_pads,
+    const ProtoHEFHwArch &hef_arch, const std::string &network_name)
 {
     // connect input_streams to net_flow element
     CHECK_AS_EXPECTED(op_proto.input_pads().size() == 1, HAILO_INVALID_HEF, "Logits op must have 1 input only");
     CHECK_AS_EXPECTED(op_proto.output_pads().size() == 1, HAILO_INVALID_HEF, "Logits op must have 1 output only");
     auto input_pad = op_proto.input_pads()[0];
     auto output_pad = op_proto.output_pads()[0];
+
+    // Op's input_pad is fed by core's output_pad
     CHECK_AS_EXPECTED(contains(input_to_output_pads, static_cast<size_t>(input_pad.index())), HAILO_INVALID_HEF,
         "Logits op is not connected to core-op");
     auto output_pad_index = input_to_output_pads.at(input_pad.index());
     CHECK_AS_EXPECTED(contains(pad_index_to_streams_info, output_pad_index), HAILO_INVALID_HEF,
         "Pad {} of post-process {} is not connected to any core output stream", input_pad.index(), op_proto.name());
 
-    // Data of the input_pad is taken from the output_pad of the core op
-    const auto &connected_output_pad = pad_index_to_pad_data.at(output_pad_index);
-    net_flow_element.input_streams.insert(connected_output_pad.name());
     // TODO: HRT-10603
-    const auto &op_input_stream = pad_index_to_streams_info.at(output_pad_index);
+    const auto &op_input_layer_info = pad_index_to_streams_info.at(output_pad_index);
     auto max_periph_bytes_from_hef = HefConfigurator::max_periph_bytes_value(DeviceBase::hef_arch_to_device_arch(hef_arch));
     CHECK_EXPECTED(max_periph_bytes_from_hef);
-    const auto max_periph_bytes = (0 == op_input_stream.max_shmifo_size) ? max_periph_bytes_from_hef.value():
-        MIN(max_periph_bytes_from_hef.value(), op_input_stream.max_shmifo_size);
-    const auto is_hw_padding_supported = HefConfigurator::is_hw_padding_supported(op_input_stream, max_periph_bytes);
-    net_flow_element.name = op_proto.name();
+    const auto max_periph_bytes = (0 == op_input_layer_info.max_shmifo_size) ? max_periph_bytes_from_hef.value():
+        MIN(max_periph_bytes_from_hef.value(), op_input_layer_info.max_shmifo_size);
+    const auto is_hw_padding_supported = HefConfigurator::is_hw_padding_supported(op_input_layer_info, max_periph_bytes);
 
     switch (op_proto.logits_op().logits_type()) {
         case ProtoHEFLogitsType::PROTO_HEF_ARGMAX_TYPE: {
-            net_flow_element.op_type = HAILO_NET_FLOW_OP_TYPE_ARGMAX;
-            return create_argmax_op(connected_output_pad, output_pad, input_pad.name(), output_pad.name(), is_hw_padding_supported);
+            return create_argmax_op_metadata(op_input_layer_info, output_pad, output_pad.name(), is_hw_padding_supported, network_name);
         }
         case ProtoHEFLogitsType::PROTO_HEF_SOFTMAX_TYPE: {
-            net_flow_element.op_type = HAILO_NET_FLOW_OP_TYPE_SOFTMAX;
-            return create_softmax_op(connected_output_pad, output_pad, input_pad.name(), output_pad.name());
+            return create_softmax_op_metadata(op_input_layer_info, output_pad, output_pad.name(), network_name);
         }
         default: {
             LOGGER__ERROR("Invalid Net-Flow Logits-Op {}", ProtoHEFLogitsType_Name(op_proto.logits_op().logits_type()));
@@ -1120,10 +1205,11 @@ Expected<std::shared_ptr<net_flow::Op>> create_logits_op(const ProtoHEFOp &op_pr
         }
     }
 }
-Expected<std::vector<std::shared_ptr<NetFlowElement>>> Hef::Impl::create_net_flow_ops(const ProtoHEFNetworkGroup &network_group_proto,
+
+Expected<std::vector<net_flow::PostProcessOpMetadataPtr>> Hef::Impl::create_ops_metadata(const ProtoHEFNetworkGroup &network_group_proto,
     CoreOpMetadata &core_op_metadata, const ProtoHEFHwArch &hef_arch) const
 {
-    std::vector<std::shared_ptr<NetFlowElement>> result;
+    std::vector<net_flow::PostProcessOpMetadataPtr> result;
     if (!m_supported_features.hailo_net_flow) {
         return result;
     }
@@ -1138,15 +1224,9 @@ Expected<std::vector<std::shared_ptr<NetFlowElement>>> Hef::Impl::create_net_flo
     for (auto &pad_edge : network_group_proto.pad_edges()) {
         input_to_output_pads.insert({pad_edge.dst(), pad_edge.src()});
     }
-    std::map<size_t, ProtoHEFPad> pad_index_to_pad_data;
-    for (auto &op_proto : network_group_proto.ops()) {
-        for (auto &output_pad : op_proto.output_pads()) {
-            pad_index_to_pad_data.insert({output_pad.index(), output_pad});
-        }
-        for (auto &input_pad : op_proto.input_pads()) {
-            pad_index_to_pad_data.insert({input_pad.index(), input_pad});
-        }
-    }
+
+    auto net_group_name = HefUtils::get_network_group_name(network_group_proto, m_supported_features);
+    auto network_name = HailoRTDefaults::get_network_name(net_group_name);
 
     for (auto &op_proto : network_group_proto.ops()) {
         switch (op_proto.op_case()) {
@@ -1154,23 +1234,6 @@ Expected<std::vector<std::shared_ptr<NetFlowElement>>> Hef::Impl::create_net_flo
                 break;
             }
             case ProtoHEFOp::kNmsOp: {
-                hailo_format_t output_format{};
-                output_format.order = HAILO_FORMAT_ORDER_HAILO_NMS; // TODO Remove- HRT-9737
-
-                NetFlowElement net_flow_element{};
-                net_flow_element.op_type = HAILO_NET_FLOW_OP_TYPE_NMS;
-
-                // TODO: HRT-9902 - Move nms_info to be an op member instead of NetFlowElement
-                net_flow_element.nms_info = {
-                    op_proto.nms_op().classes(),
-                    op_proto.nms_op().max_proposals_per_class(),
-                    sizeof(hailo_bbox_float32_t),
-                    1, // input_division_factor
-                    false,
-                    hailo_nms_defuse_info_t(),
-                    DEFAULT_NMS_NO_BURST_SIZE,
-                    HAILO_BURST_TYPE_NO_BURST
-                };
                 for (auto &input_pad : op_proto.input_pads()) {
                     CHECK_AS_EXPECTED(contains(input_to_output_pads, static_cast<size_t>(input_pad.index())), HAILO_INVALID_HEF,
                         "NMS op is not connected to core-op");
@@ -1178,34 +1241,43 @@ Expected<std::vector<std::shared_ptr<NetFlowElement>>> Hef::Impl::create_net_flo
                     CHECK_AS_EXPECTED(contains(pad_index_to_streams_info, output_pad_index), HAILO_INVALID_HEF,
                         "Pad {} of post-process {} is not connected to any core output stream",
                             input_pad.index(), op_proto.name());
-                    const auto &op_input_stream = pad_index_to_streams_info.at(output_pad_index);
-                    net_flow_element.input_streams.insert(op_input_stream.name);  
                 }
-                std::shared_ptr<net_flow::Op> post_process_op;
+
+                net_flow::PostProcessOpMetadataPtr post_process_op_metadata;
                 switch (op_proto.nms_op().nms_op_case()) {
                     case ProtoHEFNmsOp::kYoloNmsOp: {
-                        net_flow_element.name = "YOLO-Post-Process";
-                        auto expected_post_process_op = create_yolov5_op(op_proto, output_format, pad_index_to_streams_info, input_to_output_pads);
-                        CHECK_EXPECTED(expected_post_process_op);
-                        post_process_op = expected_post_process_op.release();
+                        auto expected_post_process_op_metadata = create_yolov5_op_metadata(op_proto, pad_index_to_streams_info,
+                            input_to_output_pads, network_name);
+                        CHECK_EXPECTED(expected_post_process_op_metadata);
+                        post_process_op_metadata = expected_post_process_op_metadata.release();
                         break;
                     }
                     case ProtoHEFNmsOp::kYoloxNmsOp: {
-                        net_flow_element.name = "YOLOX-Post-Process";
-                        auto expected_post_process_op = create_yolox_op(op_proto, output_format, pad_index_to_streams_info, input_to_output_pads);
-                        CHECK_EXPECTED(expected_post_process_op);
-                        post_process_op = expected_post_process_op.release();
+                        auto expected_post_process_op_metadata = create_yolox_op_metadata(op_proto, pad_index_to_streams_info,
+                           input_to_output_pads, network_name);
+                        CHECK_EXPECTED(expected_post_process_op_metadata);
+                        post_process_op_metadata = expected_post_process_op_metadata.release();
                         break;
                     }
                     case ProtoHEFNmsOp::kSsdNmsOp: {
-                        net_flow_element.name = "SSD-Post-Process";
-                        auto expected_post_process_op = create_ssd_op(op_proto, output_format, pad_index_to_streams_info, input_to_output_pads);
-                        CHECK_EXPECTED(expected_post_process_op);
-                        post_process_op = expected_post_process_op.release();
+                        auto expected_post_process_op_metadata = create_ssd_op_metadata(op_proto, pad_index_to_streams_info,
+                           input_to_output_pads, network_name);
+                        CHECK_EXPECTED(expected_post_process_op_metadata);
+                        post_process_op_metadata = expected_post_process_op_metadata.release();
                         break;
                     }
                     case ProtoHEFNmsOp::kIouOp: {
-                        // TODO (HRT-8827)
+                        auto expected_post_process_op_metadata = create_iou_op_metadata(op_proto, pad_index_to_streams_info,
+                           input_to_output_pads, network_name);
+                        CHECK_EXPECTED(expected_post_process_op_metadata);
+                        post_process_op_metadata = expected_post_process_op_metadata.release();
+                        break;
+                    }
+                    case ProtoHEFNmsOp::kYoloSegOp: {
+                        auto expected_post_process_op_metadata = create_yolov5_seg_op_metadata(op_proto, pad_index_to_streams_info,
+                           input_to_output_pads, network_name);
+                        CHECK_EXPECTED(expected_post_process_op_metadata);
+                        post_process_op_metadata = expected_post_process_op_metadata.release();
                         break;
                     }
                     default: {
@@ -1213,51 +1285,17 @@ Expected<std::vector<std::shared_ptr<NetFlowElement>>> Hef::Impl::create_net_flo
                         return make_unexpected(HAILO_INTERNAL_FAILURE);
                     }
                 }
-                net_flow_element.op = post_process_op;
-                // Fill meta-data output vstream info
-                auto net_group_name = HefUtils::get_network_group_name(network_group_proto, m_supported_features);
-                auto network_name = HailoRTDefaults::get_network_name(net_group_name);
-                hailo_vstream_info_t net_flow_output_vstream_info{};
-                assert(op_proto.output_pads().size() == 1);
-                auto proto_output_pad = op_proto.output_pads()[0];
-                strncpy(net_flow_output_vstream_info.name, proto_output_pad.name().c_str(), proto_output_pad.name().length() + 1);
-                strncpy(net_flow_output_vstream_info.network_name, network_name.c_str(), network_name.length() + 1);
-                net_flow_output_vstream_info.direction = HAILO_D2H_STREAM;
-                net_flow_output_vstream_info.format = output_format;
-                net_flow_output_vstream_info.nms_shape.max_bboxes_per_class = op_proto.nms_op().max_proposals_per_class();
-                net_flow_output_vstream_info.nms_shape.number_of_classes = op_proto.nms_op().classes();
-                if (op_proto.nms_op().background_removal()) {
-                    net_flow_output_vstream_info.nms_shape.number_of_classes--;
-                    net_flow_element.nms_info.number_of_classes--;
-                }
-                net_flow_element.output_vstream_info = net_flow_output_vstream_info;
 
-                auto net_flow_element_ptr = make_shared_nothrow<NetFlowElement>(net_flow_element);
-                CHECK_NOT_NULL_AS_EXPECTED(net_flow_element_ptr, HAILO_OUT_OF_HOST_MEMORY);
-                result.push_back(net_flow_element_ptr);
+                result.push_back(post_process_op_metadata);
                 break;
             }
             case ProtoHEFOp::kLogitsOp: {
-                NetFlowElement net_flow_element{};
-                auto expected_logits_op = create_logits_op(op_proto, input_to_output_pads, pad_index_to_pad_data, net_flow_element,
-                    pad_index_to_streams_info, hef_arch);
-                CHECK_EXPECTED(expected_logits_op);
-                net_flow_element.op = expected_logits_op.release();
-
-                hailo_vstream_info_t net_flow_output_vstream_info{};
-                auto proto_output_pad = op_proto.output_pads()[0];
-                auto net_group_name = HefUtils::get_network_group_name(network_group_proto, m_supported_features);
-                auto network_name = HailoRTDefaults::get_network_name(net_group_name);
-                strncpy(net_flow_output_vstream_info.name, proto_output_pad.name().c_str(), proto_output_pad.name().length() + 1);
-                strncpy(net_flow_output_vstream_info.network_name, network_name.c_str(), network_name.length() + 1);
-                net_flow_output_vstream_info.direction = HAILO_D2H_STREAM;
-                net_flow_output_vstream_info.format = net_flow_element.op.get()->outputs_metadata().begin()->second.format;
-                net_flow_output_vstream_info.shape = net_flow_element.op.get()->outputs_metadata().begin()->second.shape;
-                net_flow_element.output_vstream_info = net_flow_output_vstream_info;
-
-                auto net_flow_element_ptr = make_shared_nothrow<NetFlowElement>(net_flow_element);
-                CHECK_NOT_NULL_AS_EXPECTED(net_flow_element_ptr, HAILO_OUT_OF_HOST_MEMORY);
-                result.push_back(net_flow_element_ptr);
+                auto expected_logits_op_metadata = create_logits_op_metadata(op_proto, pad_index_to_streams_info,
+                    input_to_output_pads, hef_arch, network_name);
+                CHECK_EXPECTED(expected_logits_op_metadata);
+                auto post_process_op_metadata = expected_logits_op_metadata.release();
+
+                result.push_back(post_process_op_metadata);
                 break;
             }
             default: {
@@ -1294,51 +1332,11 @@ hailo_status Hef::Impl::validate_boundary_streams_were_created(const std::string
     CHECK_EXPECTED_AS_STATUS(number_of_inputs);
     CHECK((number_of_outputs.value() == core_op->get_output_streams().size()),
         HAILO_INVALID_ARGUMENT, "passed configure_params for network group {} did not contain all output streams", network_group_name);
-    
-    return HAILO_SUCCESS;
-}
-
-hailo_status get_hw_padding_params(hailo_format_order_t format_order, uint32_t width, uint32_t features, uint32_t hw_data_bytes, 
-    uint16_t &feature_padding_payload, uint16_t &periph_bytes_per_buffer)
-{
-    uint32_t feature_padding_payload_32bit = 0; 
-    uint32_t periph_bytes_per_buffer_32bit = 0;
-
-    // TODO: HRT-3278 dont assume core_buffers_per_frame == height    
-    switch (format_order)
-    {
-    case HAILO_FORMAT_ORDER_NHCW:
-    case HAILO_FORMAT_ORDER_NHW:
-        feature_padding_payload_32bit = width * hw_data_bytes;
-        periph_bytes_per_buffer_32bit = feature_padding_payload_32bit * features;
-        break;
-    case HAILO_FORMAT_ORDER_NHWC:
-    case HAILO_FORMAT_ORDER_FCR:
-    case HAILO_FORMAT_ORDER_F8CR:
-    case HAILO_FORMAT_ORDER_NC:
-    case HAILO_FORMAT_ORDER_BAYER_RGB:
-    case HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB:
-    case HAILO_FORMAT_ORDER_RGB888:
-        feature_padding_payload_32bit = features * hw_data_bytes;
-        periph_bytes_per_buffer_32bit = feature_padding_payload_32bit * width;
-        break;
-    default:
-        LOGGER__ERROR("unsupported format for HW padding");
-        return HAILO_INTERNAL_FAILURE;
-    }
-
-    CHECK(IS_FIT_IN_UINT16(feature_padding_payload_32bit), HAILO_INVALID_HEF, 
-        "frame width {} is too big", feature_padding_payload_32bit);
-    CHECK(IS_FIT_IN_UINT16(periph_bytes_per_buffer_32bit), HAILO_INVALID_HEF,
-        "unpadded bytes per buffer {} is too big", periph_bytes_per_buffer_32bit);
-
-    feature_padding_payload = static_cast<uint16_t>(feature_padding_payload_32bit);
-    periph_bytes_per_buffer = static_cast<uint16_t>(periph_bytes_per_buffer_32bit);
 
     return HAILO_SUCCESS;
 }
 
-Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_config(hailo_format_order_t format_order, uint32_t width, uint32_t features,
+Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_config(uint32_t width,
     uint32_t hw_data_bytes, uint16_t core_buffers_per_frame, uint16_t core_bytes_per_buffer, bool hw_padding_supported, bool is_ddr,
     uint16_t periph_buffers_per_frame, uint16_t periph_bytes_per_buffer)
 {
@@ -1350,16 +1348,20 @@ Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_
     stream_config.periph_buffers_per_frame = periph_buffers_per_frame;
     stream_config.periph_bytes_per_buffer = periph_bytes_per_buffer;
 
+    // Set default to false and in case of extension enabled update value
+    stream_config.is_periph_calculated_in_hailort = false;
+
     /* For DDR buffering - core buffers is depended on the amount of buffers per PCIe interrupt. No HW padding required */
     if (is_ddr) {
-        stream_config.core_buffers_per_frame = 1;
         stream_config.feature_padding_payload = 0;
     } else {
         if (hw_padding_supported) {
-            auto status = get_hw_padding_params(format_order, width, features, hw_data_bytes,
-                stream_config.feature_padding_payload, stream_config.periph_bytes_per_buffer);
-            CHECK_SUCCESS_AS_EXPECTED(status);
-            stream_config.periph_buffers_per_frame = core_buffers_per_frame;
+            // We currently only support HW padding in hailort with format HAILO_FORMAT_ORDER_NHCW - which is padded by feature
+            // Padding should not affect the periph register values.
+            const uint32_t feature_padding_payload_32bit = static_cast<uint32_t>(width) * static_cast<uint32_t>(hw_data_bytes);
+            CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(width * hw_data_bytes), HAILO_INVALID_HEF, "frame width {} is too big",
+                feature_padding_payload_32bit);
+            stream_config.feature_padding_payload = static_cast<uint16_t>(feature_padding_payload_32bit);
         } else {
             stream_config.feature_padding_payload = 0;
         }
@@ -1378,9 +1380,6 @@ Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_
     CHECK_AS_EXPECTED(IS_FIT_IN_UINT16(edge_layer.core_buffers_per_frame()), HAILO_INVALID_HEF,
         "core_buffers_per_frame is too big");
 
-    auto format_order_exp = HailoRTDefaults::get_device_format_order(edge_layer.format());
-    CHECK_EXPECTED(format_order_exp);
-    auto format_order = format_order_exp.release();
     auto is_ddr = ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__DDR == edge_connection_type;
 
     CHECK_AS_EXPECTED(IS_FIT_IN_UINT32(edge_layer.padded_width() * edge_layer.padded_features() *
@@ -1388,18 +1387,24 @@ Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_
 
     // TODO HRT-10993: Remove these parameters for the parse_nn_stream_config function call
     // These values will get overrided in update_layer_info in resource_manager_builder - except in case of
-    // MIPI stream with hw padding supported (HRT-11030)
-    // TODO HRT-11030 - in MIPI with hw padding supported - in this case because the layer thinks hw padding is
-    // supported it wont recalculate periph values , but when creating the InputStreamBase - it will not use hw padding
-    // and then will take the initial values. Should fix this behavior
-    const uint16_t INITIAL_PERIPH_BYTES_PER_BUFFER = static_cast<uint16_t>(edge_layer.core_bytes_per_buffer());
+    // where we dont have resource manager (ethernet)
+    uint16_t initial_periph_bytes_per_buffer = static_cast<uint16_t>(edge_layer.core_bytes_per_buffer());
     const uint16_t INITIAL_PERIPH_BUFFERS_PER_FRAME = static_cast<uint16_t>(edge_layer.core_buffers_per_frame());
 
+    // If hw padding is enabled - and shape fits in uint16t - change initial periph value to be row size - in any case
+    // Will get updated if there is resource manager - and in ethernet will have either core register values - and if hw 
+    // padding will have hw padding values
+    if (hw_padding_supported) {
+        if (IS_FIT_IN_UINT16(edge_layer.width() * edge_layer.features() * edge_layer.data_bytes())) {
+            initial_periph_bytes_per_buffer = static_cast<uint16_t>(edge_layer.width() * edge_layer.features() *
+                edge_layer.data_bytes());
+        }
+    }
+
     // Width and features only used in case hw_padding is supported. In that case, they represent the HW shape (without padding)
-    return parse_nn_stream_config(format_order, edge_layer.width(), edge_layer.features(),
-        edge_layer.data_bytes(), static_cast<uint16_t>(edge_layer.core_buffers_per_frame()),
+    return parse_nn_stream_config(edge_layer.width(),edge_layer.data_bytes(), static_cast<uint16_t>(edge_layer.core_buffers_per_frame()),
         static_cast<uint16_t>(edge_layer.core_bytes_per_buffer()), hw_padding_supported, is_ddr,
-        INITIAL_PERIPH_BUFFERS_PER_FRAME, INITIAL_PERIPH_BYTES_PER_BUFFER);
+        INITIAL_PERIPH_BUFFERS_PER_FRAME, initial_periph_bytes_per_buffer);
 }
 
 Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_config(const LayerInfo &edge_layer, bool hw_padding_supported)
@@ -1409,12 +1414,12 @@ Expected<CONTROL_PROTOCOL__nn_stream_config_t> HefConfigurator::parse_nn_stream_
     assert(LayerType::BOUNDARY == edge_layer.type);
     const auto is_ddr = false; // This function is called only on boundary layers, so no DDR
 
-    return parse_nn_stream_config(edge_layer.format.order, edge_layer.hw_shape.width, edge_layer.hw_shape.features,
-        edge_layer.hw_data_bytes, edge_layer.nn_stream_config.core_buffers_per_frame, 
+    return parse_nn_stream_config(edge_layer.hw_shape.width, edge_layer.hw_data_bytes, edge_layer.nn_stream_config.core_buffers_per_frame, 
         edge_layer.nn_stream_config.core_bytes_per_buffer, hw_padding_supported, is_ddr, edge_layer.nn_stream_config.periph_buffers_per_frame, 
         edge_layer.nn_stream_config.periph_bytes_per_buffer);
 }
 
+// TODO HRT-11452: change to use hw consts
 Expected<uint32_t> HefConfigurator::max_periph_bytes_value(const hailo_device_architecture_t hw_arch)
 {
     switch (hw_arch) {
@@ -1422,7 +1427,8 @@ Expected<uint32_t> HefConfigurator::max_periph_bytes_value(const hailo_device_ar
         case HAILO_ARCH_HAILO8:
         case HAILO_ARCH_HAILO8L:
             return HAILO8_INBOUND_DATA_STREAM_SIZE;
-        case HAILO_ARCH_HAILO15:
+        case HAILO_ARCH_HAILO15H:
+        case HAILO_ARCH_PLUTO:
             return HAILO15_PERIPH_BYTES_PER_BUFFER_MAX_SIZE;
         default:
             LOGGER__ERROR("Unknown device architecture!");
@@ -1879,20 +1885,41 @@ hailo_status HefUtils::fill_layer_info(const ProtoHEFEdgeLayerInfo &info,
     layer_info.quant_info.qp_scale = info.numeric_info().qp_scale();
     layer_info.quant_info.qp_zp = info.numeric_info().qp_zp();
 
-    for (uint32_t i = 0; i < layer_info.shape.features; i++) {
+    int number_of_qps = (HailoRTCommon::is_nms(layer_info.format.order)) ? NMS_NUMBER_OF_QPS : layer_info.shape.features;
+    if (supported_features.output_scale_by_feature) {
+        CHECK((info.numeric_info().qp_zps().size() == number_of_qps) && (info.numeric_info().qp_scales().size() == number_of_qps),
+            HAILO_INVALID_HEF, "Invalid quantization infos vector in HEF!");
+        // We set those values to 0 to idicate that we work with scale by feature
+        layer_info.quant_info = INVALID_QP;
+    }
+
+    for (int i = 0; i < number_of_qps; i++) {
         hailo_quant_info_t quant_info = {};
+        quant_info.limvals_min = info.numeric_info().limvals_min();
+        quant_info.limvals_max = info.numeric_info().limvals_max();
         if (supported_features.output_scale_by_feature) {
             quant_info.qp_zp = static_cast<float32_t>(info.numeric_info().qp_zps()[i]);
             quant_info.qp_scale = static_cast<float32_t>(info.numeric_info().qp_scales()[i]);
         } else {
             quant_info.qp_zp =  info.numeric_info().qp_zp();
             quant_info.qp_scale =  info.numeric_info().qp_scale();
+            layer_info.quant_infos.push_back(std::move(quant_info));
+            break; // When working without scale by feature, vector length will always be one
         }
-        quant_info.limvals_min = info.numeric_info().limvals_min();
-        quant_info.limvals_max = info.numeric_info().limvals_max();
         layer_info.quant_infos.push_back(std::move(quant_info));
     }
 
+    if (HAILO_H2D_STREAM == direction) {
+        bool are_all_qps_the_same = true;
+        for (const auto &quant_info : layer_info.quant_infos) {
+            if (0 != memcmp(&quant_info, &layer_info.quant_infos[0], sizeof(quant_info))) {
+                are_all_qps_the_same = false;
+                break;
+            }
+        }
+        CHECK(are_all_qps_the_same, HAILO_INVALID_HEF, "Different quantization infos are not allowed for input streams (H2D)!");
+    }
+
     // Simulation info
     assert (1 == info.edge_layer_base().buffer_indices_size());
     layer_info.buffer_indices.cluster_index = info.edge_layer_base().buffer_indices(0).cluster_index();
@@ -2029,6 +2056,86 @@ hailo_status HefUtils::fill_mux_info(const ProtoHEFEdgeLayerMux &info,
     return HAILO_SUCCESS;
 }
 
+Expected<hailo_format_order_t> convert_planes_format_to_hailo_format_order(const ProtoHEFEPlanesFormat &planes_format)
+{
+    switch (planes_format) {
+    case ProtoHEFEPlanesFormat::PROTO__PLANES__FORMAT__NV12:
+        return HAILO_FORMAT_ORDER_NV12;
+    case ProtoHEFEPlanesFormat::PROTO__PLANES__FORMAT__NV21:
+        return HAILO_FORMAT_ORDER_NV21;
+    case ProtoHEFEPlanesFormat::PROTO__PLANES__FORMAT__I420:
+        return HAILO_FORMAT_ORDER_I420;
+    default:
+        LOGGER__ERROR("Invalid planes format");
+        return make_unexpected(HAILO_INVALID_HEF);
+    }
+}
+
+hailo_status HefUtils::fill_planes_info(const ProtoHEFEdgeLayerPlanes &info,
+    const ProtoHEFEdgeConnectionType &edge_connection_type, 
+    const ProtoHEFCoreOpMock &core_op, hailo_stream_direction_t direction,
+    bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name, 
+    uint8_t network_index, LayerInfo &layer_info, const SupportedFeatures &supported_features, const ProtoHEFHwArch &hef_arch)
+{
+    auto layer_type = get_layer_type(edge_connection_type);
+    CHECK_EXPECTED_AS_STATUS(layer_type);
+    layer_info.type = layer_type.value();
+    layer_info.direction = direction;
+
+    layer_info.shape.height = info.height();
+    layer_info.hw_shape.height = info.height();
+    layer_info.shape.width = info.width();
+    layer_info.hw_shape.width = info.width();
+    layer_info.shape.features = info.features();
+    layer_info.hw_shape.features = info.features();
+
+    auto format_order = convert_planes_format_to_hailo_format_order(info.planes_format());
+    CHECK_EXPECTED_AS_STATUS(format_order);
+    layer_info.format.order = format_order.release();
+    layer_info.format.flags = HAILO_FORMAT_FLAGS_NONE;
+    layer_info.quant_info = {}; // quant_info doesnt make any sense as this is a logical layer
+    layer_info.quant_infos = std::vector<hailo_quant_info_t>(1); // quant_info doesnt make any sense as this is a logical layer
+
+    CHECK(HAILO_MAX_STREAM_NAME_SIZE >= (info.name().length() + 1), HAILO_INTERNAL_FAILURE,
+        "The edge layer '{}' has a too long name (max is HAILO_MAX_STREAM_NAME_SIZE)", info.name());
+    CHECK(HAILO_MAX_NETWORK_NAME_SIZE >= (partial_network_name.length() + 1), HAILO_INTERNAL_FAILURE,
+        "The network '{}' has a too long name (max is HAILO_MAX_NETWORK_NAME_SIZE)", partial_network_name);
+
+    layer_info.name = info.name();
+    layer_info.network_name = HefUtils::get_network_name(core_op, partial_network_name);
+    layer_info.network_index = network_index;
+    layer_info.is_multi_planar = true;
+    layer_info.planes.reserve(info.planes_size());
+
+    for (uint8_t i = 0; i < info.planes_size(); i++) {
+        LayerInfo temp_layer = {};
+        if (info.planes(i).edge_case() == ProtoHefEdge::kLayerInfo) {
+            auto status = fill_layer_info(info.planes(i).layer_info(), edge_connection_type, core_op,
+                direction, hw_padding_supported, context_index, partial_network_name, network_index, temp_layer,
+                supported_features, hef_arch);
+            CHECK_SUCCESS(status);
+            temp_layer.plane_index = i;
+            layer_info.planes.push_back(temp_layer);
+        } else {
+            LOGGER__ERROR("Invalid layer type - only info layers are acceptible under a planes layer");
+            return HAILO_INTERNAL_FAILURE;
+            break;
+        }
+    }
+    // hw_data_bytes doesnt make any sense as this is a logical layer. we set the hw_data_bytes of one of its underlying layers
+    layer_info.hw_data_bytes = layer_info.planes.begin()->hw_data_bytes;
+    CHECK(std::all_of(layer_info.planes.begin(), layer_info.planes.end(),
+            [&layer_info](const auto &underlying_layer) {
+                return underlying_layer.hw_data_bytes == layer_info.hw_data_bytes;
+            }),
+        HAILO_INVALID_HEF, "Not all underlying layers of {} has the same format type", layer_info.name);
+    auto type = HailoRTCommon::get_format_type(layer_info.hw_data_bytes);
+    CHECK_EXPECTED_AS_STATUS(type);
+    layer_info.format.type = type.value();
+
+    return HAILO_SUCCESS;
+}
+
 hailo_status HefUtils::fill_boundary_layers_info(
     const ProtoHEFCoreOpMock &core_op,
     const uint8_t context_index,
@@ -2311,7 +2418,14 @@ static Expected<ContextSwitchConfigActionPtr> parse_action(const ProtoHEFAction
 
             const auto nms_unit_index = static_cast<uint8_t>(proto_action.enable_nms().nms_unit_index());
 
-            return EnableNmsAction::create(nms_unit_index, network_index, number_of_classes, burst_size);
+            CHECK_AS_EXPECTED(IS_FIT_IN_UINT8(proto_action.enable_nms().division_factor()), HAILO_INVALID_HEF,
+                "Failed to parse HEF. Invalid division factor: {}.", proto_action.enable_nms().division_factor());
+
+            // If division_factor is not defined - use division_factor = 1
+            const auto division_factor = (0 == proto_action.enable_nms().division_factor()) ? 
+                DEFAULT_DIVISION_FACTOR : static_cast<uint8_t>(proto_action.enable_nms().division_factor());
+
+            return EnableNmsAction::create(nms_unit_index, network_index, number_of_classes, burst_size, division_factor);
         }
 
         case ProtoHEFAction::kWriteDataByType:
@@ -2538,6 +2652,69 @@ Expected<std::vector<ContextMetadata>> HefUtils::parse_dynamic_contexts(const Pr
     return contexts_metadata;
 }
 
+static Expected<hailo_nms_burst_type_t> get_nms_burst_mode(const ProtoHEFNmsInfo &nms_info,
+    const ProtoHEFHwArch &hef_arch)
+{
+    switch (hef_arch) {
+    case PROTO__HW_ARCH__HAILO8:
+    case PROTO__HW_ARCH__HAILO8P:
+    case PROTO__HW_ARCH__HAILO8R:
+    case PROTO__HW_ARCH__SAGE_B0:
+    case PROTO__HW_ARCH__HAILO8L:
+        // First generation of hw NMS - included in hailo8.
+        switch (nms_info.burst_type()) {
+        case PROTO__NMS_BURST_TYPE__H8_PER_CLASS:
+            return HAILO_BURST_TYPE_H8_PER_CLASS;
+        default:
+            LOGGER__ERROR("Unsupported burst type was given {} for arch {}", nms_info.burst_type(), hef_arch);
+            return make_unexpected(HAILO_INVALID_HEF);
+        }
+    case PROTO__HW_ARCH__HAILO15H:
+    case PROTO__HW_ARCH__GINGER:
+    case PROTO__HW_ARCH__LAVENDER:
+    case PROTO__HW_ARCH__PLUTO:
+        // Second generation of hw NMS - included in hailo15 and pluto.
+        switch (nms_info.burst_type()) {
+        case PROTO__NMS_BURST_TYPE__H15_PER_CLASS:
+            return HAILO_BURST_TYPE_H15_PER_CLASS;
+        case PROTO__NMS_BURST_TYPE__H15_PER_FRAME:
+            return HAILO_BURST_TYPE_H15_PER_FRAME;
+        default:
+            LOGGER__ERROR("Unsupported burst type was given {} for arch {}", nms_info.burst_type(), hef_arch);
+            return make_unexpected(HAILO_INVALID_HEF);
+        }
+    default:
+        LOGGER__ERROR("Not supported hef arch {}", hef_arch);
+        return make_unexpected(HAILO_INTERNAL_FAILURE);
+    }
+}
+
+static Expected<hailo_nms_burst_type_t> get_nms_bbox_mode(const ProtoHEFNmsInfo &nms_info,
+    const ProtoHEFHwArch &hef_arch)
+{
+    CHECK_AS_EXPECTED(0 == nms_info.burst_type(),
+        HAILO_INVALID_HEF, "Invalid HEF, nms burst extension is disabled yet burst type {} is not zero",
+        nms_info.burst_type());
+
+    switch (hef_arch) {
+    case PROTO__HW_ARCH__HAILO8:
+    case PROTO__HW_ARCH__HAILO8P:
+    case PROTO__HW_ARCH__HAILO8R:
+    case PROTO__HW_ARCH__SAGE_B0:
+    case PROTO__HW_ARCH__HAILO8L:
+        return HAILO_BURST_TYPE_H8_BBOX;
+    case PROTO__HW_ARCH__HAILO15H:
+    case PROTO__HW_ARCH__GINGER:
+    case PROTO__HW_ARCH__LAVENDER:
+    case PROTO__HW_ARCH__PLUTO:
+        return HAILO_BURST_TYPE_H15_BBOX;
+
+    default:
+        LOGGER__ERROR("Not supported hef arch {}", hef_arch);
+        return make_unexpected(HAILO_INTERNAL_FAILURE);
+    }
+}
+
 Expected<hailo_nms_info_t> HefUtils::parse_proto_nms_info(const ProtoHEFNmsInfo &proto_nms_info, const bool burst_mode_enabled,
     const ProtoHEFHwArch &hef_arch)
 {
@@ -2549,26 +2726,20 @@ Expected<hailo_nms_info_t> HefUtils::parse_proto_nms_info(const ProtoHEFNmsInfo
 
     if (burst_mode_enabled) {
         nms_info.burst_size = static_cast<uint32_t>(proto_nms_info.burst_size());
-        nms_info.burst_type = static_cast<hailo_nms_burst_type_t>(proto_nms_info.burst_type());
 
-        CHECK_AS_EXPECTED(nms_info.burst_type != HAILO_BURST_TYPE_NO_BURST, HAILO_INVALID_HEF,
-            "Invalid HEF, nms burst type is no burst but burst extension is enabled");
+        auto burst_type = get_nms_burst_mode(proto_nms_info, hef_arch);
+        CHECK_EXPECTED(burst_type);
+        nms_info.burst_type = *burst_type;
 
-        CHECK_AS_EXPECTED((nms_info.burst_size * nms_info.bbox_size) <= MAX_NMS_BURST_SIZE,
+        CHECK_AS_EXPECTED((nms_info.burst_size * nms_info.bbox_size) <= HailoRTCommon::MAX_NMS_BURST_SIZE,
             HAILO_INVALID_HEF, "Invalid HEF, nms burst size {} larger than maximum burst size {}",
-            (nms_info.burst_size * nms_info.bbox_size), MAX_NMS_BURST_SIZE);
-
-        // Validate that burst type matches architecture
-        const auto dev_arch = DeviceBase::hef_arch_to_device_arch(hef_arch);
-        CHECK_AS_EXPECTED(LayerInfoUtils::validate_nms_burst_type(nms_info.burst_type, dev_arch), HAILO_INVALID_HEF,
-            "Invalid HEF, nms burst type {} on device architecture {}", nms_info.burst_type, dev_arch);
+            (nms_info.burst_size * nms_info.bbox_size), HailoRTCommon::MAX_NMS_BURST_SIZE);
     } else {
-        CHECK_AS_EXPECTED(HAILO_BURST_TYPE_NO_BURST == static_cast<hailo_nms_burst_type_t>(proto_nms_info.burst_type()),
-            HAILO_INVALID_HEF, "Invalid HEF, nms burst extension is disabled yet burst type is {}", nms_info.burst_type);
-
-        // In case of HAILO_BURST_TYPE_NO_BURST make burst size DEFAULT_NMS_NO_BURST_SIZE
+        // In case of bbox mode make burst size DEFAULT_NMS_NO_BURST_SIZE
         nms_info.burst_size = DEFAULT_NMS_NO_BURST_SIZE;
-        nms_info.burst_type = static_cast<hailo_nms_burst_type_t>(proto_nms_info.burst_type());
+        auto burst_type = get_nms_bbox_mode(proto_nms_info, hef_arch);
+        CHECK_EXPECTED(burst_type);
+        nms_info.burst_type = *burst_type;
     }
 
     if (nms_info.chunks_per_frame == 0) {
@@ -2624,6 +2795,12 @@ Expected<LayerInfo> HefUtils::get_boundary_layer_info(const ProtoHEFCoreOpMock &
             core_op, direction, hw_padding_supported, context_index, partial_network_name.value(), network_index, result,
             supported_features, hef_arch);
         CHECK_SUCCESS_AS_EXPECTED(status);
+    } else if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__PLANES == layer.edge_layer_type()) {
+        // TODO: return LayerInfo
+        auto status = fill_planes_info(layer.layer_planes(), layer.context_switch_info().edge_connection_type(), 
+            core_op, direction, hw_padding_supported, context_index, partial_network_name.value(), network_index, result,
+            supported_features, hef_arch);
+        CHECK_SUCCESS_AS_EXPECTED(status);
     } else {
         LOGGER__ERROR("Invalid layer type");
         return make_unexpected(HAILO_INTERNAL_FAILURE);
@@ -3008,6 +3185,14 @@ Expected<std::string> Hef::Impl::get_vstream_name_from_original_name(const std::
                         results = stream_name.value();
                     }
                 }
+            } else if (is_h2d_boundary_planes_layer(layer_info)) {
+                for (const auto &plane : layer_info.layer_planes().planes()) {
+                    for (const auto &name : plane.layer_info().original_names()) {
+                        if (original_name == name) {
+                            results = std::string(layer_info.layer_planes().name());
+                        }
+                    }
+                }
             }
         }
     }
@@ -3067,6 +3252,15 @@ Expected<std::vector<std::string>> Hef::Impl::get_original_names_from_vstream_na
                         return std::move(names.value());
                     }
                 }
+            } else if (is_h2d_boundary_planes_layer(layer_info)) {
+                if (vstream_name == layer_info.layer_planes().name()) {
+                    for (const auto &plane : layer_info.layer_planes().planes()) {
+                        for (const auto &name : plane.layer_info().original_names()) {
+                            results.push_back(name);
+                        }
+                    }
+                    return results;
+                }
             }
         }
     }
@@ -3086,6 +3280,8 @@ hailo_status Hef::Impl::validate_core_op_unique_layer_names(const ProtoHEFCoreOp
                     layer_name = layer.layer_info().name();
                 } else if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == layer.edge_layer_type()) {
                     layer_name = layer.layer_mux().name();
+                } else if (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__PLANES == layer.edge_layer_type()) {
+                    layer_name = layer.layer_planes().name();
                 } else {
                     LOGGER__ERROR("Invalid layer type.");
                     return HAILO_INVALID_HEF;
@@ -3167,7 +3363,7 @@ Expected<std::vector<hailo_network_group_info_t>> Hef::get_network_groups_infos(
 }
 
 Expected<std::vector<std::string>> Hef::Impl::get_stream_infos_description(const std::string &network_group_name, const std::string &network_name)
-{   
+{
     std::vector<std::string> infos_strings;
     auto input_stream_infos = get_input_stream_infos(network_group_name, network_name);
     CHECK_EXPECTED(input_stream_infos, "Failed to parse input stream infos");
@@ -3223,14 +3419,9 @@ Expected<std::vector<std::string>> Hef::Impl::get_post_processes_infos_descripti
 
     CHECK_AS_EXPECTED(contains(m_network_group_metadata, network_group_name), HAILO_INTERNAL_FAILURE);
 
-    auto post_process = m_network_group_metadata.at(network_group_name).m_net_flow_ops;
+    auto post_process = m_network_group_metadata.at(network_group_name).m_ops_metadata;
     for (const auto &post_process_info : post_process) {
-        infos_string = post_process_info->op->get_op_description();
-        if (HAILO_NET_FLOW_OP_TYPE_NMS == post_process_info->op_type) {
-
-            infos_string += ", Bbox size: " + std::to_string(post_process_info->nms_info.bbox_size) +
-                ", Max bboxes per class: " + std::to_string(post_process_info->nms_info.max_bboxes_per_class);
-        }
+        infos_string = post_process_info->get_op_description();
     }
     /* If the string is empty there is no need to continue. */
     if (infos_string.empty()) {
@@ -3270,8 +3461,10 @@ Expected<std::string> Hef::Impl::get_description(bool stream_infos, bool vstream
     for (const auto &network_group_info : network_group_infos.release()) {
         auto core_op_metadata = get_core_op_metadata(network_group_info.name);
         CHECK_EXPECTED(core_op_metadata);
-        auto number_of_contexts = core_op_metadata.value()->get_contexts_count();
-        auto contexts_str = (network_group_info.is_multi_context ? "Multi Context - Number of contexts: " + std::to_string(number_of_contexts) : "Single Context");
+        const auto number_of_dynamic_contexts = core_op_metadata.value()->get_dynamic_contexts_count();
+        auto contexts_str = network_group_info.is_multi_context ?
+            "Multi Context - Number of contexts: " + std::to_string(number_of_dynamic_contexts) :
+            "Single Context";
         hef_infos += "Network group name: " + std::string(network_group_info.name) + ", " + contexts_str + "\n";
 
         auto network_infos = get_network_infos(network_group_info.name);
@@ -3326,7 +3519,9 @@ Expected<std::vector<hailo_network_group_info_t>> Hef::Impl::get_network_groups_
         CHECK_AS_EXPECTED(HAILO_MAX_NETWORK_GROUP_NAME_SIZE >= (network_group_name.length() + 1), HAILO_INTERNAL_FAILURE,
             "The network group '{}' has a too long name (max is HAILO_MAX_NETWORK_GROUP_NAME_SIZE)", network_group_name);
         strncpy(info.name, network_group_name.c_str(), network_group_name.length() + 1);
-        info.is_multi_context = (1 < core_op.contexts.size());
+        const auto number_contexts = (ProtoHEFHwArch::PROTO__HW_ARCH__HAILO8L == get_device_arch()) ?
+            core_op.partial_core_ops[0]->core_op->contexts.size() : core_op.contexts.size();
+        info.is_multi_context = (1 < number_contexts);
         results.push_back(info);
     }
     return results;
index bdee7454095ec77e882b6f520e664e1e0a4ec42e..deafb2a561e8b94fdf60b5565a20be767ebf4ece 100644 (file)
 #include "hailo/hef.hpp"
 #include "hailo/network_group.hpp"
 #include "hailo/hailort_defaults.hpp"
+#include "net_flow/ops/op_metadata.hpp"
 
 #include "hef/core_op_metadata.hpp"
 #include "hef/layer_info.hpp"
 #include "hef/context_switch_actions.hpp"
 #include "net_flow/ops/op.hpp"
 #include "net_flow/pipeline/pipeline.hpp"
-#include "core_op/core_op.hpp"
 #include "device_common/control_protocol.hpp"
 
 #include "control_protocol.h"
@@ -52,6 +52,8 @@ extern "C" {
 namespace hailort
 {
 
+#define DEFAULT_NMS_NO_BURST_SIZE (1)
+
 class CoreOpMetadata;
 class CoreOp;
 using ProtoHEFNetworkGroupPtr = std::shared_ptr<ProtoHEFNetworkGroup>;
@@ -139,30 +141,20 @@ typedef enum {
     HAILO_NET_FLOW_OP_TYPE_MAX_ENUM          = HAILO_MAX_ENUM
 } hailo_net_flow_op_type_t;
 
-struct NetFlowElement
-{
-    std::string name;
-    std::shared_ptr<net_flow::Op> op;
-    std::set<std::string> input_streams;
-    hailo_nms_info_t nms_info;
-    hailo_net_flow_op_type_t op_type;
-    hailo_vstream_info_t output_vstream_info; // Should be vector?
-};
-
 const static uint32_t SUPPORTED_EXTENSIONS_BITSET_SIZE = 1000;
 static const std::vector<ProtoHEFExtensionType> SUPPORTED_EXTENSIONS = {
-    ABBALE, 
-    POSTED_WRITES, 
-    DDR, 
-    PADDED_DDR_BUFFERS, 
-    IS_MULTI_CONTEXTS, 
-    COMPRESSED_PARAMS, 
+    ABBALE,
+    POSTED_WRITES,
+    DDR,
+    PADDED_DDR_BUFFERS,
+    IS_MULTI_CONTEXTS,
+    COMPRESSED_PARAMS,
     TRANSPOSE_COMPONENT,
     IS_NMS_MULTI_CONTEXT,
     OFFLOAD_ARGMAX,
     KO_RUN_ASAP,
     HAILO_NET_FLOW,
-    HAILO_NET_FLOW_YOLO_NMS, // Extention added in platform 4.12 release
+    HAILO_NET_FLOW_YOLOV5_NMS, // Extention added in platform 4.12 release
     HAILO_NET_FLOW_SSD_NMS, // Extention added in platform 4.14 release
     WRITE_DATA_BY_TYPE, // Extention added in platform 4.14 release
     NMS_OUTPUT_BURST, // Extention added in platform 4.14 release
@@ -173,6 +165,8 @@ static const std::vector<ProtoHEFExtensionType> SUPPORTED_EXTENSIONS = {
     HAILO_NET_FLOW_YOLOX_NMS, // Extention added in platform 4.14 release
     OUTPUT_SCALE_PER_FEATURE, // Extension added in platform 4.14 release
     PERIPH_CALCULATION_IN_HAILORT, // Extension added in platform 4.14 release
+    HAILO_NET_FLOW_YOLOV5_SEG_NMS, // Extension added in platform 4.15 release
+    HAILO_NET_FLOW_IOU_NMS // Extension added in platform 4.15 release
 };
 
 static inline bool is_h2d_boundary_info_layer(const ProtoHEFEdgeLayer& layer)
@@ -207,6 +201,14 @@ static inline bool is_d2h_boundary_mux_layer(const ProtoHEFEdgeLayer& layer)
         (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__MUX == layer.edge_layer_type()));
 }
 
+static inline bool is_h2d_boundary_planes_layer(const ProtoHEFEdgeLayer& layer)
+{
+    return ((ProtoHEFEdgeLayerDirection::PROTO__EDGE_LAYER_DIRECTION__HOST_TO_DEVICE == layer.direction()) &&
+        (ProtoHEFEdgeConnectionType::PROTO__EDGE_CONNECTION_TYPE__BOUNDARY ==
+            layer.context_switch_info().edge_connection_type()) &&
+        (ProtoHEFEdgeLayerType::PROTO__EDGE_LAYER_TYPE__PLANES == layer.edge_layer_type()));
+}
+
 // TODO: Fix the circular dependency (with HRT-2899, InputStream/OutputStream related code will move elsewhere)
 class InputStreamBase;
 class OutputStreamBase;
@@ -312,7 +314,7 @@ public:
         uint32_t queue_size);
     // Also adds information to CoreOpMetadata
     // TODO: When supporting multiple core ops in same netflow - Change metadata param to a map of core_ops_metadata.
-    Expected<std::vector<std::shared_ptr<NetFlowElement>>> create_net_flow_ops(const ProtoHEFNetworkGroup &network_group_proto,
+    Expected<std::vector<net_flow::PostProcessOpMetadataPtr>> create_ops_metadata(const ProtoHEFNetworkGroup &network_group_proto,
         CoreOpMetadata &core_op_metadata, const ProtoHEFHwArch &hef_arch) const;
 
     // TODO: Should return map of NG's core_ops metadata?
@@ -348,6 +350,9 @@ public:
             for (auto &network_params : network_group_config_params.network_params_by_name) {
                 network_params.second.batch_size = network_group_config_params.batch_size;
             }
+
+            // Change batch_size to default for later update_network_batch_size runs.
+            network_group_config_params.batch_size = HAILO_DEFAULT_BATCH_SIZE;
         }
 
         return HAILO_SUCCESS;
@@ -403,7 +408,7 @@ private:
     SupportedFeatures m_supported_features;
     std::vector<ProtoHEFNetworkGroupPtr> m_groups;
     std::map<std::string, std::vector<ProtoHEFCoreOpMock>> m_core_ops_per_group;
-    std::map<std::string, std::vector<std::shared_ptr<NetFlowElement>>> m_post_process_ops_per_group;
+    std::map<std::string, std::vector<net_flow::PostProcessOpMetadataPtr>> m_post_process_ops_metadata_per_group;
     std::vector<ProtoHEFExtension> m_hef_extensions;
     std::vector<ProtoHEFOptionalExtension> m_hef_optional_extensions;
     std::bitset<SUPPORTED_EXTENSIONS_BITSET_SIZE> m_supported_extensions_bitset;
@@ -435,10 +440,9 @@ public:
     static bool is_hw_padding_supported(const ProtoHEFEdgeLayer &edge_layer, const uint32_t max_periph_bytes_value);
     static bool is_hw_padding_supported(const LayerInfo &layer_info, const uint32_t max_periph_bytes_value);
 private:
-    static Expected<CONTROL_PROTOCOL__nn_stream_config_t> parse_nn_stream_config(hailo_format_order_t format_order,
-        uint32_t width, uint32_t features, uint32_t hw_data_bytes, uint16_t core_buffers_per_frame,
-        uint16_t core_bytes_per_buffer, bool hw_padding_supported, bool is_ddr, uint16_t periph_buffers_per_frame,
-        uint16_t periph_bytes_per_buffer);
+    static Expected<CONTROL_PROTOCOL__nn_stream_config_t> parse_nn_stream_config(uint32_t width, uint32_t hw_data_bytes,
+        uint16_t core_buffers_per_frame, uint16_t core_bytes_per_buffer, bool hw_padding_supported, bool is_ddr,
+        uint16_t periph_buffers_per_frame, uint16_t periph_bytes_per_buffer);
 
     static bool is_hw_padding_supported(bool is_boundary, bool is_mux, hailo_format_order_t format_order,
         uint16_t core_buffers_per_frame, uint32_t height, uint32_t width, uint32_t features, uint32_t hw_data_bytes,
@@ -519,6 +523,12 @@ private:
         bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name, 
         uint8_t network_index, LayerInfo &layer_info, const SupportedFeatures &supported_features,
         const ProtoHEFHwArch &hef_arch);
+    static hailo_status fill_planes_info(const ProtoHEFEdgeLayerPlanes &info,
+        const ProtoHEFEdgeConnectionType &edge_connection_type,
+        const ProtoHEFCoreOpMock &core_op, hailo_stream_direction_t direction,
+        bool hw_padding_supported, const uint8_t context_index, const std::string &partial_network_name, 
+        uint8_t network_index, LayerInfo &layer_info, const SupportedFeatures &supported_features,
+        const ProtoHEFHwArch &hef_arch);
 };
 
 } /* namespace hailort */
index 2d0769fe1be9e8a7fc8fac166240013a1d4ff8c8..90c32147ac569c3249dcca41b5a9bcbcd1732c00 100644 (file)
@@ -28,6 +28,7 @@ namespace hailort
 #define INVALID_PAD_INDEX (UINT32_MAX)
 #define PERIPH_BYTES_PER_BUFFER_ALIGNMENT_SIZE (8)
 #define PERIPH_BYTES_PER_BUFFER_DDR_ALIGNMENT_SIZE (512)
+#define NMS_NUMBER_OF_QPS (2)
 
 enum class LayerType
 {
@@ -84,6 +85,11 @@ struct LayerInfo {
     uint32_t height_gcd;
     std::vector<uint32_t> height_ratios;
 
+    // Multi planes info
+    bool is_multi_planar;
+    std::vector<LayerInfo> planes;
+    uint8_t plane_index; // relevant for the underlying planes only
+
     // Defused nms info
     bool is_defused_nms;
     // TODO HRT-4441 change fused_layer from vector.
@@ -107,31 +113,38 @@ inline LayerIdentifier to_layer_identifier(const LayerInfo &info)
 
 class LayerInfoUtils {
 public:
-    static hailo_stream_info_t get_stream_info_from_layer_info(const LayerInfo &layer_info)
+    static std::vector<hailo_stream_info_t> get_stream_infos_from_layer_info(const LayerInfo &layer_info)
     {
-        hailo_stream_info_t res = {};
-        res.hw_data_bytes = layer_info.hw_data_bytes;
-        res.format = layer_info.format;
-        if (HAILO_FORMAT_ORDER_HAILO_NMS == res.format.order) {
-            res.nms_info = layer_info.nms_info;
-            res.hw_frame_size =
-                HailoRTCommon::get_nms_hw_frame_size(res.nms_info);
-        } else {
-            res.shape.height = layer_info.shape.height;
-            res.shape.width = layer_info.shape.width;
-            res.shape.features = layer_info.shape.features;
-            res.hw_shape.height = layer_info.hw_shape.height;
-            res.hw_shape.width = layer_info.hw_shape.width;
-            res.hw_shape.features = layer_info.hw_shape.features;
-            res.hw_frame_size =
-                res.hw_shape.height * res.hw_shape.width * res.hw_shape.features * res.hw_data_bytes;
+        std::vector<hailo_stream_info_t> res = {};
+        size_t number_of_streams = (layer_info.is_multi_planar) ? layer_info.planes.size() : 1;
+        res.reserve(number_of_streams);
+        for (size_t i = 0; i < number_of_streams; i++) {
+            auto &layer = (layer_info.is_multi_planar) ? layer_info.planes[i] : layer_info;
+            hailo_stream_info_t stream_info = {};
+            stream_info.hw_data_bytes = layer.hw_data_bytes;
+            stream_info.format = layer.format;
+            if (HailoRTCommon::is_nms(stream_info)) {
+                stream_info.nms_info = layer.nms_info;
+                stream_info.hw_frame_size =
+                    HailoRTCommon::get_nms_hw_frame_size(stream_info.nms_info);
+            } else {
+                stream_info.shape.height = layer.shape.height;
+                stream_info.shape.width = layer.shape.width;
+                stream_info.shape.features = layer.shape.features;
+                stream_info.hw_shape.height = layer.hw_shape.height;
+                stream_info.hw_shape.width = layer.hw_shape.width;
+                stream_info.hw_shape.features = layer.hw_shape.features;
+                stream_info.hw_frame_size =
+                    stream_info.hw_shape.height * stream_info.hw_shape.width * stream_info.hw_shape.features * stream_info.hw_data_bytes;
+            }
+            stream_info.direction = layer.direction;
+            stream_info.index = layer.stream_index;
+            assert(layer.name.length() < HAILO_MAX_NAME_SIZE);
+            strncpy(stream_info.name, layer.name.c_str(), layer.name.length() + 1);
+            stream_info.quant_info = layer.quant_info;
+            stream_info.is_mux = layer.is_mux;
+            res.push_back(stream_info);
         }
-        res.direction = layer_info.direction;
-        res.index = layer_info.stream_index;
-        assert(layer_info.name.length() < HAILO_MAX_NAME_SIZE);
-        strncpy(res.name, layer_info.name.c_str(), layer_info.name.length() + 1);
-        res.quant_info = layer_info.quant_info;
-        res.is_mux = layer_info.is_mux;
 
         return res;
     }
@@ -178,36 +191,13 @@ public:
             }
             return layer_info.nn_stream_config.periph_bytes_per_buffer * layer_info.nn_stream_config.periph_buffers_per_frame;
         case LayerType::INTER_CONTEXT:
-            return layer_info.nn_stream_config.periph_bytes_per_buffer * layer_info.nn_stream_config.periph_buffers_per_frame;
         case LayerType::DDR:
-            return layer_info.nn_stream_config.periph_bytes_per_buffer * layer_info.ddr_info.total_buffers_per_frame;
+            return layer_info.nn_stream_config.periph_bytes_per_buffer * layer_info.nn_stream_config.periph_buffers_per_frame;
         default:
             return make_unexpected(HAILO_NOT_IMPLEMENTED);
         }
     }
 
-    /**
-     * Validate nms burst type vs device architecture
-     *
-     * @param[in] burst_type             A hailo_nms_burst_type_t burst_type.
-     * @param[in] arch            A ::hailo_device_architecture_t architecture.
-     * @return true if the burst type matches the device architecture, otherwise false.
-     */
-    static bool validate_nms_burst_type(const hailo_nms_burst_type_t burst_type, const hailo_device_architecture_t arch)
-    {
-        switch (arch)
-        {
-        case HAILO_ARCH_HAILO8_A0:
-        case HAILO_ARCH_HAILO8:
-        case HAILO_ARCH_HAILO8L:
-            return (HAILO_BURST_TYPE_H8_PER_CLASS == burst_type);
-        case HAILO_ARCH_HAILO15:
-            return ((HAILO_BURST_TYPE_H15_PER_CLASS == burst_type) || (HAILO_BURST_TYPE_H15_PER_FRAME == burst_type));
-        default:
-            return false;
-        }
-    }
-
     /**
      * Gets stream's transfer size in bytes by stream info and layer info params.
      *
@@ -233,7 +223,8 @@ public:
     {
         switch (layer_info.nms_info.burst_type) {
             // If No Burst mode - size of transfer is size of bbox
-            case HAILO_BURST_TYPE_NO_BURST:
+            case HAILO_BURST_TYPE_H8_BBOX:
+            case HAILO_BURST_TYPE_H15_BBOX:
                 return layer_info.nms_info.bbox_size;
             // In hailo8 per class and hailo15 per class mode - check if can support interrupt per frame and if not do interrupt per burst
             case HAILO_BURST_TYPE_H8_PER_CLASS:
@@ -245,7 +236,8 @@ public:
                 // If burst size is bigger than max bboxes per class + bboxes_needed_for_delimeter - we can enable 1 interrupt per frame
                 // Becasue we know output size will be burst size * num classes
                 if (layer_info.nms_info.burst_size >= (layer_info.nms_info.max_bboxes_per_class + bboxes_needed_for_delimeter)) {
-                    return layer_info.nms_info.burst_size * layer_info.nms_info.bbox_size * layer_info.nms_info.number_of_classes;
+                    return layer_info.nms_info.burst_size * layer_info.nms_info.bbox_size *
+                        layer_info.nms_info.number_of_classes * layer_info.nms_info.chunks_per_frame;
                 } else {
                     // support regular interrupt per burst
                     return layer_info.nms_info.burst_size * layer_info.nms_info.bbox_size;
@@ -290,10 +282,12 @@ private:
         hailo_vstream_info_t res = {};
         res.format.type = layer_info.format.type;
         res.format.flags = layer_info.format.flags;
-        res.format.order = HailoRTDefaults::get_default_host_format_order(layer_info.format);
-        if (HAILO_FORMAT_ORDER_HAILO_NMS == res.format.order) {
+        // If a layer is multi-planar, its format_order is already the host-side format order
+        res.format.order = (layer_info.is_multi_planar) ? layer_info.format.order : HailoRTDefaults::get_default_host_format_order(layer_info.format);
+        if (HailoRTCommon::is_nms(res)) {
             res.nms_shape.max_bboxes_per_class = layer_info.nms_info.max_bboxes_per_class * layer_info.nms_info.chunks_per_frame;
             res.nms_shape.number_of_classes = layer_info.nms_info.number_of_classes;
+            res.format.type = HAILO_FORMAT_TYPE_FLOAT32; // NMS on vstream is always float32s
         } else {
             res.shape.height = layer_info.shape.height;
             res.shape.width = layer_info.shape.width;
index 3acd38f4a557fbc21056fe9f9d32a8e88981d650..114d9cf4e643b7c474db5d660eeea26f620d328b 100644 (file)
@@ -30,7 +30,6 @@
 /** Vdma Channel registers ***************************************************/
 #define VDMA_CHANNEL_CONTROL_OFFSET         (0x00)
 #define VDMA_CHANNEL_NUM_AVAIL_OFFSET       (0x02)
-#define VDMA_CHANNEL_NUM_PROC_OFFSET        (0x04)
 
 
 #endif /* _HAILO_HW_CONSTS_HPP_ */
index 36007ec725d7a12b1fbf65964886090459466be6..6332f485e26d371d93d9c467e97d152f3549ae24 100644 (file)
@@ -103,8 +103,7 @@ CONTROL_PROTOCOL__mipi_input_config_params_t MipiInputStream::hailo_mipi_params_
     return control_mipi_params;
 }
 
-// Note: Mipi streams don't work with dynamic batch sizes
-hailo_status MipiInputStream::activate_stream(uint16_t /* dynamic_batch_size */, bool /* resume_pending_stream_transfers */)
+hailo_status MipiInputStream::activate_stream()
 {
     hailo_status status = HAILO_UNINITIALIZED;
     CONTROL_PROTOCOL__config_stream_params_t params = {};
index 73178e1d9842e45fc6067d7b8f936d12042a5f82..d9223d643418d1cfc51a02d49ccf596c2644abf6 100644 (file)
@@ -44,7 +44,14 @@ public:
         EventPtr core_op_activated_event);
     virtual ~MipiInputStream();
 
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override
+    {
+        CHECK(buffer_mode == StreamBufferMode::OWNING, HAILO_INVALID_ARGUMENT,
+            "Mipi streams supports only sync api");
+        return HAILO_SUCCESS;
+    }
+
+    virtual hailo_status activate_stream() override;
     virtual hailo_status deactivate_stream() override;
     virtual hailo_stream_interface_t get_interface() const override { return HAILO_STREAM_INTERFACE_MIPI; }
     virtual std::chrono::milliseconds get_timeout() const override;
index dd4a2c503dd052e89d18b4243dae2687a7a83abb..55aa0c0d778397ecfb78991014363d6541f082be 100644 (file)
@@ -2,15 +2,18 @@ cmake_minimum_required(VERSION 3.0.0)
 
 set(SRC_FILES
     ${CMAKE_CURRENT_SOURCE_DIR}/ops/nms_post_process.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/ops/yolo_post_process.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/ops/yolov5_post_process.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/ops/yolox_post_process.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/ops/ssd_post_process.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/ops/argmax_post_process.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/ops/softmax_post_process.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/ops/yolov5_seg_post_process.cpp
 
     ${CMAKE_CURRENT_SOURCE_DIR}/pipeline/pipeline.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/pipeline/inference_pipeline.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/pipeline/vstream.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/pipeline/async_infer_runner.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/pipeline/infer_model.cpp
 )
 
 set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
index b7e2df9c06b0c5e9e13c36f3ffc2449a655a4a65..3d7532139e13159274cb5c6dd53c4091d0095617 100644 (file)
@@ -139,8 +139,10 @@ hailo_status ArgmaxPostProcessOp::execute(const std::map<std::string, MemoryView
 {
     auto &input_name = inputs.begin()->first;
     auto &output_name = outputs.begin()->first;
-    auto &input_metadata = m_inputs_metadata[input_name];
-    auto &output_metadata = m_outputs_metadata[output_name];
+    assert(contains(m_op_metadata->inputs_metadata(), input_name));
+    auto &input_metadata = m_op_metadata->inputs_metadata().at(input_name);
+    assert(contains(m_op_metadata->outputs_metadata(), output_name));
+    auto &output_metadata = m_op_metadata->outputs_metadata().at(output_name);
 
     uint8_t format_index = UINT8_MAX;
     switch (input_metadata.format.order) {
@@ -161,13 +163,27 @@ hailo_status ArgmaxPostProcessOp::execute(const std::map<std::string, MemoryView
     return ArgmaxPostProcessOp::m_argmax_function_array[format_index][input_metadata.format.type][output_metadata.format.type](input_metadata, output_metadata, inputs, outputs);
 }
 
-std::string ArgmaxPostProcessOp::get_op_description()
+Expected<std::shared_ptr<OpMetadata>> ArgmaxOpMetadata::create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                                                            const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                                                            const std::string &network_name)
 {
-    auto config_info = fmt::format("ArgmaxPostProcess Op, Name: {}", m_name);
+    auto op_metadata = std::shared_ptr<ArgmaxOpMetadata>(new (std::nothrow) ArgmaxOpMetadata(inputs_metadata, outputs_metadata, network_name));
+
+    CHECK_AS_EXPECTED(op_metadata != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto status = op_metadata->validate_params();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return std::shared_ptr<OpMetadata>(std::move(op_metadata));
+}
+
+std::string ArgmaxOpMetadata::get_op_description()
+{
+    auto config_info = fmt::format("{} Op, Name: {}", OpMetadata::get_operation_type_str(m_type), m_name);
     return config_info;
 }
 
-hailo_status ArgmaxPostProcessOp::validate_metadata()
+hailo_status ArgmaxOpMetadata::validate_params()
 {
     assert(m_inputs_metadata.size() == hailort::net_flow::ARGMAX_NUMBER_OF_SRCS);
     assert(m_outputs_metadata.size() == hailort::net_flow::ARGMAX_NUMBER_OF_DSTS);
@@ -175,41 +191,90 @@ hailo_status ArgmaxPostProcessOp::validate_metadata()
     auto &input_metadata = m_inputs_metadata.begin()->second;
     auto &output_metadata = m_outputs_metadata.begin()->second;
 
+    CHECK(output_metadata.shape.features == hailort::net_flow::ARGMAX_OUTPUT_FEATURES_SIZE, HAILO_INVALID_OPERATION,
+        "Output features ({}) must be 1 on Argmax op", output_metadata.shape.features);
+    CHECK(input_metadata.shape.height == output_metadata.shape.height, HAILO_INVALID_OPERATION,
+        "Argmax op is supported only when input height ({}) is equal to output height ({})",
+        input_metadata.shape.height, output_metadata.shape.height);
+    CHECK(input_metadata.shape.width == output_metadata.shape.width, HAILO_INVALID_OPERATION,
+        "Argmax op is supported only when input width ({}) is equal to output width ({})",
+        input_metadata.shape.width, output_metadata.shape.width);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status ArgmaxOpMetadata::validate_format_info()
+{
+    auto &input_metadata = m_inputs_metadata.begin()->second;
+    auto &output_metadata = m_outputs_metadata.begin()->second;
+
     CHECK((
         ((output_metadata.format.type == HAILO_FORMAT_TYPE_UINT8) && (input_metadata.shape.features <= std::numeric_limits<uint8_t>::max())) ||
         ((output_metadata.format.type == HAILO_FORMAT_TYPE_UINT16) && (input_metadata.shape.features <= std::numeric_limits<uint16_t>::max())) ||
         ((output_metadata.format.type == HAILO_FORMAT_TYPE_FLOAT32) && (input_metadata.shape.features <= FLOAT_LAST_CONSECUTIVE_REPRESENTABLE_INT))),
-        HAILO_INVALID_OPERATION, "Dst format type {} can't represent possible range {} for Argmax op",
+        HAILO_INVALID_OPERATION, "Output format type {} can't represent possible range {} for Argmax op",
         HailoRTCommon::get_format_type_str(output_metadata.format.type), input_metadata.shape.features);
     CHECK(
         ((input_metadata.format.order == HAILO_FORMAT_ORDER_NHCW) &&  (output_metadata.format.order == HAILO_FORMAT_ORDER_NHW)) ||
         ((input_metadata.format.order == HAILO_FORMAT_ORDER_NHWC) && (output_metadata.format.order == HAILO_FORMAT_ORDER_NHW)) ||
         ((input_metadata.format.order == HAILO_FORMAT_ORDER_NC) && (output_metadata.format.order == HAILO_FORMAT_ORDER_NC)),
-        HAILO_INVALID_OPERATION, "Argmax op is not supported for src format order ({}) and dst format order ({})",
+        HAILO_INVALID_OPERATION, "Argmax op is not supported for input format order ({}) and output format order ({})",
         HailoRTCommon::get_format_order_str(input_metadata.format.order),
         HailoRTCommon::get_format_order_str(output_metadata.format.order));
-
-    CHECK(output_metadata.shape.features == hailort::net_flow::ARGMAX_OUTPUT_FEATURES_SIZE, HAILO_INVALID_OPERATION,
-        "Dst features ({}) must be 1 on Argmax op", output_metadata.shape.features);
-    CHECK(input_metadata.shape.height == output_metadata.shape.height, HAILO_INVALID_OPERATION,
-        "Argmax op is supported only when src height ({}) is equal to dst height ({})",
-        input_metadata.shape.height, output_metadata.shape.height);
-    CHECK(input_metadata.shape.width == output_metadata.shape.width, HAILO_INVALID_OPERATION,
-        "Argmax op is supported only when src width ({}) is equal to dst width ({})",
-        input_metadata.shape.width, output_metadata.shape.width);
     CHECK((
         (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT8) || (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT16)),
-        HAILO_INVALID_OPERATION, "Src format type {} is not valid. Must be either {} or {}",
+        HAILO_INVALID_OPERATION, "The given input format type {} is not supported, should be either {} or {}",
         HailoRTCommon::get_format_type_str(input_metadata.format.type), HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_UINT8),
         HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_UINT16));
 
     return HAILO_SUCCESS;
 }
 
-Expected<std::shared_ptr<Op>> ArgmaxPostProcessOp::create(const std::map<std::string, BufferMetaData> &inputs_metadata,
-    std::map<std::string, BufferMetaData> &outputs_metadata)
+hailo_format_t ArgmaxOpMetadata::expand_output_format_autos(const hailo_format_t &output_format, const hailo_format_t &input_format)
+{
+    auto format = output_format;
+
+    if (format.type == HAILO_FORMAT_TYPE_AUTO) {
+        format.type = input_format.type;
+    }
+    if (format.order == HAILO_FORMAT_ORDER_AUTO) {
+        if (input_format.order == HAILO_FORMAT_ORDER_NHCW || input_format.order == HAILO_FORMAT_ORDER_NHWC) {
+            format.order = HAILO_FORMAT_ORDER_NHW;
+        }
+        if (input_format.order == HAILO_FORMAT_ORDER_NC) {
+            format.order = HAILO_FORMAT_ORDER_NC;
+        }
+    }
+    return format;
+}
+
+Expected<hailo_vstream_info_t> ArgmaxOpMetadata::get_output_vstream_info()
+{
+    CHECK_AS_EXPECTED((m_outputs_metadata.size() == 1), HAILO_INVALID_OPERATION, "{} has more than 1 output", m_name);
+
+    hailo_vstream_info_t vstream_info{};
+    strncpy(vstream_info.name, m_outputs_metadata.begin()->first.c_str(), m_outputs_metadata.begin()->first.length() + 1);
+    strncpy(vstream_info.network_name, m_network_name.c_str(), m_network_name.length() + 1);
+    vstream_info.direction = HAILO_D2H_STREAM;
+    vstream_info.format.order = m_outputs_metadata.begin()->second.format.order;
+    vstream_info.format.type = m_outputs_metadata.begin()->second.format.type;
+    vstream_info.format.flags = HAILO_FORMAT_FLAGS_NONE;
+
+    assert(m_inputs_metadata.size() == 1);
+    vstream_info.format = ArgmaxOpMetadata::expand_output_format_autos(vstream_info.format, m_inputs_metadata.begin()->second.format);
+    vstream_info.shape = m_outputs_metadata.begin()->second.shape;
+
+    vstream_info.quant_info = m_inputs_metadata.begin()->second.quant_info;
+
+    return vstream_info;
+}
+
+Expected<std::shared_ptr<Op>> ArgmaxPostProcessOp::create(std::shared_ptr<ArgmaxOpMetadata> metadata)
 {
-    auto op = std::shared_ptr<ArgmaxPostProcessOp>(new (std::nothrow) ArgmaxPostProcessOp(inputs_metadata, outputs_metadata));
+    auto status = metadata->validate_format_info();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    auto op = std::shared_ptr<ArgmaxPostProcessOp>(new (std::nothrow) ArgmaxPostProcessOp(metadata));
     CHECK_AS_EXPECTED(op != nullptr, HAILO_OUT_OF_HOST_MEMORY);
 
     return std::shared_ptr<Op>(std::move(op));
index 23dd6b46e0bb38b842c4aec34a87fa63bb1d4b41..5f2bb642f2e22f5f839087351069b40de95286f1 100644 (file)
@@ -17,6 +17,7 @@
 
 #include "hailo/hailort.h"
 #include "net_flow/ops/op.hpp"
+#include "net_flow/ops/op_metadata.hpp"
 #include "common/utils.hpp"
 
 #include <iostream>
@@ -36,36 +37,58 @@ constexpr std::size_t ARGMAX_NUMBER_OF_DSTS {1};
 typedef hailo_status (*ArgmaxFunction)(const BufferMetaData &input_metadata, const BufferMetaData &output_metadata,
     const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs);
 
+
+class ArgmaxOpMetadata : public OpMetadata
+{
+public:
+    static Expected<std::shared_ptr<OpMetadata>> create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                                                        const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                                                        const std::string &network_name);
+    std::string get_op_description() override;
+    hailo_status validate_format_info() override;
+    static hailo_format_t expand_output_format_autos(const hailo_format_t &output_format, const hailo_format_t &input_format);
+
+    virtual Expected<hailo_vstream_info_t> get_output_vstream_info() override;
+
+private:
+    ArgmaxOpMetadata(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                        const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                        const std::string &network_name)
+        : OpMetadata(inputs_metadata, outputs_metadata, "Argmax-Post-Process", network_name, OperationType::ARGMAX)
+    {}
+
+    hailo_status validate_params() override;
+};
+
 class ArgmaxPostProcessOp : public Op
 {
 
 private:
-    ArgmaxPostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                         const std::map<std::string, BufferMetaData> &outputs_metadata)
-        : Op(inputs_metadata, outputs_metadata, "Argmax-Post-Process")
+    ArgmaxPostProcessOp(std::shared_ptr<ArgmaxOpMetadata> metadata)
+        : Op(static_cast<std::shared_ptr<OpMetadata>>(metadata))
     {}
 
-    template<typename DeviceType, typename HostType>
+    template<typename SrcType, typename DstType>
     static hailo_status NHCW_to_NHW_feature_axis(const BufferMetaData &input_metadata, const BufferMetaData &output_metadata,
         const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
     {
-        auto src_ptr = (DeviceType*)inputs.begin()->second.data();
-        auto dst_ptr = (HostType*)outputs.begin()->second.data();
+        auto src_ptr = (SrcType*)inputs.begin()->second.data();
+        auto dst_ptr = (DstType*)outputs.begin()->second.data();
         const auto src_row_size = input_metadata.padded_shape.width * input_metadata.padded_shape.features;
         const auto dst_row_size = output_metadata.shape.width;
 
         for (uint32_t r = 0; r < input_metadata.shape.height; r++) {
-            const DeviceType *src_row = src_ptr + (r * src_row_size);
-            HostType *dst_row = dst_ptr + (r * dst_row_size);
+            const SrcType *src_row = src_ptr + (r * src_row_size);
+            DstType *dst_row = dst_ptr + (r * dst_row_size);
             for (uint32_t w = 0; w < input_metadata.shape.width; w++) {
-                const DeviceType *offset_in_row = src_row + w;
-                HostType max_index = 0;
+                const SrcType *offset_in_row = src_row + w;
+                DstType max_index = 0;
                 auto max_value = *offset_in_row;
                 for (uint32_t c = 1; c < input_metadata.shape.features; c++) {
                     offset_in_row += input_metadata.padded_shape.width;
                     const auto &current_value = *offset_in_row;
                     if (current_value > max_value) {
-                        max_index = static_cast<HostType>(c);
+                        max_index = static_cast<DstType>(c);
                         max_value = current_value;
                     }
                 }
@@ -75,26 +98,26 @@ private:
         return HAILO_SUCCESS;
     }
 
-    template<typename DeviceType, typename HostType>
+    template<typename SrcType, typename DstType>
     static hailo_status NHWC_to_NHW_feature_axis(const BufferMetaData &input_metadata, const BufferMetaData &output_metadata,
         const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
     {
-        auto src_ptr = (DeviceType*)inputs.begin()->second.data();
-        auto dst_ptr = (HostType*)outputs.begin()->second.data();
+        auto src_ptr = (SrcType*)inputs.begin()->second.data();
+        auto dst_ptr = (DstType*)outputs.begin()->second.data();
         const auto src_row_size = input_metadata.padded_shape.width * input_metadata.padded_shape.features;
         const auto dst_row_size = output_metadata.shape.width;
 
         for (uint32_t r = 0; r < input_metadata.shape.height; r++) {
-            const DeviceType *src_row = src_ptr + (r * src_row_size);
-            HostType *dst_row = dst_ptr + (r * dst_row_size);
+            const SrcType *src_row = src_ptr + (r * src_row_size);
+            DstType *dst_row = dst_ptr + (r * dst_row_size);
             for (uint32_t w = 0; w < input_metadata.shape.width; w++) {
-                const DeviceType *offset_in_row = src_row + (w * input_metadata.padded_shape.features);
-                HostType max_index = 0;
+                const SrcType *offset_in_row = src_row + (w * input_metadata.padded_shape.features);
+                DstType max_index = 0;
                 auto max_value = *offset_in_row;
                 for (uint32_t c = 1; c < input_metadata.shape.features; c++) {
                     const auto &current_value = *(offset_in_row + c);
                     if (current_value > max_value) {
-                        max_index = static_cast<HostType>(c);
+                        max_index = static_cast<DstType>(c);
                         max_value = current_value;
                     }
                 }
@@ -104,20 +127,20 @@ private:
         return HAILO_SUCCESS;
     }
 
-    template<typename DeviceType, typename HostType>
+    template<typename SrcType, typename DstType>
     static hailo_status NC_to_N(const BufferMetaData &input_metadata, const BufferMetaData &output_metadata,
         const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
     {
         (void) output_metadata; // only reason to have output_metadata is so that the function array will work
-        auto src_ptr = (DeviceType*)inputs.begin()->second.data();
-        auto dst_ptr = (HostType*)outputs.begin()->second.data();
-        HostType max_index = 0;
-        DeviceType max_value = 0;
+        auto src_ptr = (SrcType*)inputs.begin()->second.data();
+        auto dst_ptr = (DstType*)outputs.begin()->second.data();
+        DstType max_index = 0;
+        SrcType max_value = 0;
 
         for (uint32_t c = 0; c < input_metadata.shape.features; c++) {
             const auto &current_value = *(src_ptr + c);
             if (current_value > max_value) {
-                max_index = static_cast<HostType>(c);
+                max_index = static_cast<DstType>(c);
                 max_value = current_value;
             }
         }
@@ -129,12 +152,9 @@ private:
         const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs);
 
 public:
-    static Expected<std::shared_ptr<Op>> create(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                                                std::map<std::string, BufferMetaData> &outputs_metadata);
+    static Expected<std::shared_ptr<Op>> create(std::shared_ptr<ArgmaxOpMetadata> metadata);
     virtual hailo_status execute(const std::map<std::string, MemoryView> &inputs,
         std::map<std::string, MemoryView> &outputs) override;
-    virtual std::string get_op_description() override;
-    hailo_status validate_metadata() override;
 
     // A 3D array of argmax functions to call:
     // 1st dim represent the data format order
index 8fced3b2c371bee89a618f6402aea8707137f11b..efe8608d8cd106633f618aa50534e8093d53b256 100644 (file)
  **/
 
 #include "net_flow/ops/nms_post_process.hpp"
+#include "hef/hef_internal.hpp"
 
 namespace hailort
 {
 namespace net_flow
 {
 
-    hailo_status NmsPostProcessOp::validate_metadata()
-    {
-        for (const auto& output_metadata : m_outputs_metadata) {
-            CHECK(HAILO_FORMAT_ORDER_HAILO_NMS == output_metadata.second.format.order, HAILO_INVALID_ARGUMENT, "The given output format order {} is not supported, "
-                "should be HAILO_FORMAT_ORDER_HAILO_NMS", HailoRTCommon::get_format_order_str(output_metadata.second.format.order));
-
-            CHECK(HAILO_FORMAT_TYPE_FLOAT32 == output_metadata.second.format.type, HAILO_INVALID_ARGUMENT, "The given output format type {} is not supported, "
-                "should be HAILO_FORMAT_TYPE_FLOAT32", HailoRTCommon::get_format_type_str(output_metadata.second.format.type));
-
-            CHECK(!(HAILO_FORMAT_FLAGS_TRANSPOSED & output_metadata.second.format.flags), HAILO_INVALID_ARGUMENT, "Output {} is marked as transposed, which is not supported for this model.",
-                output_metadata.first);
-            CHECK(!(HAILO_FORMAT_FLAGS_HOST_ARGMAX & output_metadata.second.format.flags), HAILO_INVALID_ARGUMENT, "Output {} is marked as argmax, which is not supported for this model.",
-                output_metadata.first);
-            CHECK(!(HAILO_FORMAT_FLAGS_QUANTIZED & output_metadata.second.format.flags), HAILO_INVALID_ARGUMENT, "Output {} is marked as quantized, which is not supported for this model.",
-                output_metadata.first);
-        }
+Expected<std::shared_ptr<OpMetadata>> NmsOpMetadata::create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+    const std::unordered_map<std::string, BufferMetaData> &outputs_metadata, const NmsPostProcessConfig &nms_post_process_config,
+    const std::string &network_name, const OperationType type, const std::string &name)
+{
+    auto op_metadata = std::shared_ptr<NmsOpMetadata>(new (std::nothrow) NmsOpMetadata(inputs_metadata, outputs_metadata, nms_post_process_config,
+        name, network_name, type));
+    CHECK_AS_EXPECTED(op_metadata != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto status = op_metadata->validate_params();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return std::shared_ptr<OpMetadata>(std::move(op_metadata));
+}
 
+std::string NmsOpMetadata::get_op_description()
+{
+    return get_nms_config_description();
+}
+
+hailo_status NmsOpMetadata::validate_format_info()
+{
+    for (const auto& output_metadata : m_outputs_metadata) {
+        CHECK(HAILO_FORMAT_ORDER_HAILO_NMS == output_metadata.second.format.order, HAILO_INVALID_ARGUMENT, "The given output format order {} is not supported, "
+            "should be HAILO_FORMAT_ORDER_HAILO_NMS", HailoRTCommon::get_format_order_str(output_metadata.second.format.order));
+
+        CHECK(HAILO_FORMAT_TYPE_FLOAT32 == output_metadata.second.format.type, HAILO_INVALID_ARGUMENT, "The given output format type {} is not supported, "
+            "should be HAILO_FORMAT_TYPE_FLOAT32", HailoRTCommon::get_format_type_str(output_metadata.second.format.type));
+
+        CHECK(!(HAILO_FORMAT_FLAGS_TRANSPOSED & output_metadata.second.format.flags), HAILO_INVALID_ARGUMENT, "Output {} is marked as transposed, which is not supported for this model.",
+            output_metadata.first);
+        CHECK(!(HAILO_FORMAT_FLAGS_HOST_ARGMAX & output_metadata.second.format.flags), HAILO_INVALID_ARGUMENT, "Output {} is marked as argmax, which is not supported for this model.",
+            output_metadata.first);
+        CHECK(!(HAILO_FORMAT_FLAGS_QUANTIZED & output_metadata.second.format.flags), HAILO_INVALID_ARGUMENT, "Output {} is marked as quantized, which is not supported for this model.",
+            output_metadata.first);
+    }
+    if (m_type == OperationType::IOU) {
+        assert(1 == m_inputs_metadata.size());
+        CHECK(HAILO_FORMAT_ORDER_HAILO_NMS == m_inputs_metadata.begin()->second.format.order, HAILO_INVALID_ARGUMENT, "The given input format order {} is not supported, "
+            "should be HAILO_FORMAT_ORDER_HAILO_NMS", HailoRTCommon::get_format_order_str(m_inputs_metadata.begin()->second.format.order));
+    } else {
         assert(1 <= m_inputs_metadata.size());
         const hailo_format_type_t& first_input_type = m_inputs_metadata.begin()->second.format.type;
         for (const auto& input_metadata : m_inputs_metadata) {
@@ -49,122 +73,193 @@ namespace net_flow
             CHECK(HAILO_FORMAT_FLAGS_QUANTIZED == input_metadata.second.format.flags, HAILO_INVALID_ARGUMENT, "The given input format flag is not supported,"
                 "should be HAILO_FORMAT_FLAGS_QUANTIZED");
         }
-
-        return HAILO_SUCCESS;
     }
 
-    float NmsPostProcessOp::compute_iou(const hailo_bbox_float32_t &box_1, const hailo_bbox_float32_t &box_2)
-    {
-        const float overlap_area_width = std::min(box_1.x_max, box_2.x_max) - std::max(box_1.x_min, box_2.x_min);
-        const float overlap_area_height = std::min(box_1.y_max, box_2.y_max) - std::max(box_1.y_min, box_2.y_min);
-        if (overlap_area_width <= 0.0f || overlap_area_height <= 0.0f) {
-            return 0.0f;
-        }
-        const float intersection = overlap_area_width * overlap_area_height;
-        const float box_1_area = (box_1.y_max - box_1.y_min) * (box_1.x_max - box_1.x_min);
-        const float box_2_area = (box_2.y_max - box_2.y_min) * (box_2.x_max - box_2.x_min);
-        const float union_area = (box_1_area + box_2_area - intersection);
+    return HAILO_SUCCESS;
+}
 
-        return (intersection / union_area);
+hailo_status NmsOpMetadata::validate_params()
+{
+    return HAILO_SUCCESS;
+}
+
+std::string NmsOpMetadata::get_nms_config_description()
+{
+    auto config_info = fmt::format("Score threshold: {:.3f}, IoU threshold: {:.2f}, Classes: {}, Cross classes: {}, Max bboxes per class: {}",
+                        m_nms_config.nms_score_th, m_nms_config.nms_iou_th, m_nms_config.number_of_classes, m_nms_config.cross_classes,
+                        m_nms_config.max_proposals_per_class);
+    if (m_nms_config.background_removal) {
+        config_info += fmt::format(", Background removal index: {}", m_nms_config.background_removal_index);
     }
+    return config_info;
+}
 
-    void NmsPostProcessOp::remove_overlapping_boxes(std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
-    {
-        std::sort(detections.begin(), detections.end(),
-                [](DetectionBbox a, DetectionBbox b)
-                { return a.m_bbox.score > b.m_bbox.score; });
+float NmsPostProcessOp::compute_iou(const hailo_bbox_float32_t &box_1, const hailo_bbox_float32_t &box_2)
+{
+    const float overlap_area_width = std::min(box_1.x_max, box_2.x_max) - std::max(box_1.x_min, box_2.x_min);
+    const float overlap_area_height = std::min(box_1.y_max, box_2.y_max) - std::max(box_1.y_min, box_2.y_min);
+    if (overlap_area_width <= 0.0f || overlap_area_height <= 0.0f) {
+        return 0.0f;
+    }
+    const float intersection = overlap_area_width * overlap_area_height;
+    const float box_1_area = (box_1.y_max - box_1.y_min) * (box_1.x_max - box_1.x_min);
+    const float box_2_area = (box_2.y_max - box_2.y_min) * (box_2.x_max - box_2.x_min);
+    const float union_area = (box_1_area + box_2_area - intersection);
+
+    return (intersection / union_area);
+}
+
+void NmsPostProcessOp::remove_overlapping_boxes(std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count,
+    double iou_th)
+{
+    std::sort(detections.begin(), detections.end(),
+            [](DetectionBbox a, DetectionBbox b)
+            { return a.m_bbox.score > b.m_bbox.score; });
+
+    for (size_t i = 0; i < detections.size(); i++) {
+        if (detections[i].m_bbox.score == REMOVED_CLASS_SCORE) {
+            // Detection overlapped with a higher score detection
+            continue;
+        }
 
-        for (size_t i = 0; i < detections.size(); i++) {
-            if (detections[i].m_bbox.score == REMOVED_CLASS_SCORE) {
+        for (size_t j = i + 1; j < detections.size(); j++) {
+            if (detections[j].m_bbox.score == REMOVED_CLASS_SCORE) {
                 // Detection overlapped with a higher score detection
                 continue;
             }
 
-            for (size_t j = i + 1; j < detections.size(); j++) {
-                if (detections[j].m_bbox.score == REMOVED_CLASS_SCORE) {
-                    // Detection overlapped with a higher score detection
-                    continue;
-                }
-
-                if (detections[i].m_class_id == detections[j].m_class_id
-                        && (compute_iou(detections[i].m_bbox, detections[j].m_bbox) >= m_nms_config.nms_iou_th)) {
-                    // Remove detections[j] if the iou is higher then the threshold
-                    detections[j].m_bbox.score = REMOVED_CLASS_SCORE;
-                    assert(detections[i].m_class_id < classes_detections_count.size());
-                    assert(classes_detections_count[detections[j].m_class_id] > 0);
-                    classes_detections_count[detections[j].m_class_id]--;
-                }
+            if (detections[i].m_class_id == detections[j].m_class_id
+                    && (compute_iou(detections[i].m_bbox, detections[j].m_bbox) >= iou_th)) {
+                // Remove detections[j] if the iou is higher then the threshold
+                detections[j].m_bbox.score = REMOVED_CLASS_SCORE;
+                assert(detections[i].m_class_id < classes_detections_count.size());
+                assert(classes_detections_count[detections[j].m_class_id] > 0);
+                classes_detections_count[detections[j].m_class_id]--;
             }
         }
     }
+}
 
-    void NmsPostProcessOp::fill_nms_format_buffer(MemoryView &buffer, const std::vector<DetectionBbox> &detections,
-        std::vector<uint32_t> &classes_detections_count)
-    {
-        // Calculate the number of detections before each class, to help us later calculate the buffer_offset for it's detections.
-        std::vector<uint32_t> num_of_detections_before(m_nms_config.number_of_classes, 0);
-        uint32_t ignored_detections_count = 0;
-        for (size_t class_idx = 0; class_idx < m_nms_config.number_of_classes; class_idx++) {
-            if (classes_detections_count[class_idx] > m_nms_config.max_proposals_per_class) {
-                ignored_detections_count += (classes_detections_count[class_idx] - m_nms_config.max_proposals_per_class);
-                classes_detections_count[class_idx] = m_nms_config.max_proposals_per_class;
-            }
-
-            if (0 == class_idx) {
-                num_of_detections_before[class_idx] = 0;
-            }
-            else {
-                num_of_detections_before[class_idx] = num_of_detections_before[class_idx - 1] + classes_detections_count[class_idx - 1];
-            }
-
-            // Fill `bbox_count` value for class_idx in the result buffer
-            float32_t bbox_count_casted = static_cast<float32_t>(classes_detections_count[class_idx]);
-            auto buffer_offset = (class_idx * sizeof(bbox_count_casted)) + (num_of_detections_before[class_idx] * sizeof(hailo_bbox_float32_t));
-            memcpy((buffer.data() + buffer_offset), &bbox_count_casted, sizeof(bbox_count_casted));
+void NmsPostProcessOp::fill_nms_format_buffer(MemoryView &buffer, const std::vector<DetectionBbox> &detections,
+    std::vector<uint32_t> &classes_detections_count, const NmsPostProcessConfig &nms_config)
+{
+    // Calculate the number of detections before each class, to help us later calculate the buffer_offset for it's detections.
+    std::vector<uint32_t> num_of_detections_before(nms_config.number_of_classes, 0);
+    uint32_t ignored_detections_count = 0;
+    for (size_t class_idx = 0; class_idx < nms_config.number_of_classes; class_idx++) {
+        if (classes_detections_count[class_idx] > nms_config.max_proposals_per_class) {
+            ignored_detections_count += (classes_detections_count[class_idx] - nms_config.max_proposals_per_class);
+            classes_detections_count[class_idx] = nms_config.max_proposals_per_class;
         }
 
-        for (auto &detection : detections) {
-            if (REMOVED_CLASS_SCORE == detection.m_bbox.score) {
-                // Detection overlapped with a higher score detection and removed in remove_overlapping_boxes()
-                continue;
-            }
-            if (0 == classes_detections_count[detection.m_class_id]) {
-                // This class' detections count is higher then m_nms_config.max_proposals_per_class.
-                // This detection is ignored due to having lower score (detections vector is sorted by score).
-                continue;
-            }
+        if (0 == class_idx) {
+            num_of_detections_before[class_idx] = 0;
+        }
+        else {
+            num_of_detections_before[class_idx] = num_of_detections_before[class_idx - 1] + classes_detections_count[class_idx - 1];
+        }
 
-            auto buffer_offset = ((detection.m_class_id + 1) * sizeof(float32_t))
-                                    + (num_of_detections_before[detection.m_class_id] * sizeof(hailo_bbox_float32_t));
+        // Fill `bbox_count` value for class_idx in the result buffer
+        float32_t bbox_count_casted = static_cast<float32_t>(classes_detections_count[class_idx]);
+        auto buffer_offset = (class_idx * sizeof(bbox_count_casted)) + (num_of_detections_before[class_idx] * sizeof(hailo_bbox_float32_t));
+        memcpy((buffer.data() + buffer_offset), &bbox_count_casted, sizeof(bbox_count_casted));
+    }
 
-            assert((buffer_offset + sizeof(hailo_bbox_float32_t)) <= buffer.size());
-            memcpy((hailo_bbox_float32_t*)(buffer.data() + buffer_offset), &detection.m_bbox, sizeof(hailo_bbox_float32_t));
-            num_of_detections_before[detection.m_class_id]++;
-            classes_detections_count[detection.m_class_id]--;
+    for (auto &detection : detections) {
+        if (REMOVED_CLASS_SCORE == detection.m_bbox.score) {
+            // Detection overlapped with a higher score detection and removed in remove_overlapping_boxes()
+            continue;
         }
-
-        if (0 != ignored_detections_count) {
-            LOGGER__INFO("{} Detections were ignored, due to `max_bboxes_per_class` defined as {}.",
-                ignored_detections_count, m_nms_config.max_proposals_per_class);
+        if (0 == classes_detections_count[detection.m_class_id]) {
+            // This class' detections count is higher then m_nms_config.max_proposals_per_class.
+            // This detection is ignored due to having lower score (detections vector is sorted by score).
+            continue;
         }
+
+        auto buffer_offset = ((detection.m_class_id + 1) * sizeof(float32_t))
+                                + (num_of_detections_before[detection.m_class_id] * sizeof(hailo_bbox_float32_t));
+
+        assert((buffer_offset + sizeof(hailo_bbox_float32_t)) <= buffer.size());
+        memcpy((hailo_bbox_float32_t*)(buffer.data() + buffer_offset), &detection.m_bbox, sizeof(hailo_bbox_float32_t));
+        num_of_detections_before[detection.m_class_id]++;
+        classes_detections_count[detection.m_class_id]--;
     }
 
-    hailo_status NmsPostProcessOp::hailo_nms_format(std::vector<DetectionBbox> &&detections,
-        MemoryView dst_view, std::vector<uint32_t> &classes_detections_count)
-    {
-        remove_overlapping_boxes(detections, classes_detections_count);
-        fill_nms_format_buffer(dst_view, detections, classes_detections_count);
-        return HAILO_SUCCESS;
+    if (0 != ignored_detections_count) {
+        LOGGER__INFO("{} Detections were ignored, due to `max_bboxes_per_class` defined as {}.",
+            ignored_detections_count, nms_config.max_proposals_per_class);
     }
+}
+
+hailo_status NmsPostProcessOp::hailo_nms_format(std::vector<DetectionBbox> &&detections,
+    MemoryView dst_view, std::vector<uint32_t> &classes_detections_count)
+{
+    remove_overlapping_boxes(detections, classes_detections_count, m_nms_metadata->nms_config().nms_iou_th);
+    fill_nms_format_buffer(dst_view, detections, classes_detections_count, m_nms_metadata->nms_config());
+    return HAILO_SUCCESS;
+}
+
+hailo_format_t NmsOpMetadata::expand_output_format_autos_by_op_type(const hailo_format_t &output_format, OperationType type)
+{
+    auto format = output_format;
 
-    std::string NmsPostProcessOp::get_nms_config_description()
+    if (HAILO_FORMAT_ORDER_AUTO == format.order)
     {
-        auto config_info = fmt::format("Score threshold: {:.3f}, Iou threshold: {:.2f}, Classes: {}, Cross classes: {}", 
-                            m_nms_config.nms_score_th, m_nms_config.nms_iou_th, m_nms_config.number_of_classes, m_nms_config.cross_classes);
-        if (m_nms_config.background_removal) {
-            config_info += fmt::format(", Background removal index: {}", m_nms_config.background_removal_index);
+        if (OperationType::YOLOV5SEG == type) {
+            format.order = HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK;
+        } else {
+            format.order = HAILO_FORMAT_ORDER_HAILO_NMS;
         }
-        return config_info;
     }
+    if (HAILO_FORMAT_TYPE_AUTO == format.type)
+    {
+        format.type = HAILO_FORMAT_TYPE_FLOAT32;
+    }
+    return format;
+}
+
+Expected<hailo_vstream_info_t> NmsOpMetadata::get_output_vstream_info()
+{
+    CHECK_AS_EXPECTED((m_outputs_metadata.size() == 1), HAILO_INVALID_OPERATION, "{} has more than 1 output", m_name);
+
+    hailo_vstream_info_t vstream_info{};
+    strncpy(vstream_info.name, m_outputs_metadata.begin()->first.c_str(), m_outputs_metadata.begin()->first.length() + 1);
+    strncpy(vstream_info.network_name, m_network_name.c_str(), m_network_name.length() + 1);
+    vstream_info.direction = HAILO_D2H_STREAM;
+    vstream_info.format.order = m_outputs_metadata.begin()->second.format.order;
+    vstream_info.format.type = m_outputs_metadata.begin()->second.format.type;
+    vstream_info.format.flags = HAILO_FORMAT_FLAGS_NONE;
+
+    vstream_info.nms_shape.max_bboxes_per_class = nms_config().max_proposals_per_class;
+    vstream_info.nms_shape.number_of_classes = nms_config().number_of_classes;
+    if (nms_config().background_removal) {
+        vstream_info.nms_shape.number_of_classes--;
+    }
+
+    // In order to pass is_qp_valid check in pyhailort
+    vstream_info.quant_info.qp_scale = 1;
+
+    return vstream_info;
+}
+
+hailo_nms_info_t NmsOpMetadata::nms_info()
+{
+    hailo_nms_info_t nms_info = {
+        nms_config().number_of_classes,
+        nms_config().max_proposals_per_class,
+        sizeof(hailo_bbox_float32_t),
+        1, // input_division_factor
+        false,
+        hailo_nms_defuse_info_t(),
+        DEFAULT_NMS_NO_BURST_SIZE,
+        HAILO_BURST_TYPE_H8_BBOX
+    };
+    if (nms_config().background_removal) {
+        nms_info.number_of_classes--;
+    }
+
+    return nms_info;
+}
+
 }
 }
\ No newline at end of file
index e7c9d5960f10fb53a31bef805263b2c4d2e1ffe5..f74698190c406e5dcef893c4f2b07473aae3e793 100644 (file)
@@ -16,6 +16,7 @@
 #include "hailo/hailort.h"
 #include "hailo/quantization.hpp"
 #include "hailo/buffer.hpp"
+#include "hailo/transform.hpp"
 
 #include "common/utils.hpp"
 #include "common/logger_macros.hpp"
@@ -32,6 +33,14 @@ namespace net_flow
 #define INVALID_NMS_DETECTION (std::numeric_limits<uint32_t>::max())
 #define INVALID_NMS_SCORE (std::numeric_limits<float32_t>::max())
 
+inline bool operator==(const hailo_bbox_float32_t &first, const hailo_bbox_float32_t &second) {
+    return first.y_min == second.y_min && first.x_min == second.x_min && first.y_max == second.y_max && first.x_max == second.x_max && first.score == second.score;
+}
+
+inline bool operator==(const hailo_bbox_t &first, const hailo_bbox_t &second) {
+    return first.y_min == second.y_min && first.x_min == second.x_min && first.y_max == second.y_max && first.x_max == second.x_max && first.score == second.score;
+}
+
 struct DetectionBbox
 {
     DetectionBbox(float32_t x_min, float32_t y_min, float32_t width, float32_t height, float32_t score, uint32_t class_id)
@@ -40,6 +49,9 @@ struct DetectionBbox
     DetectionBbox(const hailo_bbox_float32_t &bbox, uint32_t class_id)
         : m_class_id(class_id), m_bbox(bbox) {}
 
+    DetectionBbox(const hailo_bbox_float32_t &bbox, uint32_t class_id, std::vector<float32_t> &&mask)
+        : m_class_id(class_id), m_bbox(bbox), m_mask(std::move(mask)) {}
+
     DetectionBbox() : DetectionBbox(hailo_bbox_float32_t{
         INVALID_BBOX_DIM,
         INVALID_BBOX_DIM,
@@ -48,17 +60,32 @@ struct DetectionBbox
         INVALID_BBOX_DIM
     }, INVALID_NMS_DETECTION) {}
 
+    inline uint32_t get_bbox_rounded_height(float32_t image_height) const
+    {
+        return static_cast<uint32_t>(std::round((m_bbox.y_max - m_bbox.y_min) * image_height));
+    }
+
+    inline uint32_t get_bbox_rounded_width(float32_t image_width) const
+    {
+        return static_cast<uint32_t>(std::round((m_bbox.x_max - m_bbox.x_min) * image_width));
+    }
+
     uint32_t m_class_id;
     hailo_bbox_float32_t m_bbox;
+    std::vector<float32_t> m_mask; // Used in segmentation networks, otherwise there is no mask.
 };
 
+inline bool operator==(const DetectionBbox &first, const DetectionBbox &second) {
+    return first.m_class_id == second.m_class_id && first.m_bbox == second.m_bbox;
+}
+
 struct NmsPostProcessConfig
 {
-    // User given confidence threshold for a bbox. A bbox will be consider as detection if the 
+    // User given confidence threshold for a bbox. A bbox will be consider as detection if the
     // (objectness * class_score) is higher then the confidence_threshold.
     double nms_score_th = 0;
 
-    // User given IOU threshold (intersection over union). This threshold is for performing
+    // User given IoU threshold (intersection over union). This threshold is for performing
     // Non-maximum suppression (Removing overlapping boxes).
     double nms_iou_th = 0;
 
@@ -74,47 +101,168 @@ struct NmsPostProcessConfig
     // Index of background class for background removal
     uint32_t background_removal_index = 0;
 
-    // Indicates whether or not NMS performs IOU over different classes for the same box.
+    // Indicates whether or not NMS performs IoU over different classes for the same box.
     // If set to false - NMS won't intersect different classes, and a box could have multiple labels.
     bool cross_classes = false;
 };
 
 static const float32_t REMOVED_CLASS_SCORE = 0.0f;
 
+class NmsOpMetadata : public OpMetadata
+{
+public:
+    static Expected<std::shared_ptr<OpMetadata>> create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                                                    const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                                                    const NmsPostProcessConfig &nms_post_process_config,
+                                                    const std::string &network_name,
+                                                    const OperationType type,
+                                                    const std::string &name);
+    virtual ~NmsOpMetadata() = default;
+    std::string get_nms_config_description();
+    hailo_status validate_format_info() override;
+    NmsPostProcessConfig &nms_config() { return m_nms_config;};
+    hailo_nms_info_t nms_info();
+    std::string get_op_description() override;
+    static hailo_format_t expand_output_format_autos_by_op_type(const hailo_format_t &output_format, OperationType type);
+
+    virtual Expected<hailo_vstream_info_t> get_output_vstream_info() override;
+
+protected:
+    NmsOpMetadata(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                    const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                    const NmsPostProcessConfig &nms_post_process_config,
+                    const std::string &name,
+                    const std::string &network_name,
+                    const OperationType type)
+        : OpMetadata(inputs_metadata, outputs_metadata, name, network_name, type),
+            m_nms_config(nms_post_process_config)
+    {}
+
+    hailo_status validate_params() override;
+
+private:
+    NmsPostProcessConfig m_nms_config;
+};
+
 class NmsPostProcessOp : public Op
 {
 public:
     virtual ~NmsPostProcessOp() = default;
-    
+
     /**
-     * Computes the IOU ratio of @a box_1 and @a box_2 
+     * Computes the IOU ratio of @a box_1 and @a box_2
     */
     static float compute_iou(const hailo_bbox_float32_t &box_1, const hailo_bbox_float32_t &box_2);
 
+    std::shared_ptr<NmsOpMetadata> metadata() { return m_nms_metadata;}
+    virtual bool should_sigmoid()
+    {
+        return false;
+    };
+
+    virtual bool should_add_mask()
+    {
+        return false;
+    };
+
+    static float32_t sigmoid(float32_t number)
+    {
+        return (1.0f / (1.0f + std::exp(-number)));
+    }
+
+    /**
+     * Removes overlapping boxes in @a detections by setting the class confidence to zero.
+     *
+     * @param[in] detections            A vector of @a DetectionBbox containing the detections boxes after ::extract_detections() function.
+     *
+    */
+    static void remove_overlapping_boxes(std::vector<DetectionBbox> &detections,
+        std::vector<uint32_t> &classes_detections_count, double nms_iou_th);
+
+    template<typename DstType = float32_t, typename SrcType>
+    DstType dequantize_and_sigmoid(SrcType number, hailo_quant_info_t quant_info)
+    {
+        auto dequantized_val = Quantization::dequantize_output<DstType, SrcType>(number, quant_info);
+        if (should_sigmoid()) {
+            return sigmoid(dequantized_val);
+        } else {
+            return dequantized_val;
+        }
+    }
+
+    static inline void transform__parse_and_copy_bbox(hailo_bbox_t *dst, uint64_t* proposal)
+    {
+        dst->y_min = (uint16_t)((*((uint64_t*)proposal) & 0xfff000000000) >> 36);
+        dst->x_min = (uint16_t)((*((uint64_t*)proposal) & 0xfff000000) >> 24);
+        dst->y_max = (uint16_t)((*((uint64_t*)proposal) & 0xfff000) >> 12);
+        dst->x_max = (uint16_t)((*((uint64_t*)proposal) & 0xfff));
+        dst->score = (uint16_t)((*((uint64_t*)proposal) & 0xffff000000000000) >> 48);
+    }
+
+    static std::pair<std::vector<net_flow::DetectionBbox>, std::vector<uint32_t>>
+        transform__d2h_NMS_DETECTIONS(const uint8_t *src_ptr, const hailo_nms_info_t &nms_info)
+    {
+        /* Validate arguments */
+        assert(NULL != src_ptr);
+
+        std::vector<net_flow::DetectionBbox> detections;
+        std::vector<uint32_t> classes_detection_count(nms_info.number_of_classes, 0);
+        detections.reserve(nms_info.max_bboxes_per_class * nms_info.number_of_classes);
+
+        const uint32_t bbox_size = sizeof(hailo_bbox_float32_t);
+
+        float32_t class_bboxes_count = 0;
+
+        size_t current_offset = 0;
+        // Now, the merge itself
+        for (size_t class_index = 0; class_index < nms_info.number_of_classes ; class_index++) {
+            class_bboxes_count = *(reinterpret_cast<const float32_t*>(src_ptr + current_offset));
+            classes_detection_count[class_index] += (uint32_t)class_bboxes_count;
+            current_offset += sizeof(float32_t);
+            for (nms_bbox_counter_t bbox_count = 0; bbox_count < class_bboxes_count; bbox_count++) {
+                hailo_bbox_float32_t bbox = *(reinterpret_cast<const hailo_bbox_float32_t*>(src_ptr + current_offset));
+                DetectionBbox detection_bbox;
+                detection_bbox.m_class_id = (uint32_t)class_index;
+                detection_bbox.m_bbox = bbox;
+                detections.push_back(detection_bbox);
+                current_offset += bbox_size;
+            }
+        }
+        return std::make_pair(std::move(detections), std::move(classes_detection_count));
+    }
+
+    /*
+    * For each class the layout is
+    *       \code
+    *       struct (packed) {
+    *           uint16_t/float32_t bbox_count;
+    *           hailo_bbox_t/hailo_bbox_float32_t bbox[bbox_count];
+    *       };
+    *       \endcode
+    */
+    static void fill_nms_format_buffer(MemoryView &buffer, const std::vector<DetectionBbox> &detections,
+        std::vector<uint32_t> &classes_detections_count, const NmsPostProcessConfig &nms_config);
+
 protected:
-    NmsPostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                     const std::map<std::string, BufferMetaData> &outputs_metadata,
-                     const NmsPostProcessConfig &nms_post_process_config,
-                     const std::string &name)
-        : Op(inputs_metadata, outputs_metadata, name)
-        , m_nms_config(nms_post_process_config)
+    NmsPostProcessOp(std::shared_ptr<NmsOpMetadata> metadata)
+        : Op(static_cast<PostProcessOpMetadataPtr>(metadata))
+        , m_nms_metadata(metadata)
     {}
 
-    NmsPostProcessConfig m_nms_config;
-
-    template<typename HostType = float32_t, typename DeviceType>
-    std::pair<uint32_t, float32_t> get_max_class(const DeviceType *data, uint32_t entry_idx, uint32_t classes_start_index,
+    template<typename DstType = float32_t, typename SrcType>
+    std::pair<uint32_t, float32_t> get_max_class(const SrcType *data, uint32_t entry_idx, uint32_t classes_start_index,
         float32_t objectness, hailo_quant_info_t quant_info, uint32_t width)
     {
+        auto const &nms_config = m_nms_metadata->nms_config();
         std::pair<uint32_t, float32_t> max_id_score_pair;
-        for (uint32_t class_index = 0; class_index < m_nms_config.number_of_classes; class_index++) {
+        for (uint32_t class_index = 0; class_index < nms_config.number_of_classes; class_index++) {
             auto class_id = class_index;
-            if (m_nms_config.background_removal) {
-                if (m_nms_config.background_removal_index == class_index) {
+            if (nms_config.background_removal) {
+                if (nms_config.background_removal_index == class_index) {
                     // Ignore if class_index is background_removal_index
                     continue;
                 }
-                else if (0 == m_nms_config.background_removal_index) {
+                else if (0 == nms_config.background_removal_index) {
                     // background_removal_index will always be the first or last index.
                     // If it is the first one we need to reduce all classes id's in 1.
                     // If it is the last one we just ignore it in the previous if case.
@@ -123,7 +271,7 @@ protected:
             }
 
             auto class_entry_idx = entry_idx + ((classes_start_index + class_index) * width);
-            auto class_confidence = Quantization::dequantize_output<HostType, DeviceType>(data[class_entry_idx], quant_info);
+            auto class_confidence = dequantize_and_sigmoid<DstType, SrcType>(data[class_entry_idx], quant_info);
             auto class_score = class_confidence * objectness;
             if (class_score > max_id_score_pair.second) {
                 max_id_score_pair.first = class_id;
@@ -133,32 +281,11 @@ protected:
         return max_id_score_pair;
     }
 
-    /**
-     * Removes overlapping boxes in @a detections by setting the class confidence to zero.
-     * 
-     * @param[in] detections            A vector of @a DetectionBbox containing the detections boxes after ::extract_detections() function.
-     * 
-    */
-    void remove_overlapping_boxes(std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count);
-
-    /*
-    * For each class the layout is
-    *       \code
-    *       struct (packed) {
-    *           uint16_t/float32_t bbox_count;
-    *           hailo_bbox_t/hailo_bbox_float32_t bbox[bbox_count];
-    *       };
-    *       \endcode
-    */
-    void fill_nms_format_buffer(MemoryView &buffer, const std::vector<DetectionBbox> &detections,
-        std::vector<uint32_t> &classes_detections_count);
-
     hailo_status hailo_nms_format(std::vector<DetectionBbox> &&detections,
         MemoryView dst_view, std::vector<uint32_t> &classes_detections_count);
 
-    std::string get_nms_config_description();
-
-    hailo_status validate_metadata() override;
+private:
+    std::shared_ptr<NmsOpMetadata> m_nms_metadata;
 
 };
 
index cd8b3ae3abb7737bbf1a412f96f7c0cb1ecc9121..894c17b295cec24e95c8d9a812a652b16055fae2 100644 (file)
@@ -15,6 +15,7 @@
 
 #include "hailo/hailort.h"
 #include "hailo/buffer.hpp"
+#include "net_flow/ops/op_metadata.hpp"
 
 #include "common/utils.hpp"
 #include "common/logger_macros.hpp"
@@ -25,15 +26,6 @@ namespace hailort
 namespace net_flow
 {
 
-struct BufferMetaData
-{
-    hailo_3d_image_shape_t shape;
-    hailo_3d_image_shape_t padded_shape;
-    hailo_format_t format;
-    hailo_quant_info_t quant_info;
-};
-
-
 class Op
 {
 public:
@@ -44,52 +36,37 @@ public:
      *
      * @param[in] inputs                A map between input names to input buffers.
      * @param[in] outputs               A map between outputs names and their pre-allocated buffers.
-     * 
+     *
      * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
      *
      */
     virtual hailo_status execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs) = 0;
 
-    virtual hailo_status validate_metadata() = 0;
-
-    const std::map<std::string, BufferMetaData> &inputs_metadata() const
+    const std::unordered_map<std::string, BufferMetaData> &inputs_metadata() const
     {
-        return m_inputs_metadata;
+        return m_op_metadata->inputs_metadata();
     }
 
-    const std::map<std::string, BufferMetaData> &outputs_metadata() const
+    const std::unordered_map<std::string, BufferMetaData> &outputs_metadata() const
     {
-        return m_outputs_metadata;
-    }
-
-    void set_outputs_metadata(std::map<std::string, BufferMetaData> &outputs_metadata)
-    {
-        m_outputs_metadata = outputs_metadata;
-    }
-
-    void set_inputs_metadata(std::map<std::string, BufferMetaData> &inputs_metadata)
-    {
-        m_inputs_metadata = inputs_metadata;
+        return m_op_metadata->outputs_metadata();
     }
 
     std::string get_name() {
-        return m_name;
+        return m_op_metadata->get_name();
     }
 
-    virtual std::string get_op_description() = 0;
+    const PostProcessOpMetadataPtr &metadata() { return m_op_metadata;}
+
 
 protected:
-    Op(const std::map<std::string, BufferMetaData> &inputs_metadata,
-       const std::map<std::string, BufferMetaData> &outputs_metadata,
-       const std::string &name)
-        : m_inputs_metadata(inputs_metadata)
-        , m_outputs_metadata(outputs_metadata)
-        , m_name(name)
+
+    Op(PostProcessOpMetadataPtr op_metadata)
+        : m_op_metadata(op_metadata)
     {}
 
-    std::map<std::string, BufferMetaData> m_inputs_metadata;
-    std::map<std::string, BufferMetaData> m_outputs_metadata;
-    const std::string m_name;
+    PostProcessOpMetadataPtr m_op_metadata;
+
 };
 
 }
diff --git a/hailort/libhailort/src/net_flow/ops/op_metadata.hpp b/hailort/libhailort/src/net_flow/ops/op_metadata.hpp
new file mode 100644 (file)
index 0000000..803d883
--- /dev/null
@@ -0,0 +1,117 @@
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file op_metadata.hpp
+ * @brief Base class and common definitions for post-process op metadata
+ *
+ **/
+
+#ifndef _HAILO_OP_META_DATA_HPP_
+#define _HAILO_OP_META_DATA_HPP_
+
+#include "hailo/hailort.h"
+#include <set>
+
+namespace hailort
+{
+namespace net_flow
+{
+
+struct BufferMetaData
+{
+    hailo_3d_image_shape_t shape;
+    hailo_3d_image_shape_t padded_shape;
+    hailo_format_t format;
+    hailo_quant_info_t quant_info;
+};
+
+enum class OperationType {
+    YOLOX,
+    YOLOV5,
+    YOLOV5SEG,
+    SSD,
+    SOFTMAX,
+    ARGMAX,
+    IOU
+};
+
+class OpMetadata
+{
+public:
+    virtual ~OpMetadata() = default;
+    const std::unordered_map<std::string, BufferMetaData> &inputs_metadata() { return m_inputs_metadata;};
+    const std::unordered_map<std::string, BufferMetaData> &outputs_metadata() { return m_outputs_metadata;};
+    std::string get_name() { return m_name;};
+    OperationType type() { return m_type;};
+    virtual std::string get_op_description() = 0;
+    virtual hailo_status validate_format_info() = 0;
+
+    void set_outputs_metadata(std::unordered_map<std::string, BufferMetaData> &outputs_metadata)
+    {
+        m_outputs_metadata = outputs_metadata;
+    }
+
+    void set_inputs_metadata(std::unordered_map<std::string, BufferMetaData> &inputs_metadata)
+    {
+        m_inputs_metadata = inputs_metadata;
+    }
+
+    virtual Expected<hailo_vstream_info_t> get_output_vstream_info() = 0;
+
+    const std::set<std::string> get_input_names()
+    {
+        std::set<std::string> names;
+        for (const auto &pair : m_inputs_metadata) {
+            names.insert(pair.first);
+        }
+        return names;
+    }
+
+    static std::string get_operation_type_str(const OperationType &type)
+    {
+        switch (type) {
+        case OperationType::YOLOX:
+            return "YOLOX";
+        case OperationType::YOLOV5:
+            return "YOLOV5";
+        case OperationType::YOLOV5SEG:
+            return "YOLOV5SEG";
+        case OperationType::SSD:
+            return "SSD";
+        case OperationType::SOFTMAX:
+            return "SOFTMAX";
+        case OperationType::ARGMAX:
+            return "ARGMAX";
+        case OperationType::IOU:
+            return "IOU";
+        default:
+            return "Nan";
+        }
+    }
+
+protected:
+    // TODO - move inputs/outputs_metadata to the op itself, since they depend on the vstream_params (HRT-11426)
+    std::unordered_map<std::string, BufferMetaData> m_inputs_metadata;
+    std::unordered_map<std::string, BufferMetaData> m_outputs_metadata;
+    const std::string m_name;
+    const std::string m_network_name;
+    OperationType m_type;
+
+    OpMetadata(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+               const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+               const std::string &name, const std::string &network_name, const OperationType type) :
+                m_inputs_metadata(inputs_metadata), m_outputs_metadata(outputs_metadata),
+                m_name(name), m_network_name(network_name), m_type(type)
+    {}
+
+    virtual hailo_status validate_params() = 0;
+};
+
+using PostProcessOpMetadataPtr = std::shared_ptr<OpMetadata>;
+
+}
+}
+
+#endif
\ No newline at end of file
index 97fb1e30a07533fa4b220059fffd5df8a95b8d1e..eb6132f913ee6e906fafb5c9e222cba9befdc27f 100644 (file)
@@ -10,6 +10,8 @@
 #include "softmax_post_process.hpp"
 #include "hailo/hailort.h"
 #include "hailo/hailort_common.hpp"
+#include "hailo/hailort_defaults.hpp"
+
 #include "common/utils.hpp"
 
 #include <limits>
@@ -19,7 +21,7 @@ namespace hailort
 namespace net_flow
 {
 
-// This function is for when trying to perform softmax op for unsupported formats 
+// Called when attempting to perform the softmax op on an unsupported format combination
 hailo_status SoftmaxPostProcessOp::execute_not_supported(const BufferMetaData &input_metadata, const BufferMetaData &output_metadata,
     const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
     {
@@ -82,7 +84,7 @@ SoftmaxFunction SoftmaxPostProcessOp::m_softmax_function_array[SOFTMAX_NUM_OF_PO
         {
             // NC x UINT8
             // We don't support input_format_type to be UINT8
-            SoftmaxPostProcessOp::execute_not_supported, 
+            SoftmaxPostProcessOp::execute_not_supported,
             SoftmaxPostProcessOp::execute_not_supported,
             SoftmaxPostProcessOp::execute_not_supported,
             SoftmaxPostProcessOp::execute_not_supported,
@@ -110,8 +112,10 @@ hailo_status SoftmaxPostProcessOp::execute(const std::map<std::string, MemoryVie
 {
     auto &input_name = inputs.begin()->first;
     auto &output_name = outputs.begin()->first;
-    auto &input_metadata = m_inputs_metadata[input_name];
-    auto &output_metadata = m_outputs_metadata[output_name];
+    assert(contains(m_op_metadata->inputs_metadata(), input_name));
+    auto &input_metadata = m_op_metadata->inputs_metadata().at(input_name);
+    assert(contains(m_op_metadata->outputs_metadata(), output_name));
+    auto &output_metadata = m_op_metadata->outputs_metadata().at(output_name);
 
     uint8_t format_index = UINT8_MAX;
     switch (input_metadata.format.order) {
@@ -129,13 +133,25 @@ hailo_status SoftmaxPostProcessOp::execute(const std::map<std::string, MemoryVie
     return SoftmaxPostProcessOp::m_softmax_function_array[format_index][input_metadata.format.type][output_metadata.format.type](input_metadata, output_metadata, inputs, outputs);
 }
 
-std::string SoftmaxPostProcessOp::get_op_description()
+Expected<std::shared_ptr<OpMetadata>> SoftmaxOpMetadata::create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+    const std::unordered_map<std::string, BufferMetaData> &outputs_metadata, const std::string &network_name)
+{
+    auto op_metadata = std::shared_ptr<SoftmaxOpMetadata>(new (std::nothrow) SoftmaxOpMetadata(inputs_metadata, outputs_metadata, network_name));
+    CHECK_AS_EXPECTED(op_metadata != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto status = op_metadata->validate_params();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return std::shared_ptr<OpMetadata>(std::move(op_metadata));
+}
+
+std::string SoftmaxOpMetadata::get_op_description()
 {
-    auto config_info = fmt::format("SoftmaxPostProcess Op, Name: {}", m_name);
+    auto config_info = fmt::format("{} Op, Name: {}", OpMetadata::get_operation_type_str(m_type), m_name);
     return config_info;
 }
 
-hailo_status SoftmaxPostProcessOp::validate_metadata()
+hailo_status SoftmaxOpMetadata::validate_params()
 {
     assert(m_inputs_metadata.size() == hailort::net_flow::SOFTMAX_NUMBER_OF_SRCS);
     assert(m_outputs_metadata.size() == hailort::net_flow::SOFTMAX_NUMBER_OF_DSTS);
@@ -143,6 +159,23 @@ hailo_status SoftmaxPostProcessOp::validate_metadata()
     auto &input_metadata = m_inputs_metadata.begin()->second;
     auto &output_metadata = m_outputs_metadata.begin()->second;
 
+    CHECK(input_metadata.shape.features == output_metadata.shape.features, HAILO_INVALID_OPERATION,
+        "Softmax op is supported only when input num of features ({}) is equal to output num of features ({})",
+        input_metadata.shape.features, output_metadata.shape.features);
+    CHECK(input_metadata.shape.height == output_metadata.shape.height, HAILO_INVALID_OPERATION,
+        "Softmax op is supported only when input height ({}) is equal to output height ({})",
+        input_metadata.shape.height, output_metadata.shape.height);
+    CHECK(input_metadata.shape.width == output_metadata.shape.width, HAILO_INVALID_OPERATION,
+        "Softmax op is supported only when input width ({}) is equal to output width ({})",
+        input_metadata.shape.width, output_metadata.shape.width);
+    return HAILO_SUCCESS;
+}
+
+hailo_status SoftmaxOpMetadata::validate_format_info()
+{
+    auto &input_metadata = m_inputs_metadata.begin()->second;
+    auto &output_metadata = m_outputs_metadata.begin()->second;
+
     CHECK(
         ((input_metadata.format.flags & HAILO_FORMAT_FLAGS_QUANTIZED) == 0) && ((output_metadata.format.flags & HAILO_FORMAT_FLAGS_QUANTIZED) == 0),
         HAILO_INVALID_OPERATION, "Softmax op is supported only on dequantized data");
@@ -150,25 +183,16 @@ hailo_status SoftmaxPostProcessOp::validate_metadata()
     CHECK(
         ((input_metadata.format.order == HAILO_FORMAT_ORDER_NHWC) &&  (output_metadata.format.order == HAILO_FORMAT_ORDER_NHWC)) ||
         ((input_metadata.format.order == HAILO_FORMAT_ORDER_NC) && (output_metadata.format.order == HAILO_FORMAT_ORDER_NC)),
-        HAILO_INVALID_OPERATION, "Softmax op is not supported for src format order ({}) and dst format order ({})",
+        HAILO_INVALID_OPERATION, "Softmax op is not supported for input format order ({}) and output format order ({})",
         HailoRTCommon::get_format_order_str(input_metadata.format.order),
         HailoRTCommon::get_format_order_str(output_metadata.format.order));
 
-    CHECK(input_metadata.shape.features == output_metadata.shape.features, HAILO_INVALID_OPERATION,
-        "Softmax op is supported only when src num of features ({}) is equal to dst num of features ({})",
-        input_metadata.shape.features, output_metadata.shape.features);
-    CHECK(input_metadata.shape.height == output_metadata.shape.height, HAILO_INVALID_OPERATION,
-        "Softmax op is supported only when src height ({}) is equal to dst height ({})",
-        input_metadata.shape.height, output_metadata.shape.height);
-    CHECK(input_metadata.shape.width == output_metadata.shape.width, HAILO_INVALID_OPERATION,
-        "Softmax op is supported only when src width ({}) is equal to dst width ({})",
-        input_metadata.shape.width, output_metadata.shape.width);
     CHECK(input_metadata.format.type == HAILO_FORMAT_TYPE_FLOAT32,
-        HAILO_INVALID_OPERATION, "Src format type {} is not valid. Must be {}",
+        HAILO_INVALID_OPERATION, "The given input format type {} is not supported, should be {}",
         HailoRTCommon::get_format_type_str(input_metadata.format.type),
         HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_FLOAT32));
     CHECK(output_metadata.format.type == HAILO_FORMAT_TYPE_FLOAT32,
-        HAILO_INVALID_OPERATION, "Dst format type {} is not valid. Must be {}",
+        HAILO_INVALID_OPERATION, "The given output format type {} is not valid, should be {}",
         HailoRTCommon::get_format_type_str(output_metadata.format.type),
         HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_FLOAT32));
     CHECK(!(HAILO_FORMAT_FLAGS_HOST_ARGMAX & output_metadata.format.flags), HAILO_INVALID_ARGUMENT, "Output {} is marked as argmax, which is not supported for this model.",
@@ -179,10 +203,47 @@ hailo_status SoftmaxPostProcessOp::validate_metadata()
     return HAILO_SUCCESS;
 }
 
-Expected<std::shared_ptr<Op>> SoftmaxPostProcessOp::create(const std::map<std::string, BufferMetaData> &inputs_metadata,
-    std::map<std::string, BufferMetaData> &outputs_metadata)
+hailo_format_t SoftmaxOpMetadata::expand_output_format_autos(const hailo_format_t &output_format, const hailo_format_t &input_format)
+{
+    auto format = output_format;
+
+    // Type should be float32, after de-quantization, and order NHWC or NC in softmax
+    if (format.type == HAILO_FORMAT_TYPE_AUTO) {
+        format.type = HAILO_FORMAT_TYPE_FLOAT32;
+    }
+    if (format.order == HAILO_FORMAT_ORDER_AUTO) {
+        format.order = HailoRTDefaults::get_default_host_format_order(input_format);
+    }
+    return format;
+}
+
+Expected<hailo_vstream_info_t> SoftmaxOpMetadata::get_output_vstream_info()
+{
+    CHECK_AS_EXPECTED((m_outputs_metadata.size() == 1), HAILO_INVALID_OPERATION, "{} has more than 1 output", m_name);
+
+    hailo_vstream_info_t vstream_info{};
+    strncpy(vstream_info.name, m_outputs_metadata.begin()->first.c_str(), m_outputs_metadata.begin()->first.length() + 1);
+    strncpy(vstream_info.network_name, m_network_name.c_str(), m_network_name.length() + 1);
+    vstream_info.direction = HAILO_D2H_STREAM;
+    vstream_info.format.order = m_outputs_metadata.begin()->second.format.order;
+    vstream_info.format.type = m_outputs_metadata.begin()->second.format.type;
+    vstream_info.format.flags = HAILO_FORMAT_FLAGS_NONE;
+
+    assert(m_inputs_metadata.size() == 1);
+    vstream_info.format = SoftmaxOpMetadata::expand_output_format_autos(vstream_info.format, m_inputs_metadata.begin()->second.format);
+    vstream_info.shape = m_outputs_metadata.begin()->second.shape;
+
+    vstream_info.quant_info = m_inputs_metadata.begin()->second.quant_info;
+
+    return vstream_info;
+}
+
+Expected<std::shared_ptr<Op>> SoftmaxPostProcessOp::create(std::shared_ptr<SoftmaxOpMetadata> metadata)
 {
-    auto op = std::shared_ptr<SoftmaxPostProcessOp>(new (std::nothrow) SoftmaxPostProcessOp(inputs_metadata, outputs_metadata));
+    auto status = metadata->validate_format_info();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    auto op = std::shared_ptr<SoftmaxPostProcessOp>(new (std::nothrow) SoftmaxPostProcessOp(metadata));
     CHECK_AS_EXPECTED(op != nullptr, HAILO_OUT_OF_HOST_MEMORY);
 
     return std::shared_ptr<Op>(std::move(op));
index 9ebdb8a2350b6c75c14fb8ac02fb7cec74909bc2..48b26ff32b735c316fbf5a561b3dd66e96d1493b 100644 (file)
@@ -6,7 +6,7 @@
  * @file softmax_post_process.hpp
  * @brief: Softmax op perform softmax op as described: https://www.tensorflow.org/api_docs/python/tf/nn/softmax
  * A few notes:
- *  - Support only on features axis 
+ *  - Supported only along the features axis
  *  - Support only on NHWC and NC input data order
  **/
 
@@ -15,6 +15,8 @@
 
 #include "hailo/hailort.h"
 #include "net_flow/ops/op.hpp"
+#include "net_flow/ops/op_metadata.hpp"
+
 #include "common/utils.hpp"
 #include "hailo/quantization.hpp"
 
@@ -34,13 +36,34 @@ constexpr std::size_t SOFTMAX_NUMBER_OF_DSTS {1};
 typedef hailo_status (*SoftmaxFunction)(const BufferMetaData &input_metadata, const BufferMetaData &output_metadata,
     const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs);
 
+class SoftmaxOpMetadata : public OpMetadata
+{
+public:
+    static Expected<std::shared_ptr<OpMetadata>> create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                                                        const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                                                        const std::string &network_name);
+    std::string get_op_description() override;
+    hailo_status validate_format_info() override;
+    static hailo_format_t expand_output_format_autos(const hailo_format_t &output_format, const hailo_format_t &input_format);
+
+    virtual Expected<hailo_vstream_info_t> get_output_vstream_info() override;
+
+private:
+    SoftmaxOpMetadata(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                        const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                        const std::string &network_name)
+        : OpMetadata(inputs_metadata, outputs_metadata, "Softmax-Post-Process", network_name, OperationType::SOFTMAX)
+    {}
+
+    hailo_status validate_params() override;
+};
+
 class SoftmaxPostProcessOp : public Op
 {
 
 private:
-    SoftmaxPostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                         const std::map<std::string, BufferMetaData> &outputs_metadata)
-        : Op(inputs_metadata, outputs_metadata, "Softmax-Post-Process")
+    SoftmaxPostProcessOp(std::shared_ptr<SoftmaxOpMetadata> metadata)
+        : Op(static_cast<std::shared_ptr<OpMetadata>>(metadata))
     {}
 
     template<typename src_type = float32_t, typename dst_type = float32_t>
@@ -58,7 +81,7 @@ private:
             dst_type *src_row = src_ptr + (r * src_row_size);
             src_type *dst_row = dst_ptr + (r * dst_row_size);
             for (uint32_t w = 0; w < input_metadata.shape.width; w++) { // W axis - coloums
-                dst_type *src_col = src_row + (w * src_width_size); 
+                dst_type *src_col = src_row + (w * src_width_size);
                 src_type *dst_col = dst_row + (w * dst_width_size);
                 // In order to avoid overflows, we will perform the following:
                 // For each HW, we will find the maximal c value and then we will substract this value from
@@ -113,17 +136,14 @@ private:
         }
         return HAILO_SUCCESS;
     }
-    
+
     static hailo_status execute_not_supported(const BufferMetaData &input_metadata, const BufferMetaData &output_metadata,
         const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs);
 
     public:
-        static Expected<std::shared_ptr<Op>> create(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                                                    std::map<std::string, BufferMetaData> &outputs_metadata);
+        static Expected<std::shared_ptr<Op>> create(std::shared_ptr<SoftmaxOpMetadata> metadata);
         virtual hailo_status execute(const std::map<std::string, MemoryView> &inputs,
             std::map<std::string, MemoryView> &outputs) override;
-        virtual std::string get_op_description() override;
-        hailo_status validate_metadata() override;
 
         // A 3D array of softmax functions to call:
         // 1st dim represent the data format order (NHWC and NC are supported)
index 4504630148cf9f34b3aa31b7536033a1d0bfe982..e56d3b77ad02c4c065e9ab2b2064fccdb078b3c1 100644 (file)
@@ -16,57 +16,93 @@ namespace hailort
 namespace net_flow
 {
 
-hailo_status SSDPostProcessOp::validate_metadata()
+Expected<std::shared_ptr<OpMetadata>> SSDOpMetadata::create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                                                            const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                                                            const NmsPostProcessConfig &nms_post_process_config,
+                                                            const SSDPostProcessConfig &ssd_post_process_config,
+                                                            const std::string &network_name)
 {
-    auto status = NmsPostProcessOp::validate_metadata();
-    if (HAILO_SUCCESS != status) {
-        return status;
-    }
+    auto op_metadata = std::shared_ptr<SSDOpMetadata>(new (std::nothrow) SSDOpMetadata(inputs_metadata, outputs_metadata, nms_post_process_config,
+        ssd_post_process_config, network_name));
+    CHECK_AS_EXPECTED(op_metadata != nullptr, HAILO_OUT_OF_HOST_MEMORY);
 
-    return HAILO_SUCCESS;
+    auto status = op_metadata->validate_params();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return std::shared_ptr<OpMetadata>(std::move(op_metadata));
 }
 
-Expected<std::shared_ptr<Op>> SSDPostProcessOp::create(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                                                       const std::map<std::string, BufferMetaData> &outputs_metadata,
-                                                       const NmsPostProcessConfig &nms_post_process_config,
-                                                       const SSDPostProcessConfig &ssd_post_process_config)
+hailo_status SSDOpMetadata::validate_params()
 {
+    auto status = NmsOpMetadata::validate_params();
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
     // Validate each anchor is mapped by reg and cls inputs
-    for (const auto &reg_to_cls_name : ssd_post_process_config.reg_to_cls_inputs) {
-        CHECK_AS_EXPECTED(ssd_post_process_config.anchors.count(reg_to_cls_name.first), HAILO_INVALID_ARGUMENT,
+    for (const auto &reg_to_cls_name : m_ssd_config.reg_to_cls_inputs) {
+        CHECK(m_ssd_config.anchors.count(reg_to_cls_name.first), HAILO_INVALID_ARGUMENT,
             "SSDPostProcessOp: anchors does not contain reg layer {}", reg_to_cls_name.first);
-        CHECK_AS_EXPECTED(ssd_post_process_config.anchors.count(reg_to_cls_name.second), HAILO_INVALID_ARGUMENT,
+        CHECK(m_ssd_config.anchors.count(reg_to_cls_name.second), HAILO_INVALID_ARGUMENT,
             "SSDPostProcessOp: anchors does not contain cls layer {}", reg_to_cls_name.second);
-        const auto &reg_anchors = ssd_post_process_config.anchors.at(reg_to_cls_name.first);
-        const auto &cls_anchors = ssd_post_process_config.anchors.at(reg_to_cls_name.second);
-        CHECK_AS_EXPECTED(reg_anchors.size() == cls_anchors.size(), HAILO_INVALID_ARGUMENT,
+        assert(contains(m_ssd_config.anchors, reg_to_cls_name.first));
+        const auto &reg_anchors = m_ssd_config.anchors.at(reg_to_cls_name.first);
+        assert(contains(m_ssd_config.anchors, reg_to_cls_name.second));
+        const auto &cls_anchors = m_ssd_config.anchors.at(reg_to_cls_name.second);
+        CHECK(reg_anchors.size() == cls_anchors.size(), HAILO_INVALID_ARGUMENT,
             "SSDPostProcessOp: reg and cls layers have different number of anchors. reg: #{}, cls: #{}",
                 reg_anchors.size(), cls_anchors.size());
         for (size_t i = 0; i < reg_anchors.size(); ++i) {
             auto reg_anchor = reg_anchors[i];
             auto cls_anchor = cls_anchors[i];
-            CHECK_AS_EXPECTED(reg_anchor == cls_anchor, HAILO_INVALID_ARGUMENT,
-                "SSDPostProcessOp: reg and cls layers have differenet anchors. reg: {}, cls: {}",
+            CHECK(reg_anchor == cls_anchor, HAILO_INVALID_ARGUMENT,
+                "SSDPostProcessOp: reg and cls layers have different anchors. reg: {}, cls: {}",
                     reg_anchor, cls_anchor);
         }
     }
 
     // Validate regs and clss pairs have same shapes
-    for (const auto &reg_to_cls_name : ssd_post_process_config.reg_to_cls_inputs) {
-        CHECK_AS_EXPECTED(inputs_metadata.count(reg_to_cls_name.first), HAILO_INVALID_ARGUMENT,
+    for (const auto &reg_to_cls_name : m_ssd_config.reg_to_cls_inputs) {
+        CHECK(m_inputs_metadata.count(reg_to_cls_name.first), HAILO_INVALID_ARGUMENT,
             "SSDPostProcessOp: inputs_metadata does not contain reg layer {}", reg_to_cls_name.first);
-        CHECK_AS_EXPECTED(inputs_metadata.count(reg_to_cls_name.second), HAILO_INVALID_ARGUMENT,
+        CHECK(m_inputs_metadata.count(reg_to_cls_name.second), HAILO_INVALID_ARGUMENT,
             "SSDPostProcessOp: inputs_metadata does not contain cls layer {}", reg_to_cls_name.second);
-        const auto &reg_input_metadata = inputs_metadata.at(reg_to_cls_name.first);
-        const auto &cls_input_metadata = inputs_metadata.at(reg_to_cls_name.second);
+        assert(contains(m_inputs_metadata, reg_to_cls_name.first));
+        const auto &reg_input_metadata = m_inputs_metadata.at(reg_to_cls_name.first);
+        assert(contains(m_inputs_metadata, reg_to_cls_name.second));
+        const auto &cls_input_metadata = m_inputs_metadata.at(reg_to_cls_name.second);
         // NOTE: padded shape might be different because features might be different,
         // and padding is added when width*features % 8 != 0
-        CHECK_AS_EXPECTED((reg_input_metadata.shape.height == cls_input_metadata.shape.height)
+        CHECK((reg_input_metadata.shape.height == cls_input_metadata.shape.height)
             && (reg_input_metadata.shape.width == cls_input_metadata.shape.width),
             HAILO_INVALID_ARGUMENT, "SSDPostProcessOp: reg input {} has different shape than cls input {}",
                 reg_to_cls_name.first, reg_to_cls_name.second);
     }
-    auto op = std::shared_ptr<SSDPostProcessOp>(new (std::nothrow) SSDPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, ssd_post_process_config));
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status SSDOpMetadata::validate_format_info()
+{
+    return NmsOpMetadata::validate_format_info();
+}
+
+std::string SSDOpMetadata::get_op_description()
+{
+    auto nms_config_info = get_nms_config_description();
+    auto config_info = fmt::format("Op {}, Name: {}, {}, Image height: {:.2f}, Image width: {:.2f}, Centers scales factor: {}, "
+        "Bbox dimension scale factor: {}, Normalize boxes: {}", OpMetadata::get_operation_type_str(m_type), m_name, nms_config_info,
+        m_ssd_config.image_height, m_ssd_config.image_width, m_ssd_config.centers_scale_factor, m_ssd_config.bbox_dimensions_scale_factor,
+        m_ssd_config.normalize_boxes);
+    return config_info;
+}
+
+Expected<std::shared_ptr<Op>> SSDPostProcessOp::create(std::shared_ptr<SSDOpMetadata> metadata)
+{
+    auto status = metadata->validate_format_info();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    auto op = std::shared_ptr<SSDPostProcessOp>(new (std::nothrow) SSDPostProcessOp(metadata));
     CHECK_AS_EXPECTED(op != nullptr, HAILO_OUT_OF_HOST_MEMORY);
 
     return std::shared_ptr<Op>(std::move(op));
@@ -74,16 +110,16 @@ Expected<std::shared_ptr<Op>> SSDPostProcessOp::create(const std::map<std::strin
 
 hailo_status SSDPostProcessOp::execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
 {
-    CHECK(inputs.size() == m_ssd_config.anchors.size(), HAILO_INVALID_ARGUMENT,
+    CHECK(inputs.size() == m_metadata->ssd_config().anchors.size(), HAILO_INVALID_ARGUMENT,
         "Anchors vector count must be equal to data vector count. Anchors size is {}, data size is {}",
-            m_ssd_config.anchors.size(), inputs.size());
+            m_metadata->ssd_config().anchors.size(), inputs.size());
 
     std::vector<DetectionBbox> detections;
-    std::vector<uint32_t> classes_detections_count(m_nms_config.number_of_classes, 0);
-    detections.reserve(m_nms_config.max_proposals_per_class * m_nms_config.number_of_classes);
-    for (const auto &reg_to_cls : m_ssd_config.reg_to_cls_inputs) {
-        assert(inputs.count(reg_to_cls.first));
-        assert(inputs.count(reg_to_cls.second));
+    std::vector<uint32_t> classes_detections_count(m_metadata->nms_config().number_of_classes, 0);
+    detections.reserve(m_metadata->nms_config().max_proposals_per_class * m_metadata->nms_config().number_of_classes);
+    for (const auto &reg_to_cls : m_metadata->ssd_config().reg_to_cls_inputs) {
+        assert(contains(inputs, reg_to_cls.first));
+        assert(contains(inputs, reg_to_cls.second));
         auto status = extract_detections(reg_to_cls.first, reg_to_cls.second,
             inputs.at(reg_to_cls.first), inputs.at(reg_to_cls.second),
             detections, classes_detections_count);
@@ -98,14 +134,20 @@ hailo_status SSDPostProcessOp::extract_detections(const std::string &reg_input_n
     const MemoryView &reg_buffer, const MemoryView &cls_buffer,
     std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
 {
-    const auto &reg_shape = m_inputs_metadata[reg_input_name].shape;
-    const auto &reg_padded_shape = m_inputs_metadata[reg_input_name].padded_shape;
-    const auto &cls_padded_shape = m_inputs_metadata[cls_input_name].padded_shape;
+    const auto &inputs_metadata = m_metadata->inputs_metadata();
+    const auto &ssd_config = m_metadata->ssd_config();
+    const auto &nms_config = m_metadata->nms_config();
 
-    const uint32_t X_INDEX = m_ssd_config.tx_index;
-    const uint32_t Y_INDEX = m_ssd_config.ty_index;
-    const uint32_t W_INDEX = m_ssd_config.tw_index;
-    const uint32_t H_INDEX = m_ssd_config.th_index;
+    assert(contains(inputs_metadata, reg_input_name));
+    assert(contains(inputs_metadata, cls_input_name));
+    const auto &reg_shape = inputs_metadata.at(reg_input_name).shape;
+    const auto &reg_padded_shape = inputs_metadata.at(reg_input_name).padded_shape;
+    const auto &cls_padded_shape = inputs_metadata.at(cls_input_name).padded_shape;
+
+    const uint32_t X_INDEX = ssd_config.tx_index;
+    const uint32_t Y_INDEX = ssd_config.ty_index;
+    const uint32_t W_INDEX = ssd_config.tw_index;
+    const uint32_t H_INDEX = ssd_config.th_index;
 
     const uint32_t X_OFFSET = X_INDEX * reg_padded_shape.width;
     const uint32_t Y_OFFSET = Y_INDEX * reg_padded_shape.width;
@@ -114,13 +156,13 @@ hailo_status SSDPostProcessOp::extract_detections(const std::string &reg_input_n
 
     // Each layer anchors vector is structured as {w,h} pairs.
     // For example, if we have a vector of size 6 (default SSD vector) then we have 3 anchors for this layer.
-    assert(m_ssd_config.anchors.count(reg_input_name));
-    assert(m_ssd_config.anchors.count(cls_input_name));
-    const auto &layer_anchors = m_ssd_config.anchors[reg_input_name];
+    assert(contains(ssd_config.anchors, reg_input_name));
+    assert(contains(ssd_config.anchors, cls_input_name));
+    const auto &layer_anchors = ssd_config.anchors.at(reg_input_name);
     assert(layer_anchors.size() % 2 == 0);
     const size_t num_of_anchors = (layer_anchors.size() / 2);
     // TODO: HRT-11044 support mixed data types
-    auto data_size_in_bytes = HailoRTCommon::get_data_bytes(m_inputs_metadata.begin()->second.format.type);
+    auto data_size_in_bytes = HailoRTCommon::get_data_bytes(inputs_metadata.begin()->second.format.type);
 
     // Validate reg buffer size
     static const uint32_t reg_entry_size = 4;
@@ -130,7 +172,7 @@ hailo_status SSDPostProcessOp::extract_detections(const std::string &reg_input_n
         "Failed to extract_detections, reg {} buffer_size should be {}, but is {}", reg_input_name, buffer_size, reg_buffer.size());
 
     // Validate cls buffer size
-    const uint32_t cls_entry_size = m_nms_config.number_of_classes;
+    const uint32_t cls_entry_size = nms_config.number_of_classes;
     number_of_entries = cls_padded_shape.height * cls_padded_shape.width * num_of_anchors;
     buffer_size = number_of_entries * cls_entry_size * data_size_in_bytes;
     CHECK(buffer_size == cls_buffer.size(), HAILO_INVALID_ARGUMENT,
@@ -152,7 +194,7 @@ hailo_status SSDPostProcessOp::extract_detections(const std::string &reg_input_n
                 auto xcenter_a = static_cast<float32_t>(col) * anchor_w_stride + anchor_w_offset;
                 auto ycenter_a = static_cast<float32_t>(row) * anchor_h_stride + anchor_h_offset;
                 // Decode bboxes
-                if (m_inputs_metadata[reg_input_name].format.type == HAILO_FORMAT_TYPE_UINT8) {
+                if (inputs_metadata.at(reg_input_name).format.type == HAILO_FORMAT_TYPE_UINT8) {
                     auto status = extract_bbox_detections<float32_t, uint8_t>(
                         reg_input_name, cls_input_name,
                         reg_buffer, cls_buffer,
@@ -163,7 +205,7 @@ hailo_status SSDPostProcessOp::extract_detections(const std::string &reg_input_n
                         cls_idx, wa, ha, xcenter_a, ycenter_a,
                         detections, classes_detections_count);
                     CHECK_SUCCESS(status);
-                } else if (m_inputs_metadata[reg_input_name].format.type == HAILO_FORMAT_TYPE_UINT16) {
+                } else if (inputs_metadata.at(reg_input_name).format.type == HAILO_FORMAT_TYPE_UINT16) {
                     auto status = extract_bbox_detections<float32_t, uint16_t>(
                         reg_input_name, cls_input_name,
                         reg_buffer, cls_buffer,
@@ -174,7 +216,7 @@ hailo_status SSDPostProcessOp::extract_detections(const std::string &reg_input_n
                         cls_idx, wa, ha, xcenter_a, ycenter_a,
                         detections, classes_detections_count);
                     CHECK_SUCCESS(status);
-                } else if (m_inputs_metadata[reg_input_name].format.type == HAILO_FORMAT_TYPE_FLOAT32) {
+                } else if (inputs_metadata.at(reg_input_name).format.type == HAILO_FORMAT_TYPE_FLOAT32) {
                     // For testing - TODO: HRT-9341 - Remove after generator tests are in, and return error.
                     auto status = extract_bbox_detections<float32_t, float32_t>(
                         reg_input_name, cls_input_name,
@@ -188,22 +230,13 @@ hailo_status SSDPostProcessOp::extract_detections(const std::string &reg_input_n
                     CHECK_SUCCESS(status);
                 } else {
                     CHECK_SUCCESS(HAILO_INVALID_ARGUMENT, "SSD post-process received invalid reg input type: {}",
-                        m_inputs_metadata[reg_input_name].format.type);
+                        inputs_metadata.at(reg_input_name).format.type);
                 }
             }
         }
     }
-    
-    return HAILO_SUCCESS;
-}
 
-std::string SSDPostProcessOp::get_op_description()
-{
-    auto nms_config_info = get_nms_config_description();
-    auto config_info = fmt::format("Name: {}, {}, Image height: {:.2f}, Image width: {:.2f}, Centers scales factor: {}, "
-                        "Bbox dimension scale factor: {}, Normalize boxes: {}", m_name, nms_config_info, m_ssd_config.image_height, m_ssd_config.image_width,
-                        m_ssd_config.centers_scale_factor, m_ssd_config.bbox_dimensions_scale_factor, m_ssd_config.normalize_boxes);
-    return config_info;
+    return HAILO_SUCCESS;
 }
 
 }
index bdce0142715cae3339ac80521115a8eef07d43fd..114e14d2dc8372406707111f73ef0c8c453a2f68 100644 (file)
@@ -13,6 +13,7 @@
 #define _HAILO_SSD_POST_PROCESS_HPP_
 
 #include "net_flow/ops/nms_post_process.hpp"
+#include "net_flow/ops/op_metadata.hpp"
 
 namespace hailort
 {
@@ -49,18 +50,39 @@ struct SSDPostProcessConfig
     bool normalize_boxes = false;
 };
 
+class SSDOpMetadata : public NmsOpMetadata
+{
+public:
+    static Expected<std::shared_ptr<OpMetadata>> create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                                                        const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                                                        const NmsPostProcessConfig &nms_post_process_config,
+                                                        const SSDPostProcessConfig &ssd_post_process_config,
+                                                        const std::string &network_name);
+    std::string get_op_description() override;
+    hailo_status validate_format_info() override;
+    SSDPostProcessConfig &ssd_config() { return m_ssd_config;};
+
+private:
+    SSDPostProcessConfig m_ssd_config;
+    SSDOpMetadata(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                       const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                       const NmsPostProcessConfig &nms_post_process_config,
+                       const SSDPostProcessConfig &ssd_post_process_config,
+                       const std::string &network_name)
+        : NmsOpMetadata(inputs_metadata, outputs_metadata, nms_post_process_config, "SSD-Post-Process", network_name, OperationType::SSD)
+        , m_ssd_config(ssd_post_process_config)
+    {}
+
+    hailo_status validate_params() override;
+};
+
 class SSDPostProcessOp : public NmsPostProcessOp
 {
 
 public:
-    static Expected<std::shared_ptr<Op>> create(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                                                const std::map<std::string, BufferMetaData> &outputs_metadata,
-                                                const NmsPostProcessConfig &nms_post_process_config,
-                                                const SSDPostProcessConfig &ssd_post_process_config);
+    static Expected<std::shared_ptr<Op>> create(std::shared_ptr<SSDOpMetadata> metadata);
 
     hailo_status execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs) override;
-    std::string get_op_description() override;
-    hailo_status validate_metadata() override; // TODO: HRT-10676
 
     static const uint32_t DEFAULT_Y_OFFSET_IDX = 0;
     static const uint32_t DEFAULT_X_OFFSET_IDX = 1;
@@ -68,39 +90,36 @@ public:
     static const uint32_t DEFAULT_W_OFFSET_IDX = 3;
 
 private:
-    SSDPostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                     const std::map<std::string, BufferMetaData> &outputs_metadata,
-                     const NmsPostProcessConfig &nms_post_process_config,
-                     const SSDPostProcessConfig &ssd_post_process_config)
-        : NmsPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, "SSD-Post-Process")
-        , m_ssd_config(ssd_post_process_config)
+    SSDPostProcessOp(std::shared_ptr<SSDOpMetadata> metadata)
+        : NmsPostProcessOp(static_cast<std::shared_ptr<NmsOpMetadata>>(metadata))
+        , m_metadata(metadata)
     {}
+    std::shared_ptr<SSDOpMetadata> m_metadata;
 
-    SSDPostProcessConfig m_ssd_config;
-
-    template<typename HostType = float32_t, typename DeviceType>
-    void extract_bbox_classes(const hailo_bbox_float32_t &dims_bbox, DeviceType *cls_data, const BufferMetaData &cls_metadata, uint32_t cls_index,
+    template<typename DstType = float32_t, typename SrcType>
+    void extract_bbox_classes(const hailo_bbox_float32_t &dims_bbox, SrcType *cls_data, const BufferMetaData &cls_metadata, uint32_t cls_index,
         std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
     {
-        if (m_nms_config.cross_classes) {
-            // Pre-NMS optimization. If NMS checks IOU over different classes, only the maximum class is relevant
-            auto max_id_score_pair = get_max_class<HostType, DeviceType>(cls_data, cls_index, 0, 1,
+        const auto &nms_config = m_metadata->nms_config();
+        if (nms_config.cross_classes) {
+            // Pre-NMS optimization. If NMS checks IoU over different classes, only the maximum class is relevant
+            auto max_id_score_pair = get_max_class<DstType, SrcType>(cls_data, cls_index, 0, 1,
                 cls_metadata.quant_info, cls_metadata.padded_shape.width);
             auto bbox = dims_bbox;
             bbox.score = max_id_score_pair.second;
-            if (max_id_score_pair.second >= m_nms_config.nms_score_th) {
+            if (max_id_score_pair.second >= nms_config.nms_score_th) {
                 detections.emplace_back(DetectionBbox(bbox, max_id_score_pair.first));
                 classes_detections_count[max_id_score_pair.first]++;
             }
         } else {
-            for (uint32_t class_index = 0; class_index < m_nms_config.number_of_classes; class_index++) {
+            for (uint32_t class_index = 0; class_index < nms_config.number_of_classes; class_index++) {
                 auto class_id = class_index;
-                if (m_nms_config.background_removal) {
-                    if (m_nms_config.background_removal_index == class_index) {
+                if (nms_config.background_removal) {
+                    if (nms_config.background_removal_index == class_index) {
                         // Ignore if class_index is background_removal_index
                         continue;
                     }
-                    else if (0 == m_nms_config.background_removal_index) {
+                    else if (0 == nms_config.background_removal_index) {
                         // background_removal_index will always be the first or last index.
                         // If it is the first one we need to reduce all classes id's in 1.
                         // If it is the last one we just ignore it in the previous if case.
@@ -109,9 +128,9 @@ private:
                 }
 
                 auto class_entry_idx = cls_index + (class_index * cls_metadata.padded_shape.width);
-                auto class_score = Quantization::dequantize_output<HostType, DeviceType>(cls_data[class_entry_idx],
+                auto class_score = Quantization::dequantize_output<DstType, SrcType>(cls_data[class_entry_idx],
                     cls_metadata.quant_info);
-                if (class_score < m_nms_config.nms_score_th) {
+                if (class_score < nms_config.nms_score_th) {
                     continue;
                 }
                 auto bbox = dims_bbox;
@@ -122,25 +141,28 @@ private:
         }
     }
 
-    template<typename HostType = float32_t, typename DeviceType>
+    template<typename DstType = float32_t, typename SrcType>
     hailo_status extract_bbox_detections(const std::string &reg_input_name, const std::string &cls_input_name,
         const MemoryView &reg_buffer, const MemoryView &cls_buffer,
         uint64_t x_index, uint64_t y_index, uint64_t w_index, uint64_t h_index,
         uint32_t cls_index, float32_t wa, float32_t ha, float32_t xcenter_a, float32_t ycenter_a,
         std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
     {
-        const auto &shape = m_inputs_metadata[reg_input_name].shape;
-        const auto &reg_quant_info = m_inputs_metadata[reg_input_name].quant_info;
-        DeviceType *reg_data = (DeviceType*)reg_buffer.data();
+        const auto &inputs_metadata = m_metadata->inputs_metadata();
+        const auto &ssd_config = m_metadata->ssd_config();
+        assert(contains(inputs_metadata, reg_input_name));
+        const auto &shape = inputs_metadata.at(reg_input_name).shape;
+        const auto &reg_quant_info = inputs_metadata.at(reg_input_name).quant_info;
+        SrcType *reg_data = (SrcType*)reg_buffer.data();
         auto *cls_data = cls_buffer.data();
-        auto tx = Quantization::dequantize_output<HostType, DeviceType>(reg_data[x_index], reg_quant_info);
-        auto ty = Quantization::dequantize_output<HostType, DeviceType>(reg_data[y_index], reg_quant_info);
-        auto tw = Quantization::dequantize_output<HostType, DeviceType>(reg_data[w_index], reg_quant_info);
-        auto th = Quantization::dequantize_output<HostType, DeviceType>(reg_data[h_index], reg_quant_info);
-        tx /= static_cast<float32_t>(m_ssd_config.centers_scale_factor);
-        ty /= static_cast<float32_t>(m_ssd_config.centers_scale_factor);
-        tw /= static_cast<float32_t>(m_ssd_config.bbox_dimensions_scale_factor);
-        th /= static_cast<float32_t>(m_ssd_config.bbox_dimensions_scale_factor);
+        auto tx = Quantization::dequantize_output<DstType, SrcType>(reg_data[x_index], reg_quant_info);
+        auto ty = Quantization::dequantize_output<DstType, SrcType>(reg_data[y_index], reg_quant_info);
+        auto tw = Quantization::dequantize_output<DstType, SrcType>(reg_data[w_index], reg_quant_info);
+        auto th = Quantization::dequantize_output<DstType, SrcType>(reg_data[h_index], reg_quant_info);
+        tx /= static_cast<float32_t>(ssd_config.centers_scale_factor);
+        ty /= static_cast<float32_t>(ssd_config.centers_scale_factor);
+        tw /= static_cast<float32_t>(ssd_config.bbox_dimensions_scale_factor);
+        th /= static_cast<float32_t>(ssd_config.bbox_dimensions_scale_factor);
         auto w = exp(tw) * wa;
         auto h = exp(th) * ha;
         auto x_center = tx * wa + xcenter_a;
@@ -152,26 +174,27 @@ private:
 
         // TODO: HRT-10033 - Fix support for clip_boxes and normalize_output
         // Currently `normalize_boxes` is always false
-        if (m_ssd_config.normalize_boxes) {
+        if (ssd_config.normalize_boxes) {
             x_min = Quantization::clip(x_min, 0, static_cast<float32_t>(shape.width-1));
             y_min = Quantization::clip(y_min, 0, static_cast<float32_t>(shape.height-1));
             x_max = Quantization::clip(x_max, 0, static_cast<float32_t>(shape.width-1));
             y_max = Quantization::clip(y_max, 0, static_cast<float32_t>(shape.height-1));
         }
         hailo_bbox_float32_t dims_bbox{y_min, x_min, y_max, x_max, 0};
-        const auto &cls_metadata = m_inputs_metadata[cls_input_name];
+        assert(contains(inputs_metadata, cls_input_name));
+        const auto &cls_metadata = inputs_metadata.at(cls_input_name);
         if (cls_metadata.format.type == HAILO_FORMAT_TYPE_UINT8) {
-            extract_bbox_classes<HostType, uint8_t>(dims_bbox, (uint8_t*)cls_data, m_inputs_metadata[cls_input_name],
+            extract_bbox_classes<DstType, uint8_t>(dims_bbox, (uint8_t*)cls_data, cls_metadata,
                 cls_index, detections, classes_detections_count);
         } else if (cls_metadata.format.type == HAILO_FORMAT_TYPE_UINT16) {
-            extract_bbox_classes<HostType, uint16_t>(dims_bbox, (uint16_t*)cls_data, m_inputs_metadata[cls_input_name],
+            extract_bbox_classes<DstType, uint16_t>(dims_bbox, (uint16_t*)cls_data, cls_metadata,
                 cls_index, detections, classes_detections_count);
         } else if (cls_metadata.format.type == HAILO_FORMAT_TYPE_FLOAT32) {
-            extract_bbox_classes<HostType, float32_t>(dims_bbox, (float32_t*)cls_data, m_inputs_metadata[cls_input_name],
+            extract_bbox_classes<DstType, float32_t>(dims_bbox, (float32_t*)cls_data, cls_metadata,
                 cls_index, detections, classes_detections_count);
         } else {
             CHECK_SUCCESS(HAILO_INVALID_ARGUMENT, "SSD post-process received invalid cls input type: {}",
-                m_inputs_metadata[cls_input_name].format.type);
+                cls_metadata.format.type);
         }
         return HAILO_SUCCESS;
     }
@@ -185,7 +208,7 @@ private:
      * @param[in] cls_buffer                    Buffer containing the classes ids after inference.
      * @param[inout] detections                 A vector of ::DetectionBbox objects, to add the detected bboxes to.
      * @param[inout] classes_detections_count   A vector of uint32_t, to add count of detections count per class to.
-     * 
+     *
      * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
     */
     hailo_status extract_detections(const std::string &reg_input_name, const std::string &cls_input_name,
@@ -193,7 +216,6 @@ private:
         std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count);
 };
 
-
 }
 
 }
diff --git a/hailort/libhailort/src/net_flow/ops/stb_image_resize.h b/hailort/libhailort/src/net_flow/ops/stb_image_resize.h
new file mode 100644 (file)
index 0000000..1ed0e96
--- /dev/null
@@ -0,0 +1,2639 @@
+// Note:
+// * This module is taken from https://github.com/nothings/stb
+//   * Original file: https://github.com/nothings/stb/blob/master/stb_image_resize.h
+// * See end of file for license information.
+
+/* stb_image_resize - v0.97 - public domain image resizing
+   by Jorge L Rodriguez (@VinoBS) - 2014
+   http://github.com/nothings/stb
+
+   Written with emphasis on usability, portability, and efficiency. (No
+   SIMD or threads, so it be easily outperformed by libs that use those.)
+   Only scaling and translation is supported, no rotations or shears.
+   Easy API downsamples w/Mitchell filter, upsamples w/cubic interpolation.
+
+   COMPILING & LINKING
+      In one C/C++ file that #includes this file, do this:
+         #define STB_IMAGE_RESIZE_IMPLEMENTATION
+      before the #include. That will create the implementation in that file.
+
+   QUICKSTART
+      stbir_resize_uint8(      input_pixels , in_w , in_h , 0,
+                               output_pixels, out_w, out_h, 0, num_channels)
+      stbir_resize_float(...)
+      stbir_resize_uint8_srgb( input_pixels , in_w , in_h , 0,
+                               output_pixels, out_w, out_h, 0,
+                               num_channels , alpha_chan  , 0)
+      stbir_resize_uint8_srgb_edgemode(
+                               input_pixels , in_w , in_h , 0,
+                               output_pixels, out_w, out_h, 0,
+                               num_channels , alpha_chan  , 0, STBIR_EDGE_CLAMP)
+                                                            // WRAP/REFLECT/ZERO
+
+   FULL API
+      See the "header file" section of the source for API documentation.
+
+   ADDITIONAL DOCUMENTATION
+
+      SRGB & FLOATING POINT REPRESENTATION
+         The sRGB functions presume IEEE floating point. If you do not have
+         IEEE floating point, define STBIR_NON_IEEE_FLOAT. This will use
+         a slower implementation.
+
+      MEMORY ALLOCATION
+         The resize functions here perform a single memory allocation using
+         malloc. To control the memory allocation, before the #include that
+         triggers the implementation, do:
+
+            #define STBIR_MALLOC(size,context) ...
+            #define STBIR_FREE(ptr,context)   ...
+
+         Each resize function makes exactly one call to malloc/free, so to use
+         temp memory, store the temp memory in the context and return that.
+
+      ASSERT
+         Define STBIR_ASSERT(boolval) to override assert() and not use assert.h
+
+      OPTIMIZATION
+         Define STBIR_SATURATE_INT to compute clamp values in-range using
+         integer operations instead of float operations. This may be faster
+         on some platforms.
+
+      DEFAULT FILTERS
+         For functions which don't provide explicit control over what filters
+         to use, you can change the compile-time defaults with
+
+            #define STBIR_DEFAULT_FILTER_UPSAMPLE     STBIR_FILTER_something
+            #define STBIR_DEFAULT_FILTER_DOWNSAMPLE   STBIR_FILTER_something
+
+         See stbir_filter in the header-file section for the list of filters.
+
+      NEW FILTERS
+         A number of 1D filter kernels are used. For a list of
+         supported filters see the stbir_filter enum. To add a new filter,
+         write a filter function and add it to stbir__filter_info_table.
+
+      PROGRESS
+         For interactive use with slow resize operations, you can install
+         a progress-report callback:
+
+            #define STBIR_PROGRESS_REPORT(val)   some_func(val)
+
+         The parameter val is a float which goes from 0 to 1 as progress is made.
+
+         For example:
+
+            static void my_progress_report(float progress);
+            #define STBIR_PROGRESS_REPORT(val) my_progress_report(val)
+
+            #define STB_IMAGE_RESIZE_IMPLEMENTATION
+            #include "stb_image_resize.h"
+
+            static void my_progress_report(float progress)
+            {
+               printf("Progress: %f%%\n", progress*100);
+            }
+
+      MAX CHANNELS
+         If your image has more than 64 channels, define STBIR_MAX_CHANNELS
+         to the max you'll have.
+
+      ALPHA CHANNEL
+         Most of the resizing functions provide the ability to control how
+         the alpha channel of an image is processed. The important things
+         to know about this:
+
+         1. The best mathematically-behaved version of alpha to use is
+         called "premultiplied alpha", in which the other color channels
+         have had the alpha value multiplied in. If you use premultiplied
+         alpha, linear filtering (such as image resampling done by this
+         library, or performed in texture units on GPUs) does the "right
+         thing". While premultiplied alpha is standard in the movie CGI
+         industry, it is still uncommon in the videogame/real-time world.
+
+         If you linearly filter non-premultiplied alpha, strange effects
+         occur. (For example, the 50/50 average of 99% transparent bright green
+         and 1% transparent black produces 50% transparent dark green when
+         non-premultiplied, whereas premultiplied it produces 50%
+         transparent near-black. The former introduces green energy
+         that doesn't exist in the source image.)
+
+         2. Artists should not edit premultiplied-alpha images; artists
+         want non-premultiplied alpha images. Thus, art tools generally output
+         non-premultiplied alpha images.
+
+         3. You will get best results in most cases by converting images
+         to premultiplied alpha before processing them mathematically.
+
+         4. If you pass the flag STBIR_FLAG_ALPHA_PREMULTIPLIED, the
+         resizer does not do anything special for the alpha channel;
+         it is resampled identically to other channels. This produces
+         the correct results for premultiplied-alpha images, but produces
+         less-than-ideal results for non-premultiplied-alpha images.
+
+         5. If you do not pass the flag STBIR_FLAG_ALPHA_PREMULTIPLIED,
+         then the resizer weights the contribution of input pixels
+         based on their alpha values, or, equivalently, it multiplies
+         the alpha value into the color channels, resamples, then divides
+         by the resultant alpha value. Input pixels which have alpha=0 do
+         not contribute at all to output pixels unless _all_ of the input
+         pixels affecting that output pixel have alpha=0, in which case
+         the result for that pixel is the same as it would be without
+         STBIR_FLAG_ALPHA_PREMULTIPLIED. However, this is only true for
+         input images in integer formats. For input images in float format,
+         input pixels with alpha=0 have no effect, and output pixels
+         which have alpha=0 will be 0 in all channels. (For float images,
+         you can manually achieve the same result by adding a tiny epsilon
+         value to the alpha channel of every image, and then subtracting
+         or clamping it at the end.)
+
+         6. You can suppress the behavior described in #5 and make
+         all-0-alpha pixels have 0 in all channels by #defining
+         STBIR_NO_ALPHA_EPSILON.
+
+         7. You can separately control whether the alpha channel is
+         interpreted as linear or affected by the colorspace. By default
+         it is linear; you almost never want to apply the colorspace.
+         (For example, graphics hardware does not apply sRGB conversion
+         to the alpha channel.)
+
+   CONTRIBUTORS
+      Jorge L Rodriguez: Implementation
+      Sean Barrett: API design, optimizations
+      Aras Pranckevicius: bugfix
+      Nathan Reed: warning fixes
+
+   REVISIONS
+      0.97 (2020-02-02) fixed warning
+      0.96 (2019-03-04) fixed warnings
+      0.95 (2017-07-23) fixed warnings
+      0.94 (2017-03-18) fixed warnings
+      0.93 (2017-03-03) fixed bug with certain combinations of heights
+      0.92 (2017-01-02) fix integer overflow on large (>2GB) images
+      0.91 (2016-04-02) fix warnings; fix handling of subpixel regions
+      0.90 (2014-09-17) first released version
+
+   LICENSE
+     See end of file for license information.
+
+   TODO
+      Don't decode all of the image data when only processing a partial tile
+      Don't use full-width decode buffers when only processing a partial tile
+      When processing wide images, break processing into tiles so data fits in L1 cache
+      Installable filters?
+      Resize that respects alpha test coverage
+         (Reference code: FloatImage::alphaTestCoverage and FloatImage::scaleAlphaToCoverage:
+         https://code.google.com/p/nvidia-texture-tools/source/browse/trunk/src/nvimage/FloatImage.cpp )
+*/
+
+#ifndef STBIR_INCLUDE_STB_IMAGE_RESIZE_H
+#define STBIR_INCLUDE_STB_IMAGE_RESIZE_H
+
+#ifdef _MSC_VER
+typedef unsigned char  stbir_uint8;
+typedef unsigned short stbir_uint16;
+typedef unsigned int   stbir_uint32;
+#else
+#include <stdint.h>
+typedef uint8_t  stbir_uint8;
+typedef uint16_t stbir_uint16;
+typedef uint32_t stbir_uint32;
+#endif
+
+#ifndef STBIRDEF
+#ifdef STB_IMAGE_RESIZE_STATIC
+#define STBIRDEF static
+#else
+#ifdef __cplusplus
+#define STBIRDEF extern "C"
+#else
+#define STBIRDEF extern
+#endif
+#endif
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Easy-to-use API:
+//
+//     * "input pixels" points to an array of image data with 'num_channels' channels (e.g. RGB=3, RGBA=4)
+//     * input_w is input image width (x-axis), input_h is input image height (y-axis)
+//     * stride is the offset between successive rows of image data in memory, in bytes. you can
+//       specify 0 to mean packed continuously in memory
+//     * alpha channel is treated identically to other channels.
+//     * colorspace is linear or sRGB as specified by function name
+//     * returned result is 1 for success or 0 in case of an error.
+//       #define STBIR_ASSERT() to trigger an assert on parameter validation errors.
+//     * Memory required grows approximately linearly with input and output size, but with
+//       discontinuities at input_w == output_w and input_h == output_h.
+//     * These functions use a "default" resampling filter defined at compile time. To change the filter,
+//       you can change the compile-time defaults by #defining STBIR_DEFAULT_FILTER_UPSAMPLE
+//       and STBIR_DEFAULT_FILTER_DOWNSAMPLE, or you can use the medium-complexity API.
+
+STBIRDEF int stbir_resize_uint8(     const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                           unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                     int num_channels);
+
+STBIRDEF int stbir_resize_float(     const float *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                           float *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                     int num_channels);
+
+
+// The following functions interpret image data as gamma-corrected sRGB.
+// Specify STBIR_ALPHA_CHANNEL_NONE if you have no alpha channel,
+// or otherwise provide the index of the alpha channel. Flags value
+// of 0 will probably do the right thing if you're not sure what
+// the flags mean.
+
+#define STBIR_ALPHA_CHANNEL_NONE       -1
+
+// Set this flag if your texture has premultiplied alpha. Otherwise, stbir will
+// use alpha-weighted resampling (effectively premultiplying, resampling,
+// then unpremultiplying).
+#define STBIR_FLAG_ALPHA_PREMULTIPLIED    (1 << 0)
+// The specified alpha channel should be handled as gamma-corrected value even
+// when doing sRGB operations.
+#define STBIR_FLAG_ALPHA_USES_COLORSPACE  (1 << 1)
+
+STBIRDEF int stbir_resize_uint8_srgb(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                           unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                     int num_channels, int alpha_channel, int flags);
+
+
+typedef enum
+{
+    STBIR_EDGE_CLAMP   = 1,
+    STBIR_EDGE_REFLECT = 2,
+    STBIR_EDGE_WRAP    = 3,
+    STBIR_EDGE_ZERO    = 4,
+} stbir_edge;
+
+// This function adds the ability to specify how requests to sample off the edge of the image are handled.
+STBIRDEF int stbir_resize_uint8_srgb_edgemode(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                                    unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                              int num_channels, int alpha_channel, int flags,
+                                              stbir_edge edge_wrap_mode);
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Medium-complexity API
+//
+// This extends the easy-to-use API as follows:
+//
+//     * Alpha-channel can be processed separately
+//       * If alpha_channel is not STBIR_ALPHA_CHANNEL_NONE
+//         * Alpha channel will not be gamma corrected (unless flags&STBIR_FLAG_GAMMA_CORRECT)
+//         * Filters will be weighted by alpha channel (unless flags&STBIR_FLAG_ALPHA_PREMULTIPLIED)
+//     * Filter can be selected explicitly
+//     * uint16 image type
+//     * sRGB colorspace available for all types
+//     * context parameter for passing to STBIR_MALLOC
+
+// Filter kernels selectable in the medium/full APIs.  The order here must
+// match stbir__filter_info_table in the implementation.
+typedef enum
+{
+    STBIR_FILTER_DEFAULT      = 0,  // use same filter type that easy-to-use API chooses
+    STBIR_FILTER_BOX          = 1,  // A trapezoid w/1-pixel wide ramps, same result as box for integer scale ratios
+    STBIR_FILTER_TRIANGLE     = 2,  // On upsampling, produces same results as bilinear texture filtering
+    STBIR_FILTER_CUBICBSPLINE = 3,  // The cubic b-spline (aka Mitchell-Netravali with B=1,C=0), gaussian-esque
+    STBIR_FILTER_CATMULLROM   = 4,  // An interpolating cubic spline
+    STBIR_FILTER_MITCHELL     = 5,  // Mitchell-Netravali filter with B=1/3, C=1/3
+} stbir_filter;
+
+// Colorspace the sample values are interpreted in (sRGB values are converted
+// to linear light before filtering and back afterwards).
+typedef enum
+{
+    STBIR_COLORSPACE_LINEAR,
+    STBIR_COLORSPACE_SRGB,
+
+    STBIR_MAX_COLORSPACES,
+} stbir_colorspace;
+
+// The following functions are all identical except for the type of the image data
+// (8-bit, 16-bit, or float samples).  alloc_context is an opaque pointer that
+// is forwarded to STBIR_MALLOC/STBIR_FREE.
+STBIRDEF int stbir_resize_uint8_generic( const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                               unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                         int num_channels, int alpha_channel, int flags,
+                                         stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space,
+                                         void *alloc_context);
+
+STBIRDEF int stbir_resize_uint16_generic(const stbir_uint16 *input_pixels  , int input_w , int input_h , int input_stride_in_bytes,
+                                               stbir_uint16 *output_pixels , int output_w, int output_h, int output_stride_in_bytes,
+                                         int num_channels, int alpha_channel, int flags,
+                                         stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space,
+                                         void *alloc_context);
+
+STBIRDEF int stbir_resize_float_generic( const float *input_pixels         , int input_w , int input_h , int input_stride_in_bytes,
+                                               float *output_pixels        , int output_w, int output_h, int output_stride_in_bytes,
+                                         int num_channels, int alpha_channel, int flags,
+                                         stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space,
+                                         void *alloc_context);
+
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Full-complexity API
+//
+// This extends the medium API as follows:
+//
+//     * uint32 image type
+//     * not typesafe
+//     * separate filter types for each axis
+//     * separate edge modes for each axis
+//     * can specify scale explicitly for subpixel correctness
+//     * can specify image source tile using texture coordinates
+
+// Per-sample data type for the full-complexity API.  The order here must
+// match the stbir__type_size[] table in the implementation.
+typedef enum
+{
+    STBIR_TYPE_UINT8 ,
+    STBIR_TYPE_UINT16,
+    STBIR_TYPE_UINT32,
+    STBIR_TYPE_FLOAT ,
+
+    STBIR_MAX_TYPES
+} stbir_datatype;
+
+// Full-complexity resize: any datatype, per-axis edge modes and filters.
+STBIRDEF int stbir_resize(         const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                         void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                   stbir_datatype datatype,
+                                   int num_channels, int alpha_channel, int flags,
+                                   stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
+                                   stbir_filter filter_horizontal,  stbir_filter filter_vertical,
+                                   stbir_colorspace space, void *alloc_context);
+
+// As stbir_resize, but with an explicit output->input scale and offset for
+// subpixel-exact results.
+STBIRDEF int stbir_resize_subpixel(const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                         void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                   stbir_datatype datatype,
+                                   int num_channels, int alpha_channel, int flags,
+                                   stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
+                                   stbir_filter filter_horizontal,  stbir_filter filter_vertical,
+                                   stbir_colorspace space, void *alloc_context,
+                                   float x_scale, float y_scale,
+                                   float x_offset, float y_offset);
+
+// As stbir_resize, but samples only the [s0,s1]x[t0,t1] region of the input.
+STBIRDEF int stbir_resize_region(  const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                         void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                   stbir_datatype datatype,
+                                   int num_channels, int alpha_channel, int flags,
+                                   stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
+                                   stbir_filter filter_horizontal,  stbir_filter filter_vertical,
+                                   stbir_colorspace space, void *alloc_context,
+                                   float s0, float t0, float s1, float t1);
+// (s0, t0) & (s1, t1) are the top-left and bottom right corner (uv addressing style: [0, 1]x[0, 1]) of a region of the input image to use.
+
+//
+//
+////   end header file   /////////////////////////////////////////////////////
+#endif // STBIR_INCLUDE_STB_IMAGE_RESIZE_H
+
+
+
+
+
+#ifdef STB_IMAGE_RESIZE_IMPLEMENTATION
+
+#ifndef STBIR_ASSERT
+#include <assert.h>
+#define STBIR_ASSERT(x) assert(x)
+#endif
+
+// For memset
+#include <string.h>
+
+#include <math.h>
+
+#ifndef STBIR_MALLOC
+#include <stdlib.h>
+// use comma operator to evaluate c, to avoid "unused parameter" warnings
+#define STBIR_MALLOC(size,c) ((void)(c), malloc(size))
+#define STBIR_FREE(ptr,c)    ((void)(c), free(ptr))
+#endif
+
+#ifndef _MSC_VER
+#ifdef __cplusplus
+#define stbir__inline inline
+#else
+#define stbir__inline
+#endif
+#else
+#define stbir__inline __forceinline
+#endif
+
+
+// should produce compiler error if size is wrong
+typedef unsigned char stbir__validate_uint32[sizeof(stbir_uint32) == 4 ? 1 : -1];
+
+#ifdef _MSC_VER
+#define STBIR__NOTUSED(v)  (void)(v)
+#else
+#define STBIR__NOTUSED(v)  (void)sizeof(v)
+#endif
+
+#define STBIR__ARRAY_SIZE(a) (sizeof((a))/sizeof((a)[0]))
+
+#ifndef STBIR_DEFAULT_FILTER_UPSAMPLE
+#define STBIR_DEFAULT_FILTER_UPSAMPLE    STBIR_FILTER_CATMULLROM
+#endif
+
+#ifndef STBIR_DEFAULT_FILTER_DOWNSAMPLE
+#define STBIR_DEFAULT_FILTER_DOWNSAMPLE  STBIR_FILTER_MITCHELL
+#endif
+
+#ifndef STBIR_PROGRESS_REPORT
+#define STBIR_PROGRESS_REPORT(float_0_to_1)
+#endif
+
+#ifndef STBIR_MAX_CHANNELS
+#define STBIR_MAX_CHANNELS 64
+#endif
+
+#if STBIR_MAX_CHANNELS > 65536
+#error "Too many channels; STBIR_MAX_CHANNELS must be no more than 65536."
+// because we store the indices in 16-bit variables
+#endif
+
+// This value is added to alpha just before premultiplication to avoid
+// zeroing out color values. It is equivalent to 2^-80. If you don't want
+// that behavior (it may interfere if you have floating point images with
+// very small alpha values) then you can define STBIR_NO_ALPHA_EPSILON to
+// disable it.
+#ifndef STBIR_ALPHA_EPSILON
+#define STBIR_ALPHA_EPSILON ((float)1 / (1 << 20) / (1 << 20) / (1 << 20) / (1 << 20))
+#endif
+
+
+
+#ifdef _MSC_VER
+#define STBIR__UNUSED_PARAM(v)  (void)(v)
+#else
+#define STBIR__UNUSED_PARAM(v)  (void)sizeof(v)
+#endif
+
+// Bytes per sample for each stbir_datatype value; indexed by the enum, so the
+// order here must match stbir_datatype exactly.
+static unsigned char stbir__type_size[] = {
+    1, // STBIR_TYPE_UINT8
+    2, // STBIR_TYPE_UINT16
+    4, // STBIR_TYPE_UINT32
+    4, // STBIR_TYPE_FLOAT
+};
+
+// Kernel function centered at 0.  x is the distance from the filter center;
+// the second argument is the scale ratio (the upsample path passes 1/scale,
+// the downsample path passes scale_ratio -- see the coefficient calculators).
+typedef float (stbir__kernel_fn)(float x, float scale);
+// Filter support (radius beyond which the kernel is zero), in pixels.
+typedef float (stbir__support_fn)(float scale);
+
+// One entry of stbir__filter_info_table: a kernel and its support.
+typedef struct
+{
+    stbir__kernel_fn* kernel;
+    stbir__support_fn* support;
+} stbir__filter_info;
+
+// When upsampling, the contributors are which source pixels contribute.
+// When downsampling, the contributors are which destination pixels are contributed to.
+typedef struct
+{
+    int n0; // First contributing pixel
+    int n1; // Last contributing pixel
+} stbir__contributors;
+
+// All state for a single resize operation: the caller's parameters plus the
+// precomputed contributor/coefficient tables and the scratch buffers used by
+// the sampling loops.
+typedef struct
+{
+    // Source and destination images exactly as passed in by the caller.
+    const void* input_data;
+    int input_w;
+    int input_h;
+    int input_stride_bytes;
+
+    void* output_data;
+    int output_w;
+    int output_h;
+    int output_stride_bytes;
+
+    float s0, t0, s1, t1; // source region in uv coordinates, see stbir_resize_region()
+
+    float horizontal_shift; // Units: output pixels
+    float vertical_shift;   // Units: output pixels
+    float horizontal_scale;
+    float vertical_scale;
+
+    int channels;
+    int alpha_channel;
+    stbir_uint32 flags;
+    stbir_datatype type;
+    stbir_filter horizontal_filter;
+    stbir_filter vertical_filter;
+    stbir_edge edge_horizontal;
+    stbir_edge edge_vertical;
+    stbir_colorspace colorspace;
+
+    // Precomputed per-axis filter tables (contributor ranges and weights).
+    stbir__contributors* horizontal_contributors;
+    float* horizontal_coefficients;
+
+    stbir__contributors* vertical_contributors;
+    float* vertical_coefficients;
+
+    int decode_buffer_pixels;
+    float* decode_buffer;
+
+    float* horizontal_buffer;
+
+    // cache these because ceil/floor are inexplicably showing up in profile
+    int horizontal_coefficient_width;
+    int vertical_coefficient_width;
+    int horizontal_filter_pixel_width;
+    int vertical_filter_pixel_width;
+    int horizontal_filter_pixel_margin;
+    int vertical_filter_pixel_margin;
+    int horizontal_num_contributors;
+    int vertical_num_contributors;
+
+    int ring_buffer_length_bytes;   // The length of an individual entry in the ring buffer. The total number of ring buffers is stbir__get_filter_pixel_width(filter)
+    int ring_buffer_num_entries;    // Total number of entries in the ring buffer.
+    int ring_buffer_first_scanline;
+    int ring_buffer_last_scanline;
+    int ring_buffer_begin_index;    // first_scanline is at this index in the ring buffer
+    float* ring_buffer;
+
+    float* encode_buffer; // A temporary buffer to store floats so we don't lose precision while we do multiply-adds.
+
+    // Sizes (in elements) of the buffers above, recorded at allocation time.
+    int horizontal_contributors_size;
+    int horizontal_coefficients_size;
+    int vertical_contributors_size;
+    int vertical_coefficients_size;
+    int decode_buffer_size;
+    int horizontal_buffer_size;
+    int ring_buffer_size;
+    int encode_buffer_size;
+} stbir__info;
+
+
+// Maximum value of each integer sample type, as floating-point constants.
+// The uint32 maximum is kept as a double because 4294967295 is not exactly
+// representable in single precision.
+static const float stbir__max_uint8_as_float  = 255.0f;
+static const float stbir__max_uint16_as_float = 65535.0f;
+static const double stbir__max_uint32_as_float = 4294967295.0;
+
+
+// Smaller of two ints.
+static stbir__inline int stbir__min(int a, int b)
+{
+    if (a < b)
+        return a;
+    return b;
+}
+
+// Clamp x to the inclusive range [0, 1].  (NaN passes through either way,
+// since both comparisons are false for it.)
+static stbir__inline float stbir__saturate(float x)
+{
+    if (x > 1)
+        return 1;
+
+    if (x < 0)
+        return 0;
+
+    return x;
+}
+
+#ifdef STBIR_SATURATE_INT
+// Clamp an int to [0, 255].  The unsigned compare handles the common in-range
+// case with a single test (negative values wrap to large unsigned values).
+static stbir__inline stbir_uint8 stbir__saturate8(int x)
+{
+    if ((unsigned int) x <= 255)
+        return x;
+
+    if (x < 0)
+        return 0;
+
+    return 255;
+}
+
+// Clamp an int to [0, 65535]; same technique as stbir__saturate8.
+static stbir__inline stbir_uint16 stbir__saturate16(int x)
+{
+    if ((unsigned int) x <= 65535)
+        return x;
+
+    if (x < 0)
+        return 0;
+
+    return 65535;
+}
+#endif
+
+// Table form of stbir__srgb_to_linear for 8-bit input:
+// stbir__srgb_uchar_to_linear_float[b] is the linear value of sRGB byte b.
+static float stbir__srgb_uchar_to_linear_float[256] = {
+    0.000000f, 0.000304f, 0.000607f, 0.000911f, 0.001214f, 0.001518f, 0.001821f, 0.002125f, 0.002428f, 0.002732f, 0.003035f,
+    0.003347f, 0.003677f, 0.004025f, 0.004391f, 0.004777f, 0.005182f, 0.005605f, 0.006049f, 0.006512f, 0.006995f, 0.007499f,
+    0.008023f, 0.008568f, 0.009134f, 0.009721f, 0.010330f, 0.010960f, 0.011612f, 0.012286f, 0.012983f, 0.013702f, 0.014444f,
+    0.015209f, 0.015996f, 0.016807f, 0.017642f, 0.018500f, 0.019382f, 0.020289f, 0.021219f, 0.022174f, 0.023153f, 0.024158f,
+    0.025187f, 0.026241f, 0.027321f, 0.028426f, 0.029557f, 0.030713f, 0.031896f, 0.033105f, 0.034340f, 0.035601f, 0.036889f,
+    0.038204f, 0.039546f, 0.040915f, 0.042311f, 0.043735f, 0.045186f, 0.046665f, 0.048172f, 0.049707f, 0.051269f, 0.052861f,
+    0.054480f, 0.056128f, 0.057805f, 0.059511f, 0.061246f, 0.063010f, 0.064803f, 0.066626f, 0.068478f, 0.070360f, 0.072272f,
+    0.074214f, 0.076185f, 0.078187f, 0.080220f, 0.082283f, 0.084376f, 0.086500f, 0.088656f, 0.090842f, 0.093059f, 0.095307f,
+    0.097587f, 0.099899f, 0.102242f, 0.104616f, 0.107023f, 0.109462f, 0.111932f, 0.114435f, 0.116971f, 0.119538f, 0.122139f,
+    0.124772f, 0.127438f, 0.130136f, 0.132868f, 0.135633f, 0.138432f, 0.141263f, 0.144128f, 0.147027f, 0.149960f, 0.152926f,
+    0.155926f, 0.158961f, 0.162029f, 0.165132f, 0.168269f, 0.171441f, 0.174647f, 0.177888f, 0.181164f, 0.184475f, 0.187821f,
+    0.191202f, 0.194618f, 0.198069f, 0.201556f, 0.205079f, 0.208637f, 0.212231f, 0.215861f, 0.219526f, 0.223228f, 0.226966f,
+    0.230740f, 0.234551f, 0.238398f, 0.242281f, 0.246201f, 0.250158f, 0.254152f, 0.258183f, 0.262251f, 0.266356f, 0.270498f,
+    0.274677f, 0.278894f, 0.283149f, 0.287441f, 0.291771f, 0.296138f, 0.300544f, 0.304987f, 0.309469f, 0.313989f, 0.318547f,
+    0.323143f, 0.327778f, 0.332452f, 0.337164f, 0.341914f, 0.346704f, 0.351533f, 0.356400f, 0.361307f, 0.366253f, 0.371238f,
+    0.376262f, 0.381326f, 0.386430f, 0.391573f, 0.396755f, 0.401978f, 0.407240f, 0.412543f, 0.417885f, 0.423268f, 0.428691f,
+    0.434154f, 0.439657f, 0.445201f, 0.450786f, 0.456411f, 0.462077f, 0.467784f, 0.473532f, 0.479320f, 0.485150f, 0.491021f,
+    0.496933f, 0.502887f, 0.508881f, 0.514918f, 0.520996f, 0.527115f, 0.533276f, 0.539480f, 0.545725f, 0.552011f, 0.558340f,
+    0.564712f, 0.571125f, 0.577581f, 0.584078f, 0.590619f, 0.597202f, 0.603827f, 0.610496f, 0.617207f, 0.623960f, 0.630757f,
+    0.637597f, 0.644480f, 0.651406f, 0.658375f, 0.665387f, 0.672443f, 0.679543f, 0.686685f, 0.693872f, 0.701102f, 0.708376f,
+    0.715694f, 0.723055f, 0.730461f, 0.737911f, 0.745404f, 0.752942f, 0.760525f, 0.768151f, 0.775822f, 0.783538f, 0.791298f,
+    0.799103f, 0.806952f, 0.814847f, 0.822786f, 0.830770f, 0.838799f, 0.846873f, 0.854993f, 0.863157f, 0.871367f, 0.879622f,
+    0.887923f, 0.896269f, 0.904661f, 0.913099f, 0.921582f, 0.930111f, 0.938686f, 0.947307f, 0.955974f, 0.964686f, 0.973445f,
+    0.982251f, 0.991102f, 1.0f
+};
+
+// sRGB -> linear transfer function for one component in [0, 1]:
+// linear segment below 0.04045, gamma-2.4 power curve above.
+static float stbir__srgb_to_linear(float f)
+{
+    if (f > 0.04045f)
+        return (float)pow((f + 0.055f) / 1.055f, 2.4f);
+
+    return f / 12.92f;
+}
+
+// linear -> sRGB transfer function for one component in [0, 1]:
+// linear segment below 0.0031308, gamma-1/2.4 power curve above.
+static float stbir__linear_to_srgb(float f)
+{
+    if (f > 0.0031308f)
+        return 1.055f * (float)pow(f, 1 / 2.4f) - 0.055f;
+
+    return f * 12.92f;
+}
+
+#ifndef STBIR_NON_IEEE_FLOAT
+// From https://gist.github.com/rygorous/2203834
+// Fast linear-float -> sRGB-byte conversion that relies on the IEEE-754 bit
+// layout of 32-bit floats.
+
+// Lets us reinterpret a float's bits as an unsigned integer and back.
+typedef union
+{
+    stbir_uint32 u;
+    float f;
+} stbir__FP32;
+
+// Piecewise-linear fit of the linear->sRGB curve: each entry packs a bias
+// (high 16 bits) and a scale (low 16 bits) for one exponent/mantissa bucket
+// (see the unpacking in stbir__linear_to_srgb_uchar).
+static const stbir_uint32 fp32_to_srgb8_tab4[104] = {
+    0x0073000d, 0x007a000d, 0x0080000d, 0x0087000d, 0x008d000d, 0x0094000d, 0x009a000d, 0x00a1000d,
+    0x00a7001a, 0x00b4001a, 0x00c1001a, 0x00ce001a, 0x00da001a, 0x00e7001a, 0x00f4001a, 0x0101001a,
+    0x010e0033, 0x01280033, 0x01410033, 0x015b0033, 0x01750033, 0x018f0033, 0x01a80033, 0x01c20033,
+    0x01dc0067, 0x020f0067, 0x02430067, 0x02760067, 0x02aa0067, 0x02dd0067, 0x03110067, 0x03440067,
+    0x037800ce, 0x03df00ce, 0x044600ce, 0x04ad00ce, 0x051400ce, 0x057b00c5, 0x05dd00bc, 0x063b00b5,
+    0x06970158, 0x07420142, 0x07e30130, 0x087b0120, 0x090b0112, 0x09940106, 0x0a1700fc, 0x0a9500f2,
+    0x0b0f01cb, 0x0bf401ae, 0x0ccb0195, 0x0d950180, 0x0e56016e, 0x0f0d015e, 0x0fbc0150, 0x10630143,
+    0x11070264, 0x1238023e, 0x1357021d, 0x14660201, 0x156601e9, 0x165a01d3, 0x174401c0, 0x182401af,
+    0x18fe0331, 0x1a9602fe, 0x1c1502d2, 0x1d7e02ad, 0x1ed4028d, 0x201a0270, 0x21520256, 0x227d0240,
+    0x239f0443, 0x25c003fe, 0x27bf03c4, 0x29a10392, 0x2b6a0367, 0x2d1d0341, 0x2ebe031f, 0x304d0300,
+    0x31d105b0, 0x34a80555, 0x37520507, 0x39d504c5, 0x3c37048b, 0x3e7c0458, 0x40a8042a, 0x42bd0401,
+    0x44c20798, 0x488e071e, 0x4c1c06b6, 0x4f76065d, 0x52a50610, 0x55ac05cc, 0x5892058f, 0x5b590559,
+    0x5e0c0a23, 0x631c0980, 0x67db08f6, 0x6c55087f, 0x70940818, 0x74a007bd, 0x787d076c, 0x7c330723,
+};
+
+// Convert a linear value to an sRGB byte using fp32_to_srgb8_tab4: bucket by
+// float exponent + top mantissa bits, then linearly interpolate inside the
+// bucket.  Input is clamped to [2^-13, 1-eps]; NaN maps to 0.
+static stbir_uint8 stbir__linear_to_srgb_uchar(float in)
+{
+    static const stbir__FP32 almostone = { 0x3f7fffff }; // 1-eps
+    static const stbir__FP32 minval = { (127-13) << 23 };
+    stbir_uint32 tab,bias,scale,t;
+    stbir__FP32 f;
+
+    // Clamp to [2^(-13), 1-eps]; these two values map to 0 and 1, respectively.
+    // The tests are carefully written so that NaNs map to 0, same as in the reference
+    // implementation.
+    if (!(in > minval.f)) // written this way to catch NaNs
+        in = minval.f;
+    if (in > almostone.f)
+        in = almostone.f;
+
+    // Do the table lookup and unpack bias, scale
+    f.f = in;
+    tab = fp32_to_srgb8_tab4[(f.u - minval.u) >> 20];
+    bias = (tab >> 16) << 9;
+    scale = tab & 0xffff;
+
+    // Grab next-highest mantissa bits and perform linear interpolation
+    t = (f.u >> 12) & 0xff;
+    return (unsigned char) ((bias + scale*t) >> 16);
+}
+
+#else
+// Portable fallback for platforms without IEEE-754 floats.
+// sRGB transition values, scaled by 1<<28
+static int stbir__srgb_offset_to_linear_scaled[256] =
+{
+            0,     40738,    122216,    203693,    285170,    366648,    448125,    529603,
+       611080,    692557,    774035,    855852,    942009,   1033024,   1128971,   1229926,
+      1335959,   1447142,   1563542,   1685229,   1812268,   1944725,   2082664,   2226148,
+      2375238,   2529996,   2690481,   2856753,   3028870,   3206888,   3390865,   3580856,
+      3776916,   3979100,   4187460,   4402049,   4622919,   4850123,   5083710,   5323731,
+      5570236,   5823273,   6082892,   6349140,   6622065,   6901714,   7188133,   7481369,
+      7781466,   8088471,   8402427,   8723380,   9051372,   9386448,   9728650,  10078021,
+     10434603,  10798439,  11169569,  11548036,  11933879,  12327139,  12727857,  13136073,
+     13551826,  13975156,  14406100,  14844697,  15290987,  15745007,  16206795,  16676389,
+     17153826,  17639142,  18132374,  18633560,  19142734,  19659934,  20185196,  20718552,
+     21260042,  21809696,  22367554,  22933648,  23508010,  24090680,  24681686,  25281066,
+     25888850,  26505076,  27129772,  27762974,  28404716,  29055026,  29713942,  30381490,
+     31057708,  31742624,  32436272,  33138682,  33849884,  34569912,  35298800,  36036568,
+     36783260,  37538896,  38303512,  39077136,  39859796,  40651528,  41452360,  42262316,
+     43081432,  43909732,  44747252,  45594016,  46450052,  47315392,  48190064,  49074096,
+     49967516,  50870356,  51782636,  52704392,  53635648,  54576432,  55526772,  56486700,
+     57456236,  58435408,  59424248,  60422780,  61431036,  62449032,  63476804,  64514376,
+     65561776,  66619028,  67686160,  68763192,  69850160,  70947088,  72053992,  73170912,
+     74297864,  75434880,  76581976,  77739184,  78906536,  80084040,  81271736,  82469648,
+     83677792,  84896192,  86124888,  87363888,  88613232,  89872928,  91143016,  92423512,
+     93714432,  95015816,  96327688,  97650056,  98982952, 100326408, 101680440, 103045072,
+    104420320, 105806224, 107202800, 108610064, 110028048, 111456776, 112896264, 114346544,
+    115807632, 117279552, 118762328, 120255976, 121760536, 123276016, 124802440, 126339832,
+    127888216, 129447616, 131018048, 132599544, 134192112, 135795792, 137410592, 139036528,
+    140673648, 142321952, 143981456, 145652208, 147334208, 149027488, 150732064, 152447968,
+    154175200, 155913792, 157663776, 159425168, 161197984, 162982240, 164777968, 166585184,
+    168403904, 170234160, 172075968, 173929344, 175794320, 177670896, 179559120, 181458992,
+    183370528, 185293776, 187228736, 189175424, 191133888, 193104112, 195086128, 197079968,
+    199085648, 201103184, 203132592, 205173888, 207227120, 209292272, 211369392, 213458480,
+    215559568, 217672656, 219797792, 221934976, 224084240, 226245600, 228419056, 230604656,
+    232802400, 235012320, 237234432, 239468736, 241715280, 243974080, 246245120, 248528464,
+    250824112, 253132064, 255452368, 257785040, 260130080, 262487520, 264857376, 267239664,
+};
+
+// Convert a linear value in [0, 1] to an sRGB byte by binary-searching the
+// table above for the largest entry <= f * 2^28.
+static stbir_uint8 stbir__linear_to_srgb_uchar(float f)
+{
+    int x = (int) (f * (1 << 28)); // has headroom so you don't need to clamp
+    int v = 0;
+    int i;
+
+    // Refine the guess with a short binary search.
+    i = v + 128; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
+    i = v +  64; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
+    i = v +  32; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
+    i = v +  16; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
+    i = v +   8; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
+    i = v +   4; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
+    i = v +   2; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
+    i = v +   1; if (x >= stbir__srgb_offset_to_linear_scaled[i]) v = i;
+
+    return (stbir_uint8) v;
+}
+#endif
+
+// Box/trapezoid kernel used when downsampling: flat top of width 1-scale
+// with linear ramps of width scale on each side.  Only valid for scale <= 1,
+// hence the assert.
+static float stbir__filter_trapezoid(float x, float scale)
+{
+    float halfscale = scale / 2;
+    float t = 0.5f + halfscale;
+    STBIR_ASSERT(scale <= 1);
+
+    x = (float)fabs(x);
+
+    if (x >= t)
+        return 0;
+    else
+    {
+        float r = 0.5f - halfscale;
+        if (x <= r)
+            return 1;
+        else
+            return (t - x) / scale; // on the ramp: falls linearly from 1 to 0
+    }
+}
+
+// Support radius of the trapezoid kernel: half the flat top plus one ramp.
+static float stbir__support_trapezoid(float scale)
+{
+    STBIR_ASSERT(scale <= 1);
+    return 0.5f + scale / 2;
+}
+
+// Triangle (tent) kernel: 1 at the center, falling linearly to 0 at |x| = 1.
+// The scale argument is unused.
+static float stbir__filter_triangle(float x, float scale)
+{
+    float d;
+
+    STBIR__UNUSED_PARAM(scale);
+
+    d = (float)fabs(x);
+    if (d <= 1.0f)
+        return 1 - d;
+
+    return 0;
+}
+
+// Cubic B-spline kernel (B=1, C=0): smooth, slightly blurring; support 2.
+// The scale argument is unused.
+static float stbir__filter_cubic(float x, float s)
+{
+    STBIR__UNUSED_PARAM(s);
+
+    x = (float)fabs(x);
+
+    if (x < 1.0f)
+        return (4 + x*x*(3*x - 6))/6;
+    else if (x < 2.0f)
+        return (8 + x*(-12 + x*(6 - x)))/6;
+
+    return (0.0f);
+}
+
+// Catmull-Rom kernel: interpolating cubic spline (passes through the
+// samples); support 2.  The scale argument is unused.
+static float stbir__filter_catmullrom(float x, float s)
+{
+    STBIR__UNUSED_PARAM(s);
+
+    x = (float)fabs(x);
+
+    if (x < 1.0f)
+        return 1 - x*x*(2.5f - 1.5f*x);
+    else if (x < 2.0f)
+        return 2 - x*(4 + x*(0.5f*x - 2.5f));
+
+    return (0.0f);
+}
+
+// Mitchell-Netravali kernel with B = C = 1/3; support 2.  The scale argument
+// is unused.
+static float stbir__filter_mitchell(float x, float s)
+{
+    STBIR__UNUSED_PARAM(s);
+
+    x = (float)fabs(x);
+
+    if (x < 1.0f)
+        return (16 + x*x*(21 * x - 36))/18;
+    else if (x < 2.0f)
+        return (32 + x*(-60 + x*(36 - 7*x)))/18;
+
+    return (0.0f);
+}
+
+// Constant support functions for stbir__filter_info_table.
+
+// Support 0: used for the (invalid) STBIR_FILTER_DEFAULT table slot.
+static float stbir__support_zero(float s)
+{
+    STBIR__UNUSED_PARAM(s);
+    return 0;
+}
+
+// Support 1: triangle filter.
+static float stbir__support_one(float s)
+{
+    STBIR__UNUSED_PARAM(s);
+    return 1;
+}
+
+// Support 2: the cubic filters.
+static float stbir__support_two(float s)
+{
+    STBIR__UNUSED_PARAM(s);
+    return 2;
+}
+
+// Kernel/support pairs indexed by stbir_filter (order must match the enum).
+// Entry 0 (STBIR_FILTER_DEFAULT) has a NULL kernel and zero support; code
+// that indexes this table asserts filter != 0 (see
+// stbir__get_filter_pixel_width).
+static stbir__filter_info stbir__filter_info_table[] = {
+        { NULL,                     stbir__support_zero },
+        { stbir__filter_trapezoid,  stbir__support_trapezoid },
+        { stbir__filter_triangle,   stbir__support_one },
+        { stbir__filter_cubic,      stbir__support_two },
+        { stbir__filter_catmullrom, stbir__support_two },
+        { stbir__filter_mitchell,   stbir__support_two },
+};
+
+// A scale ratio > 1 means the output is larger than the input (upsampling).
+stbir__inline static int stbir__use_upsampling(float ratio)
+{
+    return ratio > 1;
+}
+
+// Is the horizontal axis being upsampled?
+stbir__inline static int stbir__use_width_upsampling(stbir__info* stbir_info)
+{
+    return stbir__use_upsampling(stbir_info->horizontal_scale);
+}
+
+// Is the vertical axis being upsampled?
+stbir__inline static int stbir__use_height_upsampling(stbir__info* stbir_info)
+{
+    return stbir__use_upsampling(stbir_info->vertical_scale);
+}
+
+// This is the maximum number of input samples that can affect an output sample
+// with the given filter
+static int stbir__get_filter_pixel_width(stbir_filter filter, float scale)
+{
+    STBIR_ASSERT(filter != 0);
+    STBIR_ASSERT(filter < STBIR__ARRAY_SIZE(stbir__filter_info_table));
+
+    // Upsampling evaluates the kernel in output space (support(1/scale));
+    // downsampling additionally widens the footprint by 1/scale input pixels.
+    if (stbir__use_upsampling(scale))
+        return (int)ceil(stbir__filter_info_table[filter].support(1/scale) * 2);
+    else
+        return (int)ceil(stbir__filter_info_table[filter].support(scale) * 2 / scale);
+}
+
+// How many pixels the filter can reach past either edge of the image;
+// buffers are expanded by this amount so out-of-bounds taps have somewhere
+// to land.
+static int stbir__get_filter_pixel_margin(stbir_filter filter, float scale)
+{
+    int width = stbir__get_filter_pixel_width(filter, scale);
+    return width / 2;
+}
+
+// Number of filter coefficients stored per contributor record.  Unlike
+// stbir__get_filter_pixel_width, the downsample case is not divided by scale
+// (coefficients are stored in the space they were computed in).
+static int stbir__get_coefficient_width(stbir_filter filter, float scale)
+{
+    if (stbir__use_upsampling(scale))
+        return (int)ceil(stbir__filter_info_table[filter].support(1 / scale) * 2);
+    else
+        return (int)ceil(stbir__filter_info_table[filter].support(scale) * 2);
+}
+
+// Number of contributor records needed for one axis: one per output pixel
+// when upsampling, one per margin-padded input pixel when downsampling.
+static int stbir__get_contributors(float scale, stbir_filter filter, int input_size, int output_size)
+{
+    int margin;
+
+    if (stbir__use_upsampling(scale))
+        return output_size;
+
+    margin = stbir__get_filter_pixel_margin(filter, scale);
+    return input_size + 2 * margin;
+}
+
+// Total number of floats in the horizontal coefficient table
+// (contributor count x coefficients per contributor).
+static int stbir__get_total_horizontal_coefficients(stbir__info* info)
+{
+    return info->horizontal_num_contributors
+         * stbir__get_coefficient_width      (info->horizontal_filter, info->horizontal_scale);
+}
+
+// Same, for the vertical coefficient table.
+static int stbir__get_total_vertical_coefficients(stbir__info* info)
+{
+    return info->vertical_num_contributors
+         * stbir__get_coefficient_width      (info->vertical_filter, info->vertical_scale);
+}
+
+// Address of the n'th contributor record.
+static stbir__contributors* stbir__get_contributor(stbir__contributors* contributors, int n)
+{
+    return contributors + n;
+}
+
+// Address of coefficient c of contributor n.  NOTE: for performance this
+// indexing is duplicated inline in stbir__resample_horizontal_upsample /
+// downsample -- if you change it here, change it there too.
+static float* stbir__get_coefficient(float* coefficients, stbir_filter filter, float scale, int n, int c)
+{
+    int stride = stbir__get_coefficient_width(filter, scale);
+    return coefficients + (stride * n + c);
+}
+
+// Map a coordinate n into the valid range [0, max) according to the edge
+// mode.  max is the image size on this axis.  Called (via stbir__edge_wrap)
+// only when n is already known to be out of range, but the in-range paths
+// are kept for safety.
+static int stbir__edge_wrap_slow(stbir_edge edge, int n, int max)
+{
+    switch (edge)
+    {
+    case STBIR_EDGE_ZERO:
+        return 0; // we'll decode the wrong pixel here, and then overwrite with 0s later
+
+    case STBIR_EDGE_CLAMP:
+        if (n < 0)
+            return 0;
+
+        if (n >= max)
+            return max - 1;
+
+        return n; // NOTREACHED
+
+    case STBIR_EDGE_REFLECT:
+    {
+        if (n < 0)
+        {
+            // BUGFIX: this used to test (n < max), which is always true when
+            // n < 0, so samples further than max-1 pixels off the left edge
+            // returned -n >= max -- an out-of-range index.  Mirror only while
+            // the reflected index stays inside the image, matching the
+            // clamping done for the right edge below.
+            if (-n < max)
+                return -n;
+            else
+                return max - 1;
+        }
+
+        if (n >= max)
+        {
+            int max2 = max * 2;
+            if (n >= max2)
+                return 0;
+            else
+                return max2 - n - 1;
+        }
+
+        return n; // NOTREACHED
+    }
+
+    case STBIR_EDGE_WRAP:
+        if (n >= 0)
+            return (n % max);
+        else
+        {
+            // Modulo for negative n, folded back into [0, max).
+            int m = (-n) % max;
+
+            if (m != 0)
+                m = max - m;
+
+            return (m);
+        }
+        // NOTREACHED
+
+    default:
+        STBIR_ASSERT(!"Unimplemented edge type");
+        return 0;
+    }
+}
+
+// Fast path for coordinate wrapping: in-range indices pass straight through;
+// only out-of-range ones pay for the per-mode switch.
+stbir__inline static int stbir__edge_wrap(stbir_edge edge, int n, int max)
+{
+    if (n < 0 || n >= max)
+        return stbir__edge_wrap_slow(edge, n, max);
+
+    return n;
+}
+
+// What input pixels contribute to this output pixel?
+// Maps output pixel n's filter footprint back into input space: returns the
+// first/last contributing input pixel indices (which may fall outside the
+// image; edge wrapping happens later) and the fractional input-space
+// position of the output pixel's center.
+static void stbir__calculate_sample_range_upsample(int n, float out_filter_radius, float scale_ratio, float out_shift, int* in_first_pixel, int* in_last_pixel, float* in_center_of_out)
+{
+    float out_pixel_center = (float)n + 0.5f;
+    float out_pixel_influence_lowerbound = out_pixel_center - out_filter_radius;
+    float out_pixel_influence_upperbound = out_pixel_center + out_filter_radius;
+
+    float in_pixel_influence_lowerbound = (out_pixel_influence_lowerbound + out_shift) / scale_ratio;
+    float in_pixel_influence_upperbound = (out_pixel_influence_upperbound + out_shift) / scale_ratio;
+
+    *in_center_of_out = (out_pixel_center + out_shift) / scale_ratio;
+    *in_first_pixel = (int)(floor(in_pixel_influence_lowerbound + 0.5));
+    *in_last_pixel = (int)(floor(in_pixel_influence_upperbound - 0.5));
+}
+
+// What output pixels does this input pixel contribute to?
+// Maps input pixel n's filter footprint into output space: returns the
+// first/last affected output pixel indices and the fractional output-space
+// position of the input pixel's center.
+static void stbir__calculate_sample_range_downsample(int n, float in_pixels_radius, float scale_ratio, float out_shift, int* out_first_pixel, int* out_last_pixel, float* out_center_of_in)
+{
+    float in_pixel_center = (float)n + 0.5f;
+    float in_pixel_influence_lowerbound = in_pixel_center - in_pixels_radius;
+    float in_pixel_influence_upperbound = in_pixel_center + in_pixels_radius;
+
+    float out_pixel_influence_lowerbound = in_pixel_influence_lowerbound * scale_ratio - out_shift;
+    float out_pixel_influence_upperbound = in_pixel_influence_upperbound * scale_ratio - out_shift;
+
+    *out_center_of_in = in_pixel_center * scale_ratio - out_shift;
+    *out_first_pixel = (int)(floor(out_pixel_influence_lowerbound + 0.5));
+    *out_last_pixel = (int)(floor(out_pixel_influence_upperbound - 0.5));
+}
+
+// Compute the kernel weights for one output pixel when upsampling.  On
+// return, contributor holds the trimmed [n0, n1] input-pixel range and
+// coefficient_group the weight of each pixel in that range, normalized so
+// they sum to 1.  Leading and trailing zero weights are dropped.
+static void stbir__calculate_coefficients_upsample(stbir_filter filter, float scale, int in_first_pixel, int in_last_pixel, float in_center_of_out, stbir__contributors* contributor, float* coefficient_group)
+{
+    int i;
+    float total_filter = 0;
+    float filter_scale;
+
+    STBIR_ASSERT(in_last_pixel - in_first_pixel <= (int)ceil(stbir__filter_info_table[filter].support(1/scale) * 2)); // Taken directly from stbir__get_coefficient_width() which we can't call because we don't know if we're horizontal or vertical.
+
+    contributor->n0 = in_first_pixel;
+    contributor->n1 = in_last_pixel;
+
+    STBIR_ASSERT(contributor->n1 >= contributor->n0);
+
+    for (i = 0; i <= in_last_pixel - in_first_pixel; i++)
+    {
+        float in_pixel_center = (float)(i + in_first_pixel) + 0.5f;
+        coefficient_group[i] = stbir__filter_info_table[filter].kernel(in_center_of_out - in_pixel_center, 1 / scale);
+
+        // If the coefficient is zero, skip it. (Don't do the <0 check here, we want the influence of those outside pixels.)
+        if (i == 0 && !coefficient_group[i])
+        {
+            // Leading zero weight: advance n0 and redo this slot.
+            contributor->n0 = ++in_first_pixel;
+            i--;
+            continue;
+        }
+
+        total_filter += coefficient_group[i];
+    }
+
+    // NOTE(fg): Not actually true in general, nor is there any reason to expect it should be.
+    // It would be true in exact math but is at best approximately true in floating-point math,
+    // and it would not make sense to try and put actual bounds on this here because it depends
+    // on the image aspect ratio which can get pretty extreme.
+    //STBIR_ASSERT(stbir__filter_info_table[filter].kernel((float)(in_last_pixel + 1) + 0.5f - in_center_of_out, 1/scale) == 0);
+
+    STBIR_ASSERT(total_filter > 0.9);
+    STBIR_ASSERT(total_filter < 1.1f); // Make sure it's not way off.
+
+    // Make sure the sum of all coefficients is 1.
+    filter_scale = 1 / total_filter;
+
+    for (i = 0; i <= in_last_pixel - in_first_pixel; i++)
+        coefficient_group[i] *= filter_scale;
+
+    // Trim trailing zero weights by pulling n1 in.
+    for (i = in_last_pixel - in_first_pixel; i >= 0; i--)
+    {
+        if (coefficient_group[i])
+            break;
+
+        // This line has no weight. We can skip it.
+        contributor->n1 = contributor->n0 + i - 1;
+    }
+}
+
+// Compute the kernel weights for the output pixels influenced by one input
+// pixel when downsampling.  Weights are scaled by scale_ratio; trailing zero
+// weights are trimmed.  Normalization across overlapping contributors
+// happens afterwards in stbir__normalize_downsample_coefficients.
+static void stbir__calculate_coefficients_downsample(stbir_filter filter, float scale_ratio, int out_first_pixel, int out_last_pixel, float out_center_of_in, stbir__contributors* contributor, float* coefficient_group)
+{
+    int i;
+
+    STBIR_ASSERT(out_last_pixel - out_first_pixel <= (int)ceil(stbir__filter_info_table[filter].support(scale_ratio) * 2)); // Taken directly from stbir__get_coefficient_width() which we can't call because we don't know if we're horizontal or vertical.
+
+    contributor->n0 = out_first_pixel;
+    contributor->n1 = out_last_pixel;
+
+    STBIR_ASSERT(contributor->n1 >= contributor->n0);
+
+    for (i = 0; i <= out_last_pixel - out_first_pixel; i++)
+    {
+        float out_pixel_center = (float)(i + out_first_pixel) + 0.5f;
+        float x = out_pixel_center - out_center_of_in;
+        coefficient_group[i] = stbir__filter_info_table[filter].kernel(x, scale_ratio) * scale_ratio;
+    }
+
+    // NOTE(fg): Not actually true in general, nor is there any reason to expect it should be.
+    // It would be true in exact math but is at best approximately true in floating-point math,
+    // and it would not make sense to try and put actual bounds on this here because it depends
+    // on the image aspect ratio which can get pretty extreme.
+    //STBIR_ASSERT(stbir__filter_info_table[filter].kernel((float)(out_last_pixel + 1) + 0.5f - out_center_of_in, scale_ratio) == 0);
+
+    // Trim trailing zero weights by pulling n1 in.
+    for (i = out_last_pixel - out_first_pixel; i >= 0; i--)
+    {
+        if (coefficient_group[i])
+            break;
+
+        // This line has no weight. We can skip it.
+        contributor->n1 = contributor->n0 + i - 1;
+    }
+}
+
+// Post-process the downsample coefficient table: (1) renormalize so that the
+// weights feeding each output pixel sum exactly to 1, then (2) compact each
+// contributor's coefficient run by dropping leading zero weights and
+// out-of-bounds (negative / past-the-end) output indexes.
+static void stbir__normalize_downsample_coefficients(stbir__contributors* contributors, float* coefficients, stbir_filter filter, float scale_ratio, int input_size, int output_size)
+{
+    int num_contributors = stbir__get_contributors(scale_ratio, filter, input_size, output_size);
+    int num_coefficients = stbir__get_coefficient_width(filter, scale_ratio);
+    int i, j;
+    int skip;
+
+    // Pass 1: for every output pixel i, total the weights of all input
+    // contributors whose [n0, n1] range covers it, then scale them so the
+    // total is exactly 1. Contributors are ordered by n0, so we can stop
+    // early once i < n0.
+    for (i = 0; i < output_size; i++)
+    {
+        float scale;
+        float total = 0;
+
+        for (j = 0; j < num_contributors; j++)
+        {
+            if (i >= contributors[j].n0 && i <= contributors[j].n1)
+            {
+                float coefficient = *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i - contributors[j].n0);
+                total += coefficient;
+            }
+            else if (i < contributors[j].n0)
+                break;
+        }
+
+        // Sanity: the pre-scaled kernel weights should already be close to 1.
+        STBIR_ASSERT(total > 0.9f);
+        STBIR_ASSERT(total < 1.1f);
+
+        scale = 1 / total;
+
+        for (j = 0; j < num_contributors; j++)
+        {
+            if (i >= contributors[j].n0 && i <= contributors[j].n1)
+                *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i - contributors[j].n0) *= scale;
+            else if (i < contributors[j].n0)
+                break;
+        }
+    }
+
+    // Optimize: Skip zero coefficients and contributions outside of image bounds.
+    // Do this after normalizing because normalization depends on the n0/n1 values.
+    for (j = 0; j < num_contributors; j++)
+    {
+        int range, max, width;
+
+        // Count leading zero weights.
+        // NOTE(review): this loop has no bound on 'skip'; it presumably relies
+        // on every contributor having at least one nonzero coefficient within
+        // its width (which the >0.9 total assert above suggests) — confirm.
+        skip = 0;
+        while (*stbir__get_coefficient(coefficients, filter, scale_ratio, j, skip) == 0)
+            skip++;
+
+        contributors[j].n0 += skip;
+
+        // Also drop any output indexes left of pixel 0 (edge margin).
+        while (contributors[j].n0 < 0)
+        {
+            contributors[j].n0++;
+            skip++;
+        }
+
+        range = contributors[j].n1 - contributors[j].n0 + 1;
+        max = stbir__min(num_coefficients, range);
+
+        // Shift the surviving coefficients down to the start of the group.
+        width = stbir__get_coefficient_width(filter, scale_ratio);
+        for (i = 0; i < max; i++)
+        {
+            if (i + skip >= width)
+                break;
+
+            *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i) = *stbir__get_coefficient(coefficients, filter, scale_ratio, j, i + skip);
+        }
+
+        // NOTE(review): this 'continue' is a no-op at the end of the loop body.
+        continue;
+    }
+
+    // Using min to avoid writing into invalid pixels.
+    for (i = 0; i < num_contributors; i++)
+        contributors[i].n1 = stbir__min(contributors[i].n1, output_size - 1);
+}
+
+// Each scan line uses the same kernel values so we should calculate the kernel
+// values once and then we can use them for every scan line.
+//
+// Builds the full contributor/coefficient tables for one axis. 'shift' is the
+// sub-pixel offset of the output grid relative to the input grid; 'input_size'
+// and 'output_size' are the axis extents in pixels. Chooses the upsample or
+// downsample formulation based on scale_ratio.
+static void stbir__calculate_filters(stbir__contributors* contributors, float* coefficients, stbir_filter filter, float scale_ratio, float shift, int input_size, int output_size)
+{
+    int n;
+    int total_contributors = stbir__get_contributors(scale_ratio, filter, input_size, output_size);
+
+    if (stbir__use_upsampling(scale_ratio))
+    {
+        // Upsampling: one contributor per OUTPUT pixel; kernel support is
+        // expressed in output-pixel units.
+        float out_pixels_radius = stbir__filter_info_table[filter].support(1 / scale_ratio) * scale_ratio;
+
+        // Looping through out pixels
+        for (n = 0; n < total_contributors; n++)
+        {
+            float in_center_of_out; // Center of the current out pixel in the in pixel space
+            int in_first_pixel, in_last_pixel;
+
+            stbir__calculate_sample_range_upsample(n, out_pixels_radius, scale_ratio, shift, &in_first_pixel, &in_last_pixel, &in_center_of_out);
+
+            stbir__calculate_coefficients_upsample(filter, scale_ratio, in_first_pixel, in_last_pixel, in_center_of_out, stbir__get_contributor(contributors, n), stbir__get_coefficient(coefficients, filter, scale_ratio, n, 0));
+        }
+    }
+    else
+    {
+        // Downsampling: one contributor per INPUT pixel (including the filter
+        // margin on both sides, hence n_adjusted below).
+        float in_pixels_radius = stbir__filter_info_table[filter].support(scale_ratio) / scale_ratio;
+
+        // Looping through in pixels
+        for (n = 0; n < total_contributors; n++)
+        {
+            float out_center_of_in; // Center of the current out pixel in the in pixel space
+            int out_first_pixel, out_last_pixel;
+            int n_adjusted = n - stbir__get_filter_pixel_margin(filter, scale_ratio);
+
+            stbir__calculate_sample_range_downsample(n_adjusted, in_pixels_radius, scale_ratio, shift, &out_first_pixel, &out_last_pixel, &out_center_of_in);
+
+            stbir__calculate_coefficients_downsample(filter, scale_ratio, out_first_pixel, out_last_pixel, out_center_of_in, stbir__get_contributor(contributors, n), stbir__get_coefficient(coefficients, filter, scale_ratio, n, 0));
+        }
+
+        // Downsample weights need an extra normalization pass (per-output sums).
+        stbir__normalize_downsample_coefficients(contributors, coefficients, filter, scale_ratio, input_size, output_size);
+    }
+}
+
+// Return the logical origin (pixel x == 0) of the per-scanline decode buffer.
+static float* stbir__get_decode_buffer(stbir__info* stbir_info)
+{
+    // The 0 index of the decode buffer starts after the margin. This makes
+    // it okay to use negative indexes on the decode buffer.
+    return &stbir_info->decode_buffer[stbir_info->horizontal_filter_pixel_margin * stbir_info->channels];
+}
+
+// Pack a (type, colorspace) pair into one int so the decode/encode switches
+// can dispatch on a single value.
+#define STBIR__DECODE(type, colorspace) ((int)(type) * (STBIR_MAX_COLORSPACES) + (int)(colorspace))
+
+// Decode input scanline n into the float decode buffer: expand each sample to
+// a linear-space float per the (type, colorspace) combination, covering the
+// horizontal filter margin on both sides via edge wrapping. Also premultiplies
+// alpha (unless STBIR_FLAG_ALPHA_PREMULTIPLIED) and zeroes out-of-bounds
+// pixels for STBIR_EDGE_ZERO.
+static void stbir__decode_scanline(stbir__info* stbir_info, int n)
+{
+    int c;
+    int channels = stbir_info->channels;
+    int alpha_channel = stbir_info->alpha_channel;
+    int type = stbir_info->type;
+    int colorspace = stbir_info->colorspace;
+    int input_w = stbir_info->input_w;
+    size_t input_stride_bytes = stbir_info->input_stride_bytes;
+    float* decode_buffer = stbir__get_decode_buffer(stbir_info);
+    stbir_edge edge_horizontal = stbir_info->edge_horizontal;
+    stbir_edge edge_vertical = stbir_info->edge_vertical;
+    size_t in_buffer_row_offset = stbir__edge_wrap(edge_vertical, n, stbir_info->input_h) * input_stride_bytes;
+    const void* input_data = (char *) stbir_info->input_data + in_buffer_row_offset;
+    // Decode buffer logically spans [-margin, input_w + margin).
+    int max_x = input_w + stbir_info->horizontal_filter_pixel_margin;
+    int decode = STBIR__DECODE(type, colorspace);
+
+    int x = -stbir_info->horizontal_filter_pixel_margin;
+
+    // special handling for STBIR_EDGE_ZERO because it needs to return an item that doesn't appear in the input,
+    // and we want to avoid paying overhead on every pixel if not STBIR_EDGE_ZERO
+    if (edge_vertical == STBIR_EDGE_ZERO && (n < 0 || n >= stbir_info->input_h))
+    {
+        for (; x < max_x; x++)
+            for (c = 0; c < channels; c++)
+                decode_buffer[x*channels + c] = 0;
+        return;
+    }
+
+    // One case per (type, colorspace); the sRGB variants skip colorspace
+    // conversion on the alpha channel unless STBIR_FLAG_ALPHA_USES_COLORSPACE.
+    switch (decode)
+    {
+    case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_LINEAR):
+        for (; x < max_x; x++)
+        {
+            int decode_pixel_index = x * channels;
+            int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
+            for (c = 0; c < channels; c++)
+                decode_buffer[decode_pixel_index + c] = ((float)((const unsigned char*)input_data)[input_pixel_index + c]) / stbir__max_uint8_as_float;
+        }
+        break;
+
+    case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_SRGB):
+        for (; x < max_x; x++)
+        {
+            int decode_pixel_index = x * channels;
+            int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
+            for (c = 0; c < channels; c++)
+                decode_buffer[decode_pixel_index + c] = stbir__srgb_uchar_to_linear_float[((const unsigned char*)input_data)[input_pixel_index + c]];
+
+            if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE))
+                decode_buffer[decode_pixel_index + alpha_channel] = ((float)((const unsigned char*)input_data)[input_pixel_index + alpha_channel]) / stbir__max_uint8_as_float;
+        }
+        break;
+
+    case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_LINEAR):
+        for (; x < max_x; x++)
+        {
+            int decode_pixel_index = x * channels;
+            int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
+            for (c = 0; c < channels; c++)
+                decode_buffer[decode_pixel_index + c] = ((float)((const unsigned short*)input_data)[input_pixel_index + c]) / stbir__max_uint16_as_float;
+        }
+        break;
+
+    case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_SRGB):
+        for (; x < max_x; x++)
+        {
+            int decode_pixel_index = x * channels;
+            int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
+            for (c = 0; c < channels; c++)
+                decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(((float)((const unsigned short*)input_data)[input_pixel_index + c]) / stbir__max_uint16_as_float);
+
+            if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE))
+                decode_buffer[decode_pixel_index + alpha_channel] = ((float)((const unsigned short*)input_data)[input_pixel_index + alpha_channel]) / stbir__max_uint16_as_float;
+        }
+        break;
+
+    case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_LINEAR):
+        for (; x < max_x; x++)
+        {
+            int decode_pixel_index = x * channels;
+            int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
+            // 32-bit samples are normalized via double to keep precision.
+            for (c = 0; c < channels; c++)
+                decode_buffer[decode_pixel_index + c] = (float)(((double)((const unsigned int*)input_data)[input_pixel_index + c]) / stbir__max_uint32_as_float);
+        }
+        break;
+
+    case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_SRGB):
+        for (; x < max_x; x++)
+        {
+            int decode_pixel_index = x * channels;
+            int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
+            for (c = 0; c < channels; c++)
+                decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear((float)(((double)((const unsigned int*)input_data)[input_pixel_index + c]) / stbir__max_uint32_as_float));
+
+            if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE))
+                decode_buffer[decode_pixel_index + alpha_channel] = (float)(((double)((const unsigned int*)input_data)[input_pixel_index + alpha_channel]) / stbir__max_uint32_as_float);
+        }
+        break;
+
+    case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_LINEAR):
+        for (; x < max_x; x++)
+        {
+            int decode_pixel_index = x * channels;
+            int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
+            for (c = 0; c < channels; c++)
+                decode_buffer[decode_pixel_index + c] = ((const float*)input_data)[input_pixel_index + c];
+        }
+        break;
+
+    case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_SRGB):
+        for (; x < max_x; x++)
+        {
+            int decode_pixel_index = x * channels;
+            int input_pixel_index = stbir__edge_wrap(edge_horizontal, x, input_w) * channels;
+            for (c = 0; c < channels; c++)
+                decode_buffer[decode_pixel_index + c] = stbir__srgb_to_linear(((const float*)input_data)[input_pixel_index + c]);
+
+            if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE))
+                decode_buffer[decode_pixel_index + alpha_channel] = ((const float*)input_data)[input_pixel_index + alpha_channel];
+        }
+
+        break;
+
+    default:
+        STBIR_ASSERT(!"Unknown type/colorspace/channels combination.");
+        break;
+    }
+
+    // Premultiply alpha into the color channels so filtering happens on
+    // premultiplied values (un-premultiplied again at encode time).
+    if (!(stbir_info->flags & STBIR_FLAG_ALPHA_PREMULTIPLIED))
+    {
+        for (x = -stbir_info->horizontal_filter_pixel_margin; x < max_x; x++)
+        {
+            int decode_pixel_index = x * channels;
+
+            // If the alpha value is 0 it will clobber the color values. Make sure it's not.
+            float alpha = decode_buffer[decode_pixel_index + alpha_channel];
+#ifndef STBIR_NO_ALPHA_EPSILON
+            // The epsilon is added for integer types only; it is small enough
+            // to be discarded again on integer conversion at encode time.
+            if (stbir_info->type != STBIR_TYPE_FLOAT) {
+                alpha += STBIR_ALPHA_EPSILON;
+                decode_buffer[decode_pixel_index + alpha_channel] = alpha;
+            }
+#endif
+            for (c = 0; c < channels; c++)
+            {
+                if (c == alpha_channel)
+                    continue;
+
+                decode_buffer[decode_pixel_index + c] *= alpha;
+            }
+        }
+    }
+
+    // For EDGE_ZERO the margin pixels must be zero, overriding whatever
+    // stbir__edge_wrap decoded into them above.
+    if (edge_horizontal == STBIR_EDGE_ZERO)
+    {
+        for (x = -stbir_info->horizontal_filter_pixel_margin; x < 0; x++)
+        {
+            for (c = 0; c < channels; c++)
+                decode_buffer[x*channels + c] = 0;
+        }
+        for (x = input_w; x < max_x; x++)
+        {
+            for (c = 0; c < channels; c++)
+                decode_buffer[x*channels + c] = 0;
+        }
+    }
+}
+
+// Return a pointer to ring-buffer slot 'index'; each slot is
+// ring_buffer_length floats long.
+static float* stbir__get_ring_buffer_entry(float* ring_buffer, int index, int ring_buffer_length)
+{
+    return &ring_buffer[index * ring_buffer_length];
+}
+
+// Append a zeroed slot for scanline n at the tail of the ring buffer and
+// return it. Updates ring_buffer_first/last_scanline bookkeeping; a
+// ring_buffer_begin_index < 0 means the ring buffer is currently empty.
+static float* stbir__add_empty_ring_buffer_entry(stbir__info* stbir_info, int n)
+{
+    int ring_buffer_index;
+    float* ring_buffer;
+
+    stbir_info->ring_buffer_last_scanline = n;
+
+    if (stbir_info->ring_buffer_begin_index < 0)
+    {
+        // Ring buffer was empty: this scanline becomes both first and last.
+        ring_buffer_index = stbir_info->ring_buffer_begin_index = 0;
+        stbir_info->ring_buffer_first_scanline = n;
+    }
+    else
+    {
+        // Tail slot = begin + number of scanlines already held, modulo capacity.
+        ring_buffer_index = (stbir_info->ring_buffer_begin_index + (stbir_info->ring_buffer_last_scanline - stbir_info->ring_buffer_first_scanline)) % stbir_info->ring_buffer_num_entries;
+        // Wrapping onto the head would overwrite a scanline still in use.
+        STBIR_ASSERT(ring_buffer_index != stbir_info->ring_buffer_begin_index);
+    }
+
+    ring_buffer = stbir__get_ring_buffer_entry(stbir_info->ring_buffer, ring_buffer_index, stbir_info->ring_buffer_length_bytes / sizeof(float));
+    memset(ring_buffer, 0, stbir_info->ring_buffer_length_bytes);
+
+    return ring_buffer;
+}
+
+
+// Horizontally resample (upsample) the current decode buffer into
+// output_buffer: each output pixel accumulates decode-buffer pixels
+// [n0, n1] weighted by the precomputed horizontal coefficients.
+// output_buffer is expected to be zero-initialized by the caller.
+static void stbir__resample_horizontal_upsample(stbir__info* stbir_info, float* output_buffer)
+{
+    int x, k;
+    int output_w = stbir_info->output_w;
+    int channels = stbir_info->channels;
+    float* decode_buffer = stbir__get_decode_buffer(stbir_info);
+    stbir__contributors* horizontal_contributors = stbir_info->horizontal_contributors;
+    float* horizontal_coefficients = stbir_info->horizontal_coefficients;
+    int coefficient_width = stbir_info->horizontal_coefficient_width;
+
+    for (x = 0; x < output_w; x++)
+    {
+        int n0 = horizontal_contributors[x].n0;
+        int n1 = horizontal_contributors[x].n1;
+
+        int out_pixel_index = x * channels;
+        int coefficient_group = coefficient_width * x;
+        int coefficient_counter = 0;
+
+        // Contributor range must stay within the margin-extended decode buffer.
+        STBIR_ASSERT(n1 >= n0);
+        STBIR_ASSERT(n0 >= -stbir_info->horizontal_filter_pixel_margin);
+        STBIR_ASSERT(n1 >= -stbir_info->horizontal_filter_pixel_margin);
+        STBIR_ASSERT(n0 < stbir_info->input_w + stbir_info->horizontal_filter_pixel_margin);
+        STBIR_ASSERT(n1 < stbir_info->input_w + stbir_info->horizontal_filter_pixel_margin);
+
+        // Channel counts 1-4 are manually unrolled; the default case handles
+        // arbitrary channel counts with an inner loop.
+        switch (channels) {
+            case 1:
+                for (k = n0; k <= n1; k++)
+                {
+                    int in_pixel_index = k * 1;
+                    float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++];
+                    STBIR_ASSERT(coefficient != 0);
+                    output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient;
+                }
+                break;
+            case 2:
+                for (k = n0; k <= n1; k++)
+                {
+                    int in_pixel_index = k * 2;
+                    float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++];
+                    STBIR_ASSERT(coefficient != 0);
+                    output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient;
+                    output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient;
+                }
+                break;
+            case 3:
+                for (k = n0; k <= n1; k++)
+                {
+                    int in_pixel_index = k * 3;
+                    float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++];
+                    STBIR_ASSERT(coefficient != 0);
+                    output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient;
+                    output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient;
+                    output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient;
+                }
+                break;
+            case 4:
+                for (k = n0; k <= n1; k++)
+                {
+                    int in_pixel_index = k * 4;
+                    float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++];
+                    STBIR_ASSERT(coefficient != 0);
+                    output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient;
+                    output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient;
+                    output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient;
+                    output_buffer[out_pixel_index + 3] += decode_buffer[in_pixel_index + 3] * coefficient;
+                }
+                break;
+            default:
+                for (k = n0; k <= n1; k++)
+                {
+                    int in_pixel_index = k * channels;
+                    float coefficient = horizontal_coefficients[coefficient_group + coefficient_counter++];
+                    int c;
+                    STBIR_ASSERT(coefficient != 0);
+                    for (c = 0; c < channels; c++)
+                        output_buffer[out_pixel_index + c] += decode_buffer[in_pixel_index + c] * coefficient;
+                }
+                break;
+        }
+    }
+}
+
+// Horizontally resample (downsample) the current decode buffer into
+// output_buffer. Downsampling iterates over INPUT pixels (including the
+// filter margin on both sides) and scatters each one, weighted, into the
+// output pixels [n0, n1] listed in its contributor entry.
+// output_buffer is expected to be zero-initialized by the caller.
+static void stbir__resample_horizontal_downsample(stbir__info* stbir_info, float* output_buffer)
+{
+    int x, k;
+    int input_w = stbir_info->input_w;
+    int channels = stbir_info->channels;
+    float* decode_buffer = stbir__get_decode_buffer(stbir_info);
+    stbir__contributors* horizontal_contributors = stbir_info->horizontal_contributors;
+    float* horizontal_coefficients = stbir_info->horizontal_coefficients;
+    int coefficient_width = stbir_info->horizontal_coefficient_width;
+    int filter_pixel_margin = stbir_info->horizontal_filter_pixel_margin;
+    int max_x = input_w + filter_pixel_margin * 2;
+
+    STBIR_ASSERT(!stbir__use_width_upsampling(stbir_info));
+
+    // Channel counts 1-4 are manually unrolled; the default case handles
+    // arbitrary channel counts with an inner loop.
+    switch (channels) {
+        case 1:
+            for (x = 0; x < max_x; x++)
+            {
+                int n0 = horizontal_contributors[x].n0;
+                int n1 = horizontal_contributors[x].n1;
+
+                // x runs over the margin-extended range; in_x is the real
+                // (possibly negative) decode-buffer index.
+                int in_x = x - filter_pixel_margin;
+                int in_pixel_index = in_x * 1;
+                int max_n = n1;
+                int coefficient_group = coefficient_width * x;
+
+                for (k = n0; k <= max_n; k++)
+                {
+                    int out_pixel_index = k * 1;
+                    float coefficient = horizontal_coefficients[coefficient_group + k - n0];
+                    output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient;
+                }
+            }
+            break;
+
+        case 2:
+            for (x = 0; x < max_x; x++)
+            {
+                int n0 = horizontal_contributors[x].n0;
+                int n1 = horizontal_contributors[x].n1;
+
+                int in_x = x - filter_pixel_margin;
+                int in_pixel_index = in_x * 2;
+                int max_n = n1;
+                int coefficient_group = coefficient_width * x;
+
+                for (k = n0; k <= max_n; k++)
+                {
+                    int out_pixel_index = k * 2;
+                    float coefficient = horizontal_coefficients[coefficient_group + k - n0];
+                    output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient;
+                    output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient;
+                }
+            }
+            break;
+
+        case 3:
+            for (x = 0; x < max_x; x++)
+            {
+                int n0 = horizontal_contributors[x].n0;
+                int n1 = horizontal_contributors[x].n1;
+
+                int in_x = x - filter_pixel_margin;
+                int in_pixel_index = in_x * 3;
+                int max_n = n1;
+                int coefficient_group = coefficient_width * x;
+
+                for (k = n0; k <= max_n; k++)
+                {
+                    int out_pixel_index = k * 3;
+                    float coefficient = horizontal_coefficients[coefficient_group + k - n0];
+                    output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient;
+                    output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient;
+                    output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient;
+                }
+            }
+            break;
+
+        case 4:
+            for (x = 0; x < max_x; x++)
+            {
+                int n0 = horizontal_contributors[x].n0;
+                int n1 = horizontal_contributors[x].n1;
+
+                int in_x = x - filter_pixel_margin;
+                int in_pixel_index = in_x * 4;
+                int max_n = n1;
+                int coefficient_group = coefficient_width * x;
+
+                for (k = n0; k <= max_n; k++)
+                {
+                    int out_pixel_index = k * 4;
+                    float coefficient = horizontal_coefficients[coefficient_group + k - n0];
+                    output_buffer[out_pixel_index + 0] += decode_buffer[in_pixel_index + 0] * coefficient;
+                    output_buffer[out_pixel_index + 1] += decode_buffer[in_pixel_index + 1] * coefficient;
+                    output_buffer[out_pixel_index + 2] += decode_buffer[in_pixel_index + 2] * coefficient;
+                    output_buffer[out_pixel_index + 3] += decode_buffer[in_pixel_index + 3] * coefficient;
+                }
+            }
+            break;
+
+        default:
+            for (x = 0; x < max_x; x++)
+            {
+                int n0 = horizontal_contributors[x].n0;
+                int n1 = horizontal_contributors[x].n1;
+
+                int in_x = x - filter_pixel_margin;
+                int in_pixel_index = in_x * channels;
+                int max_n = n1;
+                int coefficient_group = coefficient_width * x;
+
+                for (k = n0; k <= max_n; k++)
+                {
+                    int c;
+                    int out_pixel_index = k * channels;
+                    float coefficient = horizontal_coefficients[coefficient_group + k - n0];
+                    for (c = 0; c < channels; c++)
+                        output_buffer[out_pixel_index + c] += decode_buffer[in_pixel_index + c] * coefficient;
+                }
+            }
+            break;
+    }
+}
+
+// Vertical-upsample path: decode input scanline n and horizontally resample
+// it directly into a fresh (zeroed) ring-buffer slot.
+static void stbir__decode_and_resample_upsample(stbir__info* stbir_info, int n)
+{
+    // Decode the nth scanline from the source image into the decode buffer.
+    stbir__decode_scanline(stbir_info, n);
+
+    // Now resample it into the ring buffer.
+    if (stbir__use_width_upsampling(stbir_info))
+        stbir__resample_horizontal_upsample(stbir_info, stbir__add_empty_ring_buffer_entry(stbir_info, n));
+    else
+        stbir__resample_horizontal_downsample(stbir_info, stbir__add_empty_ring_buffer_entry(stbir_info, n));
+
+    // Now it's sitting in the ring buffer ready to be used as source for the vertical sampling.
+}
+
+// Vertical-downsample path: decode input scanline n and horizontally resample
+// it into the (zeroed here) intermediate horizontal buffer, from which it is
+// later distributed into multiple ring-buffer scanlines.
+static void stbir__decode_and_resample_downsample(stbir__info* stbir_info, int n)
+{
+    // Decode the nth scanline from the source image into the decode buffer.
+    stbir__decode_scanline(stbir_info, n);
+
+    memset(stbir_info->horizontal_buffer, 0, stbir_info->output_w * stbir_info->channels * sizeof(float));
+
+    // Now resample it into the horizontal buffer.
+    if (stbir__use_width_upsampling(stbir_info))
+        stbir__resample_horizontal_upsample(stbir_info, stbir_info->horizontal_buffer)
+    else
+        stbir__resample_horizontal_downsample(stbir_info, stbir_info->horizontal_buffer);
+
+    // Now it's sitting in the horizontal buffer ready to be distributed into the ring buffers.
+}
+
+// Get the specified scan line from the ring buffer.
+// Maps an absolute scanline number to its ring-buffer slot: slots are
+// allocated in scanline order starting at begin_index/first_scanline,
+// wrapping modulo ring_buffer_num_entries.
+static float* stbir__get_ring_buffer_scanline(int get_scanline, float* ring_buffer, int begin_index, int first_scanline, int ring_buffer_num_entries, int ring_buffer_length)
+{
+    int ring_buffer_index = (begin_index + (get_scanline - first_scanline)) % ring_buffer_num_entries;
+    return stbir__get_ring_buffer_entry(ring_buffer, ring_buffer_index, ring_buffer_length);
+}
+
+
+// Encode one finished scanline: convert num_pixels pixels of linear floats in
+// encode_buffer to the caller's output (type, colorspace) combination, writing
+// into output_buffer. Un-premultiplies alpha first (unless the caller supplied
+// premultiplied data), and skips sRGB conversion on the alpha channel unless
+// STBIR_FLAG_ALPHA_USES_COLORSPACE. 'decode' is the packed STBIR__DECODE key.
+// NOTE: this mutates encode_buffer in place during alpha un-premultiply.
+static void stbir__encode_scanline(stbir__info* stbir_info, int num_pixels, void *output_buffer, float *encode_buffer, int channels, int alpha_channel, int decode)
+{
+    int x;
+    int n;
+    int num_nonalpha;
+    stbir_uint16 nonalpha[STBIR_MAX_CHANNELS];
+
+    if (!(stbir_info->flags&STBIR_FLAG_ALPHA_PREMULTIPLIED))
+    {
+        for (x=0; x < num_pixels; ++x)
+        {
+            int pixel_index = x*channels;
+
+            float alpha = encode_buffer[pixel_index + alpha_channel];
+            float reciprocal_alpha = alpha ? 1.0f / alpha : 0;
+
+            // unrolling this produced a 1% slowdown upscaling a large RGBA linear-space image on my machine - stb
+            for (n = 0; n < channels; n++)
+                if (n != alpha_channel)
+                    encode_buffer[pixel_index + n] *= reciprocal_alpha;
+
+            // We added in a small epsilon to prevent the color channel from being deleted with zero alpha.
+            // Because we only add it for integer types, it will automatically be discarded on integer
+            // conversion, so we don't need to subtract it back out (which would be problematic for
+            // numeric precision reasons).
+        }
+    }
+
+    // build a table of all channels that need colorspace correction, so
+    // we don't perform colorspace correction on channels that don't need it.
+    for (x = 0, num_nonalpha = 0; x < channels; ++x)
+    {
+        if (x != alpha_channel || (stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE))
+        {
+            nonalpha[num_nonalpha++] = (stbir_uint16)x;
+        }
+    }
+
+    // Round-to-nearest helpers for float -> integer conversion.
+    #define STBIR__ROUND_INT(f)    ((int)          ((f)+0.5))
+    #define STBIR__ROUND_UINT(f)   ((stbir_uint32) ((f)+0.5))
+
+    // STBIR__SATURATE_INT selects clamping after rounding (saturate8/16)
+    // instead of clamping the float before rounding (stbir__saturate).
+    #ifdef STBIR__SATURATE_INT
+    #define STBIR__ENCODE_LINEAR8(f)   stbir__saturate8 (STBIR__ROUND_INT((f) * stbir__max_uint8_as_float ))
+    #define STBIR__ENCODE_LINEAR16(f)  stbir__saturate16(STBIR__ROUND_INT((f) * stbir__max_uint16_as_float))
+    #else
+    #define STBIR__ENCODE_LINEAR8(f)   (unsigned char ) STBIR__ROUND_INT(stbir__saturate(f) * stbir__max_uint8_as_float )
+    #define STBIR__ENCODE_LINEAR16(f)  (unsigned short) STBIR__ROUND_INT(stbir__saturate(f) * stbir__max_uint16_as_float)
+    #endif
+
+    // One case per (type, colorspace), mirroring stbir__decode_scanline.
+    switch (decode)
+    {
+        case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_LINEAR):
+            for (x=0; x < num_pixels; ++x)
+            {
+                int pixel_index = x*channels;
+
+                for (n = 0; n < channels; n++)
+                {
+                    int index = pixel_index + n;
+                    ((unsigned char*)output_buffer)[index] = STBIR__ENCODE_LINEAR8(encode_buffer[index]);
+                }
+            }
+            break;
+
+        case STBIR__DECODE(STBIR_TYPE_UINT8, STBIR_COLORSPACE_SRGB):
+            for (x=0; x < num_pixels; ++x)
+            {
+                int pixel_index = x*channels;
+
+                for (n = 0; n < num_nonalpha; n++)
+                {
+                    int index = pixel_index + nonalpha[n];
+                    ((unsigned char*)output_buffer)[index] = stbir__linear_to_srgb_uchar(encode_buffer[index]);
+                }
+
+                if (!(stbir_info->flags & STBIR_FLAG_ALPHA_USES_COLORSPACE))
+                    ((unsigned char *)output_buffer)[pixel_index + alpha_channel] = STBIR__ENCODE_LINEAR8(encode_buffer[pixel_index+alpha_channel]);
+            }
+            break;
+
+        case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_LINEAR):
+            for (x=0; x < num_pixels; ++x)
+            {
+                int pixel_index = x*channels;
+
+                for (n = 0; n < channels; n++)
+                {
+                    int index = pixel_index + n;
+                    ((unsigned short*)output_buffer)[index] = STBIR__ENCODE_LINEAR16(encode_buffer[index]);
+                }
+            }
+            break;
+
+        case STBIR__DECODE(STBIR_TYPE_UINT16, STBIR_COLORSPACE_SRGB):
+            for (x=0; x < num_pixels; ++x)
+            {
+                int pixel_index = x*channels;
+
+                for (n = 0; n < num_nonalpha; n++)
+                {
+                    int index = pixel_index + nonalpha[n];
+                    ((unsigned short*)output_buffer)[index] = (unsigned short)STBIR__ROUND_INT(stbir__linear_to_srgb(stbir__saturate(encode_buffer[index])) * stbir__max_uint16_as_float);
+                }
+
+                if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE))
+                    ((unsigned short*)output_buffer)[pixel_index + alpha_channel] = STBIR__ENCODE_LINEAR16(encode_buffer[pixel_index + alpha_channel]);
+            }
+
+            break;
+
+        case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_LINEAR):
+            for (x=0; x < num_pixels; ++x)
+            {
+                int pixel_index = x*channels;
+
+                // 32-bit targets go through double to keep rounding precision.
+                for (n = 0; n < channels; n++)
+                {
+                    int index = pixel_index + n;
+                    ((unsigned int*)output_buffer)[index] = (unsigned int)STBIR__ROUND_UINT(((double)stbir__saturate(encode_buffer[index])) * stbir__max_uint32_as_float);
+                }
+            }
+            break;
+
+        case STBIR__DECODE(STBIR_TYPE_UINT32, STBIR_COLORSPACE_SRGB):
+            for (x=0; x < num_pixels; ++x)
+            {
+                int pixel_index = x*channels;
+
+                for (n = 0; n < num_nonalpha; n++)
+                {
+                    int index = pixel_index + nonalpha[n];
+                    ((unsigned int*)output_buffer)[index] = (unsigned int)STBIR__ROUND_UINT(((double)stbir__linear_to_srgb(stbir__saturate(encode_buffer[index]))) * stbir__max_uint32_as_float);
+                }
+
+                if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE))
+                    ((unsigned int*)output_buffer)[pixel_index + alpha_channel] = (unsigned int)STBIR__ROUND_INT(((double)stbir__saturate(encode_buffer[pixel_index + alpha_channel])) * stbir__max_uint32_as_float);
+            }
+            break;
+
+        case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_LINEAR):
+            for (x=0; x < num_pixels; ++x)
+            {
+                int pixel_index = x*channels;
+
+                for (n = 0; n < channels; n++)
+                {
+                    int index = pixel_index + n;
+                    ((float*)output_buffer)[index] = encode_buffer[index];
+                }
+            }
+            break;
+
+        case STBIR__DECODE(STBIR_TYPE_FLOAT, STBIR_COLORSPACE_SRGB):
+            for (x=0; x < num_pixels; ++x)
+            {
+                int pixel_index = x*channels;
+
+                for (n = 0; n < num_nonalpha; n++)
+                {
+                    int index = pixel_index + nonalpha[n];
+                    ((float*)output_buffer)[index] = stbir__linear_to_srgb(encode_buffer[index]);
+                }
+
+                if (!(stbir_info->flags&STBIR_FLAG_ALPHA_USES_COLORSPACE))
+                    ((float*)output_buffer)[pixel_index + alpha_channel] = encode_buffer[pixel_index + alpha_channel];
+            }
+            break;
+
+        default:
+            STBIR_ASSERT(!"Unknown type/colorspace/channels combination.");
+            break;
+    }
+}
+
+// Height-upsampling path: produce output row n as a weighted sum of the
+// contributing input scanlines held in the ring buffer, accumulated into
+// encode_buffer, then encoded into the destination image.
+// n indexes both the output row and the vertical contributor entry.
+static void stbir__resample_vertical_upsample(stbir__info* stbir_info, int n)
+{
+    int x, k;
+    int output_w = stbir_info->output_w;
+    stbir__contributors* vertical_contributors = stbir_info->vertical_contributors;
+    float* vertical_coefficients = stbir_info->vertical_coefficients;
+    int channels = stbir_info->channels;
+    int alpha_channel = stbir_info->alpha_channel;
+    int type = stbir_info->type;
+    int colorspace = stbir_info->colorspace;
+    int ring_buffer_entries = stbir_info->ring_buffer_num_entries;
+    void* output_data = stbir_info->output_data;
+    float* encode_buffer = stbir_info->encode_buffer;
+    int decode = STBIR__DECODE(type, colorspace);
+    int coefficient_width = stbir_info->vertical_coefficient_width;
+    int coefficient_counter;
+    int contributor = n;
+
+    float* ring_buffer = stbir_info->ring_buffer;
+    int ring_buffer_begin_index = stbir_info->ring_buffer_begin_index;
+    int ring_buffer_first_scanline = stbir_info->ring_buffer_first_scanline;
+    int ring_buffer_length = stbir_info->ring_buffer_length_bytes/sizeof(float);
+
+    int n0,n1, output_row_start;
+    int coefficient_group = coefficient_width * contributor;
+
+    // [n0, n1] is the inclusive range of input scanlines contributing to output row n.
+    n0 = vertical_contributors[contributor].n0;
+    n1 = vertical_contributors[contributor].n1;
+
+    output_row_start = n * stbir_info->output_stride_bytes;
+
+    STBIR_ASSERT(stbir__use_height_upsampling(stbir_info));
+
+    // Start the accumulator at zero; each contributing scanline is added below.
+    memset(encode_buffer, 0, output_w * sizeof(float) * channels);
+
+    // I tried reblocking this for better cache usage of encode_buffer
+    // (using x_outer, k, x_inner), but it lost speed. -- stb
+
+    coefficient_counter = 0;
+    // The switch unrolls the per-channel inner loop for the common 1-4 channel
+    // counts; the default case handles an arbitrary channel count.
+    switch (channels) {
+        case 1:
+            for (k = n0; k <= n1; k++)
+            {
+                int coefficient_index = coefficient_counter++;
+                float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length);
+                float coefficient = vertical_coefficients[coefficient_group + coefficient_index];
+                for (x = 0; x < output_w; ++x)
+                {
+                    int in_pixel_index = x * 1;
+                    encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient;
+                }
+            }
+            break;
+        case 2:
+            for (k = n0; k <= n1; k++)
+            {
+                int coefficient_index = coefficient_counter++;
+                float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length);
+                float coefficient = vertical_coefficients[coefficient_group + coefficient_index];
+                for (x = 0; x < output_w; ++x)
+                {
+                    int in_pixel_index = x * 2;
+                    encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient;
+                    encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient;
+                }
+            }
+            break;
+        case 3:
+            for (k = n0; k <= n1; k++)
+            {
+                int coefficient_index = coefficient_counter++;
+                float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length);
+                float coefficient = vertical_coefficients[coefficient_group + coefficient_index];
+                for (x = 0; x < output_w; ++x)
+                {
+                    int in_pixel_index = x * 3;
+                    encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient;
+                    encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient;
+                    encode_buffer[in_pixel_index + 2] += ring_buffer_entry[in_pixel_index + 2] * coefficient;
+                }
+            }
+            break;
+        case 4:
+            for (k = n0; k <= n1; k++)
+            {
+                int coefficient_index = coefficient_counter++;
+                float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length);
+                float coefficient = vertical_coefficients[coefficient_group + coefficient_index];
+                for (x = 0; x < output_w; ++x)
+                {
+                    int in_pixel_index = x * 4;
+                    encode_buffer[in_pixel_index + 0] += ring_buffer_entry[in_pixel_index + 0] * coefficient;
+                    encode_buffer[in_pixel_index + 1] += ring_buffer_entry[in_pixel_index + 1] * coefficient;
+                    encode_buffer[in_pixel_index + 2] += ring_buffer_entry[in_pixel_index + 2] * coefficient;
+                    encode_buffer[in_pixel_index + 3] += ring_buffer_entry[in_pixel_index + 3] * coefficient;
+                }
+            }
+            break;
+        default:
+            for (k = n0; k <= n1; k++)
+            {
+                int coefficient_index = coefficient_counter++;
+                float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length);
+                float coefficient = vertical_coefficients[coefficient_group + coefficient_index];
+                for (x = 0; x < output_w; ++x)
+                {
+                    int in_pixel_index = x * channels;
+                    int c;
+                    for (c = 0; c < channels; c++)
+                        encode_buffer[in_pixel_index + c] += ring_buffer_entry[in_pixel_index + c] * coefficient;
+                }
+            }
+            break;
+    }
+    // Convert the accumulated float row to the requested type/colorspace and
+    // write it at the output row's byte offset.
+    stbir__encode_scanline(stbir_info, output_w, (char *) output_data + output_row_start, encode_buffer, channels, alpha_channel, decode);
+}
+
+// Height-downsampling path: scatter the current horizontally-resampled input
+// scanline (horizontal_buffer) into every output row it contributes to, each
+// held as a ring-buffer entry that accumulates weighted sums.
+// n is the input scanline index; the pixel margin offsets it into the
+// contributor table, which also covers edge rows outside the image.
+static void stbir__resample_vertical_downsample(stbir__info* stbir_info, int n)
+{
+    int x, k;
+    int output_w = stbir_info->output_w;
+    stbir__contributors* vertical_contributors = stbir_info->vertical_contributors;
+    float* vertical_coefficients = stbir_info->vertical_coefficients;
+    int channels = stbir_info->channels;
+    int ring_buffer_entries = stbir_info->ring_buffer_num_entries;
+    float* horizontal_buffer = stbir_info->horizontal_buffer;
+    int coefficient_width = stbir_info->vertical_coefficient_width;
+    int contributor = n + stbir_info->vertical_filter_pixel_margin;
+
+    float* ring_buffer = stbir_info->ring_buffer;
+    int ring_buffer_begin_index = stbir_info->ring_buffer_begin_index;
+    int ring_buffer_first_scanline = stbir_info->ring_buffer_first_scanline;
+    int ring_buffer_length = stbir_info->ring_buffer_length_bytes/sizeof(float);
+    int n0,n1;
+
+    // [n0, n1] is the inclusive range of output rows this input scanline feeds.
+    n0 = vertical_contributors[contributor].n0;
+    n1 = vertical_contributors[contributor].n1;
+
+    STBIR_ASSERT(!stbir__use_height_upsampling(stbir_info));
+
+    for (k = n0; k <= n1; k++)
+    {
+        int coefficient_index = k - n0;
+        int coefficient_group = coefficient_width * contributor;
+        float coefficient = vertical_coefficients[coefficient_group + coefficient_index];
+
+        float* ring_buffer_entry = stbir__get_ring_buffer_scanline(k, ring_buffer, ring_buffer_begin_index, ring_buffer_first_scanline, ring_buffer_entries, ring_buffer_length);
+
+        // Unrolled per-channel accumulation for the common 1-4 channel counts;
+        // the default case handles an arbitrary channel count.
+        switch (channels) {
+            case 1:
+                for (x = 0; x < output_w; x++)
+                {
+                    int in_pixel_index = x * 1;
+                    ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient;
+                }
+                break;
+            case 2:
+                for (x = 0; x < output_w; x++)
+                {
+                    int in_pixel_index = x * 2;
+                    ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient;
+                    ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient;
+                }
+                break;
+            case 3:
+                for (x = 0; x < output_w; x++)
+                {
+                    int in_pixel_index = x * 3;
+                    ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient;
+                    ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient;
+                    ring_buffer_entry[in_pixel_index + 2] += horizontal_buffer[in_pixel_index + 2] * coefficient;
+                }
+                break;
+            case 4:
+                for (x = 0; x < output_w; x++)
+                {
+                    int in_pixel_index = x * 4;
+                    ring_buffer_entry[in_pixel_index + 0] += horizontal_buffer[in_pixel_index + 0] * coefficient;
+                    ring_buffer_entry[in_pixel_index + 1] += horizontal_buffer[in_pixel_index + 1] * coefficient;
+                    ring_buffer_entry[in_pixel_index + 2] += horizontal_buffer[in_pixel_index + 2] * coefficient;
+                    ring_buffer_entry[in_pixel_index + 3] += horizontal_buffer[in_pixel_index + 3] * coefficient;
+                }
+                break;
+            default:
+                for (x = 0; x < output_w; x++)
+                {
+                    int in_pixel_index = x * channels;
+
+                    int c;
+                    for (c = 0; c < channels; c++)
+                        ring_buffer_entry[in_pixel_index + c] += horizontal_buffer[in_pixel_index + c] * coefficient;
+                }
+                break;
+        }
+    }
+}
+
+// Main scanline loop for height upsampling: for each output row, evict
+// ring-buffer scanlines that are no longer needed, decode/resample any newly
+// required input scanlines, then resolve the output row vertically.
+static void stbir__buffer_loop_upsample(stbir__info* stbir_info)
+{
+    int y;
+    float scale_ratio = stbir_info->vertical_scale;
+    // Filter support measured in output-scanline units.
+    float out_scanlines_radius = stbir__filter_info_table[stbir_info->vertical_filter].support(1/scale_ratio) * scale_ratio;
+
+    STBIR_ASSERT(stbir__use_height_upsampling(stbir_info));
+
+    for (y = 0; y < stbir_info->output_h; y++)
+    {
+        float in_center_of_out = 0; // Center of the current out scanline in the in scanline space
+        int in_first_scanline = 0, in_last_scanline = 0;
+
+        stbir__calculate_sample_range_upsample(y, out_scanlines_radius, scale_ratio, stbir_info->vertical_shift, &in_first_scanline, &in_last_scanline, &in_center_of_out);
+
+        // The ring buffer must be able to hold every contributing scanline at once.
+        STBIR_ASSERT(in_last_scanline - in_first_scanline + 1 <= stbir_info->ring_buffer_num_entries);
+
+        // ring_buffer_begin_index < 0 signals an empty ring buffer.
+        if (stbir_info->ring_buffer_begin_index >= 0)
+        {
+            // Get rid of whatever we don't need anymore.
+            while (in_first_scanline > stbir_info->ring_buffer_first_scanline)
+            {
+                if (stbir_info->ring_buffer_first_scanline == stbir_info->ring_buffer_last_scanline)
+                {
+                    // We just popped the last scanline off the ring buffer.
+                    // Reset it to the empty state.
+                    stbir_info->ring_buffer_begin_index = -1;
+                    stbir_info->ring_buffer_first_scanline = 0;
+                    stbir_info->ring_buffer_last_scanline = 0;
+                    break;
+                }
+                else
+                {
+                    stbir_info->ring_buffer_first_scanline++;
+                    stbir_info->ring_buffer_begin_index = (stbir_info->ring_buffer_begin_index + 1) % stbir_info->ring_buffer_num_entries;
+                }
+            }
+        }
+
+        // Load in new ones.
+        if (stbir_info->ring_buffer_begin_index < 0)
+            stbir__decode_and_resample_upsample(stbir_info, in_first_scanline);
+
+        while (in_last_scanline > stbir_info->ring_buffer_last_scanline)
+            stbir__decode_and_resample_upsample(stbir_info, stbir_info->ring_buffer_last_scanline + 1);
+
+        // Now all buffers should be ready to write a row of vertical sampling.
+        stbir__resample_vertical_upsample(stbir_info, y);
+
+        STBIR_PROGRESS_REPORT((float)y / stbir_info->output_h);
+    }
+}
+
+// Flush completed ring-buffer rows (downsampling path): encode and emit every
+// buffered output row with index below first_necessary_scanline, popping each
+// from the front of the ring buffer.  Rows outside [0, output_h) are popped
+// without being written (they exist only because of the filter pixel margin).
+static void stbir__empty_ring_buffer(stbir__info* stbir_info, int first_necessary_scanline)
+{
+    int output_stride_bytes = stbir_info->output_stride_bytes;
+    int channels = stbir_info->channels;
+    int alpha_channel = stbir_info->alpha_channel;
+    int type = stbir_info->type;
+    int colorspace = stbir_info->colorspace;
+    int output_w = stbir_info->output_w;
+    void* output_data = stbir_info->output_data;
+    int decode = STBIR__DECODE(type, colorspace);
+
+    float* ring_buffer = stbir_info->ring_buffer;
+    int ring_buffer_length = stbir_info->ring_buffer_length_bytes/sizeof(float);
+
+    // ring_buffer_begin_index < 0 signals an empty ring buffer; nothing to flush.
+    if (stbir_info->ring_buffer_begin_index >= 0)
+    {
+        // Get rid of whatever we don't need anymore.
+        while (first_necessary_scanline > stbir_info->ring_buffer_first_scanline)
+        {
+            // Only encode rows that actually lie inside the output image.
+            if (stbir_info->ring_buffer_first_scanline >= 0 && stbir_info->ring_buffer_first_scanline < stbir_info->output_h)
+            {
+                int output_row_start = stbir_info->ring_buffer_first_scanline * output_stride_bytes;
+                float* ring_buffer_entry = stbir__get_ring_buffer_entry(ring_buffer, stbir_info->ring_buffer_begin_index, ring_buffer_length);
+                stbir__encode_scanline(stbir_info, output_w, (char *) output_data + output_row_start, ring_buffer_entry, channels, alpha_channel, decode);
+                STBIR_PROGRESS_REPORT((float)stbir_info->ring_buffer_first_scanline / stbir_info->output_h);
+            }
+
+            if (stbir_info->ring_buffer_first_scanline == stbir_info->ring_buffer_last_scanline)
+            {
+                // We just popped the last scanline off the ring buffer.
+                // Reset it to the empty state.
+                stbir_info->ring_buffer_begin_index = -1;
+                stbir_info->ring_buffer_first_scanline = 0;
+                stbir_info->ring_buffer_last_scanline = 0;
+                break;
+            }
+            else
+            {
+                stbir_info->ring_buffer_first_scanline++;
+                stbir_info->ring_buffer_begin_index = (stbir_info->ring_buffer_begin_index + 1) % stbir_info->ring_buffer_num_entries;
+            }
+        }
+    }
+}
+
+// Main scanline loop for height downsampling: iterate over input scanlines
+// (including the filter pixel margin above and below the image), flush output
+// rows that can no longer receive contributions, then accumulate the current
+// input scanline into every ring-buffer row it contributes to.  A final flush
+// emits everything remaining.
+static void stbir__buffer_loop_downsample(stbir__info* stbir_info)
+{
+    int y;
+    float scale_ratio = stbir_info->vertical_scale;
+    int output_h = stbir_info->output_h;
+    // Filter support measured in input-scanline units.
+    float in_pixels_radius = stbir__filter_info_table[stbir_info->vertical_filter].support(scale_ratio) / scale_ratio;
+    int pixel_margin = stbir_info->vertical_filter_pixel_margin;
+    int max_y = stbir_info->input_h + pixel_margin;
+
+    STBIR_ASSERT(!stbir__use_height_upsampling(stbir_info));
+
+    for (y = -pixel_margin; y < max_y; y++)
+    {
+        float out_center_of_in; // Center of the current out scanline in the in scanline space
+        int out_first_scanline, out_last_scanline;
+
+        stbir__calculate_sample_range_downsample(y, in_pixels_radius, scale_ratio, stbir_info->vertical_shift, &out_first_scanline, &out_last_scanline, &out_center_of_in);
+
+        STBIR_ASSERT(out_last_scanline - out_first_scanline + 1 <= stbir_info->ring_buffer_num_entries);
+
+        // Skip input scanlines whose contribution range falls entirely outside the output.
+        if (out_last_scanline < 0 || out_first_scanline >= output_h)
+            continue;
+
+        // Emit rows that no longer receive contributions before reusing their slots.
+        stbir__empty_ring_buffer(stbir_info, out_first_scanline);
+
+        stbir__decode_and_resample_downsample(stbir_info, y);
+
+        // Load in new ones.
+        if (stbir_info->ring_buffer_begin_index < 0)
+            stbir__add_empty_ring_buffer_entry(stbir_info, out_first_scanline);
+
+        while (out_last_scanline > stbir_info->ring_buffer_last_scanline)
+            stbir__add_empty_ring_buffer_entry(stbir_info, stbir_info->ring_buffer_last_scanline + 1);
+
+        // Now the horizontal buffer is ready to write to all ring buffer rows.
+        stbir__resample_vertical_downsample(stbir_info, y);
+    }
+
+    // Flush all remaining buffered rows to the output.
+    stbir__empty_ring_buffer(stbir_info, stbir_info->output_h);
+}
+
+// Record the basic geometry (input/output dimensions and channel count) into
+// the resize info struct.  No validation is done here.
+static void stbir__setup(stbir__info *info, int input_w, int input_h, int output_w, int output_h, int channels)
+{
+    info->input_w = input_w;
+    info->input_h = input_h;
+    info->output_w = output_w;
+    info->output_h = output_h;
+    info->channels = channels;
+}
+
+// Store the source sub-rectangle (s0,t0)-(s1,t1) and derive the horizontal /
+// vertical scale and shift.  If a 4-element transform array is supplied it is
+// used verbatim ({h_scale, v_scale, h_shift, v_shift}); otherwise the values
+// are computed from the output/input size ratio over the chosen sub-rectangle.
+static void stbir__calculate_transform(stbir__info *info, float s0, float t0, float s1, float t1, float *transform)
+{
+    info->s0 = s0;
+    info->t0 = t0;
+    info->s1 = s1;
+    info->t1 = t1;
+
+    if (transform)
+    {
+        info->horizontal_scale = transform[0];
+        info->vertical_scale   = transform[1];
+        info->horizontal_shift = transform[2];
+        info->vertical_shift   = transform[3];
+    }
+    else
+    {
+        info->horizontal_scale = ((float)info->output_w / info->input_w) / (s1 - s0);
+        info->vertical_scale = ((float)info->output_h / info->input_h) / (t1 - t0);
+
+        info->horizontal_shift = s0 * info->output_w / (s1 - s0);
+        info->vertical_shift = t0 * info->output_h / (t1 - t0);
+    }
+}
+
+// Resolve filter selections: a value of 0 (STBIR_FILTER_DEFAULT) is replaced
+// per axis with the default upsample or downsample filter, chosen by whether
+// that axis's scale indicates upsampling.
+static void stbir__choose_filter(stbir__info *info, stbir_filter h_filter, stbir_filter v_filter)
+{
+    if (h_filter == 0)
+        h_filter = stbir__use_upsampling(info->horizontal_scale) ? STBIR_DEFAULT_FILTER_UPSAMPLE : STBIR_DEFAULT_FILTER_DOWNSAMPLE;
+    if (v_filter == 0)
+        v_filter = stbir__use_upsampling(info->vertical_scale)   ? STBIR_DEFAULT_FILTER_UPSAMPLE : STBIR_DEFAULT_FILTER_DOWNSAMPLE;
+    info->horizontal_filter = h_filter;
+    info->vertical_filter = v_filter;
+}
+
+// Compute the size of every temporary buffer the resize needs, store the
+// individual sizes in info (they are consumed later when partitioning the
+// caller-provided temp memory), and return the total byte count.
+// Note: only one of horizontal_buffer / encode_buffer is used, depending on
+// whether the height pass is up- or downsampling; the unused one is zeroed.
+static stbir_uint32 stbir__calculate_memory(stbir__info *info)
+{
+    int pixel_margin = stbir__get_filter_pixel_margin(info->horizontal_filter, info->horizontal_scale);
+    int filter_height = stbir__get_filter_pixel_width(info->vertical_filter, info->vertical_scale);
+
+    info->horizontal_num_contributors = stbir__get_contributors(info->horizontal_scale, info->horizontal_filter, info->input_w, info->output_w);
+    info->vertical_num_contributors   = stbir__get_contributors(info->vertical_scale  , info->vertical_filter  , info->input_h, info->output_h);
+
+    // One extra entry because floating point precision problems sometimes cause an extra to be necessary.
+    info->ring_buffer_num_entries = filter_height + 1;
+
+    info->horizontal_contributors_size = info->horizontal_num_contributors * sizeof(stbir__contributors);
+    info->horizontal_coefficients_size = stbir__get_total_horizontal_coefficients(info) * sizeof(float);
+    info->vertical_contributors_size = info->vertical_num_contributors * sizeof(stbir__contributors);
+    info->vertical_coefficients_size = stbir__get_total_vertical_coefficients(info) * sizeof(float);
+    // Decode buffer is widened by the margin on both sides for edge handling.
+    info->decode_buffer_size = (info->input_w + pixel_margin * 2) * info->channels * sizeof(float);
+    info->horizontal_buffer_size = info->output_w * info->channels * sizeof(float);
+    info->ring_buffer_size = info->output_w * info->channels * info->ring_buffer_num_entries * sizeof(float);
+    info->encode_buffer_size = info->output_w * info->channels * sizeof(float);
+
+    STBIR_ASSERT(info->horizontal_filter != 0);
+    STBIR_ASSERT(info->horizontal_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); // this now happens too late
+    STBIR_ASSERT(info->vertical_filter != 0);
+    STBIR_ASSERT(info->vertical_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table)); // this now happens too late
+
+    if (stbir__use_height_upsampling(info))
+        // The horizontal buffer is for when we're downsampling the height and we
+        // can't output the result of sampling the decode buffer directly into the
+        // ring buffers.
+        info->horizontal_buffer_size = 0;
+    else
+        // The encode buffer is to retain precision in the height upsampling method
+        // and isn't used when height downsampling.
+        info->encode_buffer_size = 0;
+
+    return info->horizontal_contributors_size + info->horizontal_coefficients_size
+        + info->vertical_contributors_size + info->vertical_coefficients_size
+        + info->decode_buffer_size + info->horizontal_buffer_size
+        + info->ring_buffer_size + info->encode_buffer_size;
+}
+
+// Core resize driver operating on caller-provided temp memory.  Validates the
+// request, fills in the remaining info fields, partitions tempmem into the
+// working buffers sized by stbir__calculate_memory, builds the filter
+// contributor/coefficient tables, and runs the up- or downsample scanline
+// loop.  Returns 1 on success, 0 on any validation failure.
+static int stbir__resize_allocated(stbir__info *info,
+    const void* input_data, int input_stride_in_bytes,
+    void* output_data, int output_stride_in_bytes,
+    int alpha_channel, stbir_uint32 flags, stbir_datatype type,
+    stbir_edge edge_horizontal, stbir_edge edge_vertical, stbir_colorspace colorspace,
+    void* tempmem, size_t tempmem_size_in_bytes)
+{
+    size_t memory_required = stbir__calculate_memory(info);
+
+    // A stride of 0 means tightly packed rows.
+    int width_stride_input = input_stride_in_bytes ? input_stride_in_bytes : info->channels * info->input_w * stbir__type_size[type];
+    int width_stride_output = output_stride_in_bytes ? output_stride_in_bytes : info->channels * info->output_w * stbir__type_size[type];
+
+#ifdef STBIR_DEBUG_OVERWRITE_TEST
+#define OVERWRITE_ARRAY_SIZE 8
+    // Snapshot the bytes just before/after the output and temp buffers so we
+    // can assert at the end that the resize never wrote out of bounds.
+    unsigned char overwrite_output_before_pre[OVERWRITE_ARRAY_SIZE];
+    unsigned char overwrite_tempmem_before_pre[OVERWRITE_ARRAY_SIZE];
+    unsigned char overwrite_output_after_pre[OVERWRITE_ARRAY_SIZE];
+    unsigned char overwrite_tempmem_after_pre[OVERWRITE_ARRAY_SIZE];
+
+    size_t begin_forbidden = width_stride_output * (info->output_h - 1) + info->output_w * info->channels * stbir__type_size[type];
+    memcpy(overwrite_output_before_pre, &((unsigned char*)output_data)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE);
+    memcpy(overwrite_output_after_pre, &((unsigned char*)output_data)[begin_forbidden], OVERWRITE_ARRAY_SIZE);
+    memcpy(overwrite_tempmem_before_pre, &((unsigned char*)tempmem)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE);
+    memcpy(overwrite_tempmem_after_pre, &((unsigned char*)tempmem)[tempmem_size_in_bytes], OVERWRITE_ARRAY_SIZE);
+#endif
+
+    // Parameter validation: each check both asserts (debug) and fails
+    // gracefully with a 0 return (release).
+    STBIR_ASSERT(info->channels >= 0);
+    STBIR_ASSERT(info->channels <= STBIR_MAX_CHANNELS);
+
+    if (info->channels < 0 || info->channels > STBIR_MAX_CHANNELS)
+        return 0;
+
+    STBIR_ASSERT(info->horizontal_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table));
+    STBIR_ASSERT(info->vertical_filter < STBIR__ARRAY_SIZE(stbir__filter_info_table));
+
+    if (info->horizontal_filter >= STBIR__ARRAY_SIZE(stbir__filter_info_table))
+        return 0;
+    if (info->vertical_filter >= STBIR__ARRAY_SIZE(stbir__filter_info_table))
+        return 0;
+
+    // A negative alpha channel means "no alpha": force the flags that make the
+    // alpha-specific code paths no-ops.
+    if (alpha_channel < 0)
+        flags |= STBIR_FLAG_ALPHA_USES_COLORSPACE | STBIR_FLAG_ALPHA_PREMULTIPLIED;
+
+    if (!(flags&STBIR_FLAG_ALPHA_USES_COLORSPACE) || !(flags&STBIR_FLAG_ALPHA_PREMULTIPLIED)) {
+        STBIR_ASSERT(alpha_channel >= 0 && alpha_channel < info->channels);
+    }
+
+    if (alpha_channel >= info->channels)
+        return 0;
+
+    STBIR_ASSERT(tempmem);
+
+    if (!tempmem)
+        return 0;
+
+    STBIR_ASSERT(tempmem_size_in_bytes >= memory_required);
+
+    if (tempmem_size_in_bytes < memory_required)
+        return 0;
+
+    memset(tempmem, 0, tempmem_size_in_bytes);
+
+    info->input_data = input_data;
+    info->input_stride_bytes = width_stride_input;
+
+    info->output_data = output_data;
+    info->output_stride_bytes = width_stride_output;
+
+    info->alpha_channel = alpha_channel;
+    info->flags = flags;
+    info->type = type;
+    info->edge_horizontal = edge_horizontal;
+    info->edge_vertical = edge_vertical;
+    info->colorspace = colorspace;
+
+    info->horizontal_coefficient_width   = stbir__get_coefficient_width  (info->horizontal_filter, info->horizontal_scale);
+    info->vertical_coefficient_width     = stbir__get_coefficient_width  (info->vertical_filter  , info->vertical_scale  );
+    info->horizontal_filter_pixel_width  = stbir__get_filter_pixel_width (info->horizontal_filter, info->horizontal_scale);
+    info->vertical_filter_pixel_width    = stbir__get_filter_pixel_width (info->vertical_filter  , info->vertical_scale  );
+    info->horizontal_filter_pixel_margin = stbir__get_filter_pixel_margin(info->horizontal_filter, info->horizontal_scale);
+    info->vertical_filter_pixel_margin   = stbir__get_filter_pixel_margin(info->vertical_filter  , info->vertical_scale  );
+
+    info->ring_buffer_length_bytes = info->output_w * info->channels * sizeof(float);
+    info->decode_buffer_pixels = info->input_w + info->horizontal_filter_pixel_margin * 2;
+
+// Carve sequential buffers out of tempmem; relies on each buffer pointer
+// field having a matching `<name>_size` field set by stbir__calculate_memory.
+#define STBIR__NEXT_MEMPTR(current, newtype) (newtype*)(((unsigned char*)current) + current##_size)
+
+    info->horizontal_contributors = (stbir__contributors *) tempmem;
+    info->horizontal_coefficients = STBIR__NEXT_MEMPTR(info->horizontal_contributors, float);
+    info->vertical_contributors = STBIR__NEXT_MEMPTR(info->horizontal_coefficients, stbir__contributors);
+    info->vertical_coefficients = STBIR__NEXT_MEMPTR(info->vertical_contributors, float);
+    info->decode_buffer = STBIR__NEXT_MEMPTR(info->vertical_coefficients, float);
+
+    if (stbir__use_height_upsampling(info))
+    {
+        info->horizontal_buffer = NULL;
+        info->ring_buffer = STBIR__NEXT_MEMPTR(info->decode_buffer, float);
+        info->encode_buffer = STBIR__NEXT_MEMPTR(info->ring_buffer, float);
+
+        // The partition must consume tempmem exactly.
+        STBIR_ASSERT((size_t)STBIR__NEXT_MEMPTR(info->encode_buffer, unsigned char) == (size_t)tempmem + tempmem_size_in_bytes);
+    }
+    else
+    {
+        info->horizontal_buffer = STBIR__NEXT_MEMPTR(info->decode_buffer, float);
+        info->ring_buffer = STBIR__NEXT_MEMPTR(info->horizontal_buffer, float);
+        info->encode_buffer = NULL;
+
+        STBIR_ASSERT((size_t)STBIR__NEXT_MEMPTR(info->ring_buffer, unsigned char) == (size_t)tempmem + tempmem_size_in_bytes);
+    }
+
+#undef STBIR__NEXT_MEMPTR
+
+    // This signals that the ring buffer is empty
+    info->ring_buffer_begin_index = -1;
+
+    stbir__calculate_filters(info->horizontal_contributors, info->horizontal_coefficients, info->horizontal_filter, info->horizontal_scale, info->horizontal_shift, info->input_w, info->output_w);
+    stbir__calculate_filters(info->vertical_contributors, info->vertical_coefficients, info->vertical_filter, info->vertical_scale, info->vertical_shift, info->input_h, info->output_h);
+
+    STBIR_PROGRESS_REPORT(0);
+
+    if (stbir__use_height_upsampling(info))
+        stbir__buffer_loop_upsample(info);
+    else
+        stbir__buffer_loop_downsample(info);
+
+    STBIR_PROGRESS_REPORT(1);
+
+#ifdef STBIR_DEBUG_OVERWRITE_TEST
+    // Verify the guard bytes captured above were not overwritten.
+    STBIR_ASSERT(memcmp(overwrite_output_before_pre, &((unsigned char*)output_data)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE) == 0);
+    STBIR_ASSERT(memcmp(overwrite_output_after_pre, &((unsigned char*)output_data)[begin_forbidden], OVERWRITE_ARRAY_SIZE) == 0);
+    STBIR_ASSERT(memcmp(overwrite_tempmem_before_pre, &((unsigned char*)tempmem)[-OVERWRITE_ARRAY_SIZE], OVERWRITE_ARRAY_SIZE) == 0);
+    STBIR_ASSERT(memcmp(overwrite_tempmem_after_pre, &((unsigned char*)tempmem)[tempmem_size_in_bytes], OVERWRITE_ARRAY_SIZE) == 0);
+#endif
+
+    return 1;
+}
+
+
+// Convenience entry point used by all public stbir_resize_* wrappers: sets up
+// the info struct, computes and allocates the required temp memory via
+// STBIR_MALLOC, runs the resize, and frees the memory.  Returns 0 on
+// allocation failure or invalid parameters, 1 on success.
+static int stbir__resize_arbitrary(
+    void *alloc_context,
+    const void* input_data, int input_w, int input_h, int input_stride_in_bytes,
+    void* output_data, int output_w, int output_h, int output_stride_in_bytes,
+    float s0, float t0, float s1, float t1, float *transform,
+    int channels, int alpha_channel, stbir_uint32 flags, stbir_datatype type,
+    stbir_filter h_filter, stbir_filter v_filter,
+    stbir_edge edge_horizontal, stbir_edge edge_vertical, stbir_colorspace colorspace)
+{
+    stbir__info info;
+    int result;
+    size_t memory_required;
+    void* extra_memory;
+
+    stbir__setup(&info, input_w, input_h, output_w, output_h, channels);
+    stbir__calculate_transform(&info, s0,t0,s1,t1,transform);
+    stbir__choose_filter(&info, h_filter, v_filter);
+    memory_required = stbir__calculate_memory(&info);
+    extra_memory = STBIR_MALLOC(memory_required, alloc_context);
+
+    if (!extra_memory)
+        return 0;
+
+    result = stbir__resize_allocated(&info, input_data, input_stride_in_bytes,
+                                            output_data, output_stride_in_bytes,
+                                            alpha_channel, flags, type,
+                                            edge_horizontal, edge_vertical,
+                                            colorspace, extra_memory, memory_required);
+
+    STBIR_FREE(extra_memory, alloc_context);
+
+    return result;
+}
+
+// Public API: simplest 8-bit resize — full source rect, no alpha channel
+// (alpha_channel = -1), default filters, clamp edges, linear colorspace.
+STBIRDEF int stbir_resize_uint8(     const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                           unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                     int num_channels)
+{
+    return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes,
+        output_pixels, output_w, output_h, output_stride_in_bytes,
+        0,0,1,1,NULL,num_channels,-1,0, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT,
+        STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_LINEAR);
+}
+
+// Public API: float resize with the same defaults as stbir_resize_uint8
+// (no alpha, default filters, clamp edges, linear colorspace).
+STBIRDEF int stbir_resize_float(     const float *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                           float *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                     int num_channels)
+{
+    return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes,
+        output_pixels, output_w, output_h, output_stride_in_bytes,
+        0,0,1,1,NULL,num_channels,-1,0, STBIR_TYPE_FLOAT, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT,
+        STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_LINEAR);
+}
+
+// Public API: 8-bit sRGB resize with caller-selected alpha channel and flags;
+// default filters, clamp edges.
+STBIRDEF int stbir_resize_uint8_srgb(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                           unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                     int num_channels, int alpha_channel, int flags)
+{
+    return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes,
+        output_pixels, output_w, output_h, output_stride_in_bytes,
+        0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT,
+        STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP, STBIR_COLORSPACE_SRGB);
+}
+
+// Public API: like stbir_resize_uint8_srgb but with a caller-selected edge
+// wrap mode (applied to both axes).
+STBIRDEF int stbir_resize_uint8_srgb_edgemode(const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                                    unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                              int num_channels, int alpha_channel, int flags,
+                                              stbir_edge edge_wrap_mode)
+{
+    return stbir__resize_arbitrary(NULL, input_pixels, input_w, input_h, input_stride_in_bytes,
+        output_pixels, output_w, output_h, output_stride_in_bytes,
+        0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT8, STBIR_FILTER_DEFAULT, STBIR_FILTER_DEFAULT,
+        edge_wrap_mode, edge_wrap_mode, STBIR_COLORSPACE_SRGB);
+}
+
+STBIRDEF int stbir_resize_uint8_generic( const unsigned char *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                               unsigned char *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                         int num_channels, int alpha_channel, int flags,
+                                         stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space,
+                                         void *alloc_context)
+{
+    return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
+        output_pixels, output_w, output_h, output_stride_in_bytes,
+        0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT8, filter, filter,
+        edge_wrap_mode, edge_wrap_mode, space);
+}
+
+STBIRDEF int stbir_resize_uint16_generic(const stbir_uint16 *input_pixels  , int input_w , int input_h , int input_stride_in_bytes,
+                                               stbir_uint16 *output_pixels , int output_w, int output_h, int output_stride_in_bytes,
+                                         int num_channels, int alpha_channel, int flags,
+                                         stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space,
+                                         void *alloc_context)
+{
+    return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
+        output_pixels, output_w, output_h, output_stride_in_bytes,
+        0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_UINT16, filter, filter,
+        edge_wrap_mode, edge_wrap_mode, space);
+}
+
+
+STBIRDEF int stbir_resize_float_generic( const float *input_pixels         , int input_w , int input_h , int input_stride_in_bytes,
+                                               float *output_pixels        , int output_w, int output_h, int output_stride_in_bytes,
+                                         int num_channels, int alpha_channel, int flags,
+                                         stbir_edge edge_wrap_mode, stbir_filter filter, stbir_colorspace space,
+                                         void *alloc_context)
+{
+    return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
+        output_pixels, output_w, output_h, output_stride_in_bytes,
+        0,0,1,1,NULL,num_channels,alpha_channel,flags, STBIR_TYPE_FLOAT, filter, filter,
+        edge_wrap_mode, edge_wrap_mode, space);
+}
+
+
+STBIRDEF int stbir_resize(         const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                         void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                   stbir_datatype datatype,
+                                   int num_channels, int alpha_channel, int flags,
+                                   stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
+                                   stbir_filter filter_horizontal,  stbir_filter filter_vertical,
+                                   stbir_colorspace space, void *alloc_context)
+{
+    return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
+        output_pixels, output_w, output_h, output_stride_in_bytes,
+        0,0,1,1,NULL,num_channels,alpha_channel,flags, datatype, filter_horizontal, filter_vertical,
+        edge_mode_horizontal, edge_mode_vertical, space);
+}
+
+
+STBIRDEF int stbir_resize_subpixel(const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                         void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                   stbir_datatype datatype,
+                                   int num_channels, int alpha_channel, int flags,
+                                   stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
+                                   stbir_filter filter_horizontal,  stbir_filter filter_vertical,
+                                   stbir_colorspace space, void *alloc_context,
+                                   float x_scale, float y_scale,
+                                   float x_offset, float y_offset)
+{
+    float transform[4];
+    transform[0] = x_scale;
+    transform[1] = y_scale;
+    transform[2] = x_offset;
+    transform[3] = y_offset;
+    return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
+        output_pixels, output_w, output_h, output_stride_in_bytes,
+        0,0,1,1,transform,num_channels,alpha_channel,flags, datatype, filter_horizontal, filter_vertical,
+        edge_mode_horizontal, edge_mode_vertical, space);
+}
+
+STBIRDEF int stbir_resize_region(  const void *input_pixels , int input_w , int input_h , int input_stride_in_bytes,
+                                         void *output_pixels, int output_w, int output_h, int output_stride_in_bytes,
+                                   stbir_datatype datatype,
+                                   int num_channels, int alpha_channel, int flags,
+                                   stbir_edge edge_mode_horizontal, stbir_edge edge_mode_vertical,
+                                   stbir_filter filter_horizontal,  stbir_filter filter_vertical,
+                                   stbir_colorspace space, void *alloc_context,
+                                   float s0, float t0, float s1, float t1)
+{
+    return stbir__resize_arbitrary(alloc_context, input_pixels, input_w, input_h, input_stride_in_bytes,
+        output_pixels, output_w, output_h, output_stride_in_bytes,
+        s0,t0,s1,t1,NULL,num_channels,alpha_channel,flags, datatype, filter_horizontal, filter_vertical,
+        edge_mode_horizontal, edge_mode_vertical, space);
+}
+
+#endif // STB_IMAGE_RESIZE_IMPLEMENTATION
+
+/*
+------------------------------------------------------------------------------
+This software is available under 2 licenses -- choose whichever you prefer.
+------------------------------------------------------------------------------
+ALTERNATIVE A - MIT License
+Copyright (c) 2017 Sean Barrett
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+------------------------------------------------------------------------------
+ALTERNATIVE B - Public Domain (www.unlicense.org)
+This is free and unencumbered software released into the public domain.
+Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
+software, either in source code form or as a compiled binary, for any purpose,
+commercial or non-commercial, and by any means.
+In jurisdictions that recognize copyright laws, the author or authors of this
+software dedicate any and all copyright interest in the software to the public
+domain. We make this dedication for the benefit of the public at large and to
+the detriment of our heirs and successors. We intend this dedication to be an
+overt act of relinquishment in perpetuity of all present and future rights to
+this software under copyright law.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+------------------------------------------------------------------------------
+*/
diff --git a/hailort/libhailort/src/net_flow/ops/yolo_post_process.cpp b/hailort/libhailort/src/net_flow/ops/yolo_post_process.cpp
deleted file mode 100644 (file)
index 2c4b90a..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file yolo_post_process.cpp
- * @brief YOLO post process
- *
- * https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python :
- * The headline '4.3.5 POST-PROCESSING YOLOv5 Prediction Output' contains explanations on the YOLOv5 post-processing.
- **/
-
-#include "net_flow/ops/yolo_post_process.hpp"
-
-namespace hailort
-{
-namespace net_flow
-{
-
-hailo_status YOLOv5PostProcessOp::validate_metadata()
-{
-    auto status = NmsPostProcessOp::validate_metadata();
-    if (HAILO_SUCCESS != status) {
-        return status;
-    }
-
-    return HAILO_SUCCESS;
-}
-
-//TODO- move to a dedicated module and maybe convert all yolo function to yolov5, HRT-10858
-Expected<std::shared_ptr<Op>> YOLOv5PostProcessOp::create(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                                                          const std::map<std::string, BufferMetaData> &outputs_metadata,
-                                                          const NmsPostProcessConfig &nms_post_process_config,
-                                                          const YoloPostProcessConfig &yolo_post_process_config)
-{
-    for (auto &name_to_inputs_metadata : inputs_metadata) {
-        CHECK_AS_EXPECTED(name_to_inputs_metadata.second.format.order == HAILO_FORMAT_ORDER_NHCW, HAILO_INVALID_ARGUMENT,
-            "YOLOv5PostProcessOp: Unexpected input format {}", name_to_inputs_metadata.second.format.order);
-    }
-    auto op = std::shared_ptr<YOLOv5PostProcessOp>(new (std::nothrow) YOLOv5PostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, yolo_post_process_config));
-    CHECK_AS_EXPECTED(op != nullptr, HAILO_OUT_OF_HOST_MEMORY);
-
-    return std::shared_ptr<Op>(std::move(op));
-}
-
-hailo_status YOLOPostProcessOp::execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
-{
-    CHECK(inputs.size() == m_yolo_config.anchors.size(), HAILO_INVALID_ARGUMENT,
-        "Anchors vector count must be equal to data vector count. Anchors size is {}, data size is {}",
-            m_yolo_config.anchors.size(), inputs.size());
-
-    std::vector<DetectionBbox> detections;
-    std::vector<uint32_t> classes_detections_count(m_nms_config.number_of_classes, 0);
-    detections.reserve(m_nms_config.max_proposals_per_class * m_nms_config.number_of_classes);
-    for (const auto &name_to_input : inputs) {
-        hailo_status status;
-        auto &name = name_to_input.first;
-        auto &input_metadata = m_inputs_metadata[name];
-        if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT8) {
-            status = extract_detections<float32_t, uint8_t>(name_to_input.second, input_metadata.quant_info, input_metadata.shape,
-                input_metadata.padded_shape, m_yolo_config.anchors[name], detections, classes_detections_count);
-        } else if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT16) {
-            status = extract_detections<float32_t, uint16_t>(name_to_input.second, input_metadata.quant_info, input_metadata.shape,
-                input_metadata.padded_shape, m_yolo_config.anchors[name], detections, classes_detections_count);
-        } else {
-            CHECK_SUCCESS(HAILO_INVALID_ARGUMENT, "YOLO post-process received invalid input type {}", input_metadata.format.type);
-        }
-        CHECK_SUCCESS(status);
-    }
-
-    // TODO: Add support for TF_FORMAT_ORDER
-    return hailo_nms_format(std::move(detections), outputs.begin()->second, classes_detections_count);
-}
-
-std::string YOLOPostProcessOp::get_op_description()
-{
-    auto nms_config_info = get_nms_config_description();
-    auto config_info = fmt::format("Name: {}, {}, Image height: {:.2f}, Image width: {:.2f}", 
-                        m_name, nms_config_info, m_yolo_config.image_height, m_yolo_config.image_width);
-    return config_info;
-}
-
-hailo_bbox_float32_t YOLOv5PostProcessOp::decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
-    int wa, int ha, uint32_t col, uint32_t row, uint32_t w_stride, uint32_t h_stride) const
-{
-    auto w = pow(2.0f * tw, 2.0f) * static_cast<float32_t>(wa) / m_yolo_config.image_width;
-    auto h = pow(2.0f * th, 2.0f) * static_cast<float32_t>(ha) / m_yolo_config.image_height;
-    auto x_center = (tx * 2.0f - 0.5f + static_cast<float32_t>(col)) / static_cast<float32_t>(w_stride);
-    auto y_center = (ty * 2.0f - 0.5f + static_cast<float32_t>(row)) / static_cast<float32_t>(h_stride);
-    auto x_min = (x_center - (w / 2.0f));
-    auto y_min = (y_center - (h / 2.0f));
-    return hailo_bbox_float32_t{y_min, x_min, (y_min+h), (x_min+w), 0};
-}
-
-} // namespace net_flow
-} // namespace hailort
-
diff --git a/hailort/libhailort/src/net_flow/ops/yolo_post_process.hpp b/hailort/libhailort/src/net_flow/ops/yolo_post_process.hpp
deleted file mode 100644 (file)
index 049d587..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-/**
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file yolo_post_process.hpp
- * @brief YOLO post process
- *
- * https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python :
- * The headline '4.3.5 POST-PROCESSING YOLOv5 Prediction Output' contains explanations on the YOLOv5 post-processing.
- **/
-
-#ifndef _HAILO_YOLO_POST_PROCESS_HPP_
-#define _HAILO_YOLO_POST_PROCESS_HPP_
-
-#include "net_flow/ops/nms_post_process.hpp"
-
-namespace hailort
-{
-namespace net_flow
-{
-
-struct YoloPostProcessConfig
-{
-    // The image height.
-    float32_t image_height = 0;
-
-    // The image width.
-    float32_t image_width = 0;
-
-    // A vector of anchors, each element in the vector represents the anchors for a specific layer
-    // Each layer anchors vector is structured as {w,h} pairs.
-    std::map<std::string, std::vector<int>> anchors;
-};
-
-
-class YOLOPostProcessOp : public NmsPostProcessOp
-{
-public:
-    hailo_status execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs) override;
-    std::string get_op_description() override;
-    virtual hailo_status validate_metadata() = 0; // TODO: HRT-10676
-
-protected:
-    virtual hailo_bbox_float32_t decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
-        int wa, int ha, uint32_t col, uint32_t row, uint32_t w_stride, uint32_t h_stride) const = 0;
-
-    YOLOPostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                      const std::map<std::string, BufferMetaData> &outputs_metadata,
-                      const NmsPostProcessConfig &nms_post_process_config,
-                      const YoloPostProcessConfig &yolo_post_process_config,
-                      const std::string &name)
-        : NmsPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, name)
-        , m_yolo_config(yolo_post_process_config)
-    {}
-
-    YoloPostProcessConfig m_yolo_config;
-
-private:
-    /**
-     * Extract bboxes with confidence level higher then @a confidence_threshold from @a buffer and add them to @a detections.
-     *
-     * @param[in] buffer                        Buffer containing data after inference
-     * @param[in] quant_info                    Quantization info corresponding to the @a buffer layer.
-     * @param[in] shape                         Shape corresponding to the @a buffer layer.
-     * @param[in] layer_anchors                 The layer anchors corresponding to layer receiving the @a buffer.
-     *                                          Each anchor is structured as {width, height} pairs.
-     * @param[inout] detections                 A vector of ::DetectionBbox objects, to add the detected bboxes to.
-     * @param[inout] classes_detections_count   A vector of uint32_t, to add count of detections count per class to.
-     * 
-     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
-    */
-    template<typename HostType = float32_t, typename DeviceType>
-    hailo_status extract_detections(const MemoryView &buffer, hailo_quant_info_t quant_info,
-        hailo_3d_image_shape_t shape, hailo_3d_image_shape_t padded_shape,
-        const std::vector<int> &layer_anchors, std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
-    {
-        static const uint32_t X_INDEX = 0;
-        static const uint32_t Y_INDEX = 1;
-        static const uint32_t W_INDEX = 2;
-        static const uint32_t H_INDEX = 3;
-        static const uint32_t OBJECTNESS_INDEX = 4;
-        static const uint32_t CLASSES_START_INDEX = 5;
-
-        const uint32_t X_OFFSET = X_INDEX * padded_shape.width;
-        const uint32_t Y_OFFSET = Y_INDEX * padded_shape.width;
-        const uint32_t W_OFFSET = W_INDEX * padded_shape.width;
-        const uint32_t H_OFFSET = H_INDEX * padded_shape.width;
-        const uint32_t OBJECTNESS_OFFSET = OBJECTNESS_INDEX * padded_shape.width;
-
-        // Each layer anchors vector is structured as {w,h} pairs.
-        // For example, if we have a vector of size 6 (default YOLOv5 vector) then we have 3 anchors for this layer.
-        assert(layer_anchors.size() % 2 == 0);
-        const size_t num_of_anchors = (layer_anchors.size() / 2);
-
-        uint32_t entry_size = (uint32_t)(CLASSES_START_INDEX + m_nms_config.number_of_classes);
-        auto number_of_entries = padded_shape.height * padded_shape.width * num_of_anchors;
-        // TODO: this can also be part of the Op configuration
-        auto buffer_size = number_of_entries * entry_size * sizeof(DeviceType);
-        CHECK(buffer_size == buffer.size(), HAILO_INVALID_ARGUMENT,
-            "Failed to extract_detections, buffer_size should be {}, but is {}", buffer_size, buffer.size());
-
-        auto row_size = padded_shape.width * padded_shape.features;
-        DeviceType *data = (DeviceType*)buffer.data();
-        for (uint32_t row = 0; row < shape.height; row++) {
-            for (uint32_t col = 0; col < shape.width; col++) {
-                for (uint32_t anchor = 0; anchor < num_of_anchors; anchor++) {
-                    auto entry_idx = (row_size * row) + col + ((anchor * entry_size) * padded_shape.width);
-                    auto objectness = Quantization::dequantize_output<HostType, DeviceType>(data[entry_idx + OBJECTNESS_OFFSET], quant_info);
-                    if (objectness < m_nms_config.nms_score_th) {
-                        continue;
-                    }
-                    
-                    auto tx = Quantization::dequantize_output<HostType, DeviceType>(data[entry_idx + X_OFFSET], quant_info);
-                    auto ty = Quantization::dequantize_output<HostType, DeviceType>(data[entry_idx + Y_OFFSET], quant_info);
-                    auto tw = Quantization::dequantize_output<HostType, DeviceType>(data[entry_idx + W_OFFSET], quant_info);
-                    auto th = Quantization::dequantize_output<HostType, DeviceType>(data[entry_idx + H_OFFSET], quant_info);
-                    auto bbox = decode(tx, ty, tw, th, layer_anchors[anchor * 2], layer_anchors[anchor * 2 + 1], col, row,
-                        shape.width, shape.height);
-
-                    // Source for the calculations - https://github.com/ultralytics/yolov5/blob/HEAD/models/yolo.py
-                    // Explanations for the calculations - https://github.com/ultralytics/yolov5/issues/471
-                    if (m_nms_config.cross_classes) {
-                        // Pre-NMS optimization. If NMS checks IOU over different classes, only the maximum class is relevant
-                        auto max_id_score_pair = get_max_class<HostType, DeviceType>(data, entry_idx, CLASSES_START_INDEX, objectness, quant_info, padded_shape.width);
-                        bbox.score = max_id_score_pair.second;
-                        if (max_id_score_pair.second >= m_nms_config.nms_score_th) {
-                            detections.emplace_back(DetectionBbox(bbox, max_id_score_pair.first));
-                            classes_detections_count[max_id_score_pair.first]++;
-                        }
-                    }
-                    else {
-                        for (uint32_t class_index = 0; class_index < m_nms_config.number_of_classes; class_index++) {
-                            auto class_entry_idx = entry_idx + ((CLASSES_START_INDEX + class_index) * padded_shape.width);
-                            auto class_confidence = Quantization::dequantize_output<HostType, DeviceType>(
-                                data[class_entry_idx], quant_info);
-                            auto class_score = class_confidence * objectness;
-                            if (class_score >= m_nms_config.nms_score_th) {
-                                bbox.score = class_score;
-                                detections.emplace_back(DetectionBbox(bbox, class_index));
-                                classes_detections_count[class_index]++;
-                            }
-                        }
-                    }
-                }
-            }
-        }
-        
-        return HAILO_SUCCESS;
-    }
-};
-
-class YOLOv5PostProcessOp : public YOLOPostProcessOp
-{
-public:
-    static Expected<std::shared_ptr<Op>> create(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                                                const std::map<std::string, BufferMetaData> &outputs_metadata,
-                                                const NmsPostProcessConfig &nms_post_process_config,
-                                                const YoloPostProcessConfig &yolo_post_process_config);
-    hailo_status validate_metadata() override; // TODO: HRT-10676
-
-protected:
-    virtual hailo_bbox_float32_t decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
-        int wa, int ha, uint32_t col, uint32_t row, uint32_t w_stride, uint32_t h_stride) const override;
-
-private:
-    YOLOv5PostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                        const std::map<std::string, BufferMetaData> &outputs_metadata,
-                        const NmsPostProcessConfig &nms_post_process_config,
-                        const YoloPostProcessConfig &yolo_post_process_config)
-        : YOLOPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, yolo_post_process_config, "YOLOv5-Post-Process")
-    {}
-};
-
-} // namespace net_flow
-} // namespace hailort
-
-#endif // _HAILO_YOLO_POST_PROCESS_HPP_
diff --git a/hailort/libhailort/src/net_flow/ops/yolov5_post_process.cpp b/hailort/libhailort/src/net_flow/ops/yolov5_post_process.cpp
new file mode 100644 (file)
index 0000000..e8033fa
--- /dev/null
@@ -0,0 +1,120 @@
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file yolov5_post_process.cpp
+ * @brief YOLOv5 post process
+ *
+ * https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python :
+ * The headline '4.3.5 POST-PROCESSING YOLOv5 Prediction Output' contains explanations on the YOLOv5 post-processing.
+ **/
+
+#include "net_flow/ops/yolov5_post_process.hpp"
+
+namespace hailort
+{
+namespace net_flow
+{
+
+Expected<std::shared_ptr<OpMetadata>> Yolov5OpMetadata::create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                                                            const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                                                            const NmsPostProcessConfig &nms_post_process_config,
+                                                            const YoloPostProcessConfig &yolov5_post_process_config,
+                                                            const std::string &network_name)
+{
+    auto op_metadata = std::shared_ptr<Yolov5OpMetadata>(new (std::nothrow) Yolov5OpMetadata(inputs_metadata, outputs_metadata,
+        nms_post_process_config, "YOLOv5-Post-Process", network_name, yolov5_post_process_config, OperationType::YOLOV5));
+    CHECK_AS_EXPECTED(op_metadata != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto status = op_metadata->validate_params();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return std::shared_ptr<OpMetadata>(std::move(op_metadata));
+}
+
+hailo_status Yolov5OpMetadata::validate_params()
+{
+    return(NmsOpMetadata::validate_params());
+}
+
+hailo_status Yolov5OpMetadata::validate_format_info()
+{
+    return NmsOpMetadata::validate_format_info();
+}
+
+std::string Yolov5OpMetadata::get_op_description()
+{
+    auto nms_config_info = get_nms_config_description();
+    auto config_info = fmt::format("Op {}, Name: {}, {}, Image height: {:.2f}, Image width: {:.2f}",
+        OpMetadata::get_operation_type_str(m_type), m_name, nms_config_info, m_yolov5_config.image_height, m_yolov5_config.image_width);
+    return config_info;
+}
+
+Expected<std::shared_ptr<Op>> YOLOv5PostProcessOp::create(std::shared_ptr<Yolov5OpMetadata> metadata)
+{
+    auto status = metadata->validate_format_info();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    auto op = std::shared_ptr<YOLOv5PostProcessOp>(new (std::nothrow) YOLOv5PostProcessOp(metadata));
+    CHECK_AS_EXPECTED(op != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    return std::shared_ptr<Op>(std::move(op));
+}
+
+hailo_status YOLOv5PostProcessOp::execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
+{
+    const auto &inputs_metadata = m_metadata->inputs_metadata();
+    const auto &yolo_config = m_metadata->yolov5_config();
+    const auto &nms_config = m_metadata->nms_config();
+    CHECK(inputs.size() == yolo_config.anchors.size(), HAILO_INVALID_ARGUMENT,
+        "Anchors vector count must be equal to data vector count. Anchors size is {}, data size is {}",
+            yolo_config.anchors.size(), inputs.size());
+
+    std::vector<DetectionBbox> detections;
+    std::vector<uint32_t> classes_detections_count(nms_config.number_of_classes, 0);
+    detections.reserve(nms_config.max_proposals_per_class * nms_config.number_of_classes);
+    for (const auto &name_to_input : inputs) {
+        hailo_status status;
+        auto &name = name_to_input.first;
+        assert(contains(inputs_metadata, name));
+        auto &input_metadata = inputs_metadata.at(name);
+        assert(contains(yolo_config.anchors, name));
+        if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT8) {
+            status = extract_detections<float32_t, uint8_t>(name_to_input.second, input_metadata.quant_info, input_metadata.shape,
+                input_metadata.padded_shape, yolo_config.anchors.at(name), detections, classes_detections_count);
+        } else if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT16) {
+            status = extract_detections<float32_t, uint16_t>(name_to_input.second, input_metadata.quant_info, input_metadata.shape,
+                input_metadata.padded_shape, yolo_config.anchors.at(name), detections, classes_detections_count);
+        } else {
+            CHECK_SUCCESS(HAILO_INVALID_ARGUMENT, "YOLO post-process received invalid input type {}", input_metadata.format.type);
+        }
+        CHECK_SUCCESS(status);
+    }
+
+    // TODO: Add support for TF_FORMAT_ORDER
+    return hailo_nms_format(std::move(detections), outputs.begin()->second, classes_detections_count);
+}
+
+hailo_bbox_float32_t YOLOv5PostProcessOp::decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
+    int wa, int ha, uint32_t col, uint32_t row, uint32_t w_stride, uint32_t h_stride) const
+{
+    // Source for the calculations - https://github.com/ultralytics/yolov5/blob/HEAD/models/yolo.py
+    // Explanations for the calculations - https://github.com/ultralytics/yolov5/issues/471
+    auto w = pow(2.0f * tw, 2.0f) * static_cast<float32_t>(wa) / m_metadata->yolov5_config().image_width;
+    auto h = pow(2.0f * th, 2.0f) * static_cast<float32_t>(ha) / m_metadata->yolov5_config().image_height;
+    auto x_center = (tx * 2.0f - 0.5f + static_cast<float32_t>(col)) / static_cast<float32_t>(w_stride);
+    auto y_center = (ty * 2.0f - 0.5f + static_cast<float32_t>(row)) / static_cast<float32_t>(h_stride);
+    auto x_min = (x_center - (w / 2.0f));
+    auto y_min = (y_center - (h / 2.0f));
+    return hailo_bbox_float32_t{y_min, x_min, (y_min+h), (x_min+w), 0};
+}
+
+uint32_t YOLOv5PostProcessOp::get_entry_size()
+{
+    return (CLASSES_START_INDEX + m_metadata->nms_config().number_of_classes);
+}
+
+} // namespace net_flow
+} // namespace hailort
+
diff --git a/hailort/libhailort/src/net_flow/ops/yolov5_post_process.hpp b/hailort/libhailort/src/net_flow/ops/yolov5_post_process.hpp
new file mode 100644 (file)
index 0000000..7472afb
--- /dev/null
@@ -0,0 +1,219 @@
+/**
+ * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file yolov5_post_process.hpp
+ * @brief YOLO post process
+ *
+ * https://learnopencv.com/object-detection-using-yolov5-and-opencv-dnn-in-c-and-python :
+ * The headline '4.3.5 POST-PROCESSING YOLOv5 Prediction Output' contains explanations on the YOLOv5 post-processing.
+ **/
+
+#ifndef _HAILO_YOLO_POST_PROCESS_HPP_
+#define _HAILO_YOLO_POST_PROCESS_HPP_
+
+#include "net_flow/ops/nms_post_process.hpp"
+#include "net_flow/ops/op_metadata.hpp"
+
+namespace hailort
+{
+namespace net_flow
+{
+
+#define MASK_COEFFICIENT_SIZE (32)
+
+// Static configuration of the YOLOv5 post-process (shared by the detection and
+// segmentation variants).
+struct YoloPostProcessConfig
+{
+    // The image height.
+    float32_t image_height = 0;
+
+    // The image width.
+    float32_t image_width = 0;
+
+    // A vector of anchors, each element in the vector represents the anchors for a specific layer
+    // Each layer anchors vector is structured as {w,h} pairs.
+    // Keyed by input (layer) name; values are presumably in input-image pixels - TODO confirm.
+    std::map<std::string, std::vector<int>> anchors;
+};
+
+// Metadata (configuration + validation) of the YOLOv5 detection post-process op.
+// Extends the generic NMS metadata with the YOLOv5-specific config (image dims + anchors).
+class Yolov5OpMetadata : public NmsOpMetadata
+{
+public:
+    // Factory - the implementation is expected to validate the params before returning.
+    static Expected<std::shared_ptr<OpMetadata>> create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                                                        const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                                                        const NmsPostProcessConfig &nms_post_process_config,
+                                                        const YoloPostProcessConfig &yolov5_post_process_config,
+                                                        const std::string &network_name);
+    std::string get_op_description() override;
+    hailo_status validate_format_info() override;
+    // Mutable access to the YOLOv5-specific config (image dims + per-layer anchors).
+    YoloPostProcessConfig &yolov5_config() { return m_yolov5_config;};
+
+protected:
+    Yolov5OpMetadata(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                       const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                       const NmsPostProcessConfig &nms_post_process_config,
+                       const std::string &name,
+                       const std::string &network_name,
+                       const YoloPostProcessConfig &yolov5_post_process_config,
+                       const OperationType op_type)
+        : NmsOpMetadata(inputs_metadata, outputs_metadata, nms_post_process_config, name, network_name, op_type)
+        , m_yolov5_config(yolov5_post_process_config)
+    {}
+
+    hailo_status validate_params() override;
+
+private:
+    YoloPostProcessConfig m_yolov5_config;
+
+};
+
+// YOLOv5 detection post-process op: decodes raw per-anchor predictions into bboxes,
+// filters them by score, and serializes the result (NMS itself is provided by the
+// NmsPostProcessOp base).
+class YOLOv5PostProcessOp : public NmsPostProcessOp
+{
+public:
+    static Expected<std::shared_ptr<Op>> create(std::shared_ptr<Yolov5OpMetadata> metadata);
+
+    // Runs the full post-process over all the op's inputs into the single output buffer.
+    hailo_status execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs) override;
+
+protected:
+    // Decodes raw (sigmoid-activated) bbox terms into a normalized bbox, given the anchor
+    // (wa, ha), the grid cell (col, row) and the grid dimensions (w_stride, h_stride).
+    hailo_bbox_float32_t decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
+        int wa, int ha, uint32_t col, uint32_t row, uint32_t w_stride, uint32_t h_stride) const;
+
+    // Elements per anchor entry (overridden by the seg variant to append mask coefficients).
+    virtual uint32_t get_entry_size();
+
+    YOLOv5PostProcessOp(std::shared_ptr<Yolov5OpMetadata> metadata) :
+        NmsPostProcessOp(static_cast<std::shared_ptr<NmsOpMetadata>>(metadata)),
+        m_metadata(metadata)
+    {}
+
+    // Per-anchor entry layout. Each index addresses a row of `padded_shape.width` elements
+    // (see the *_OFFSET computations in extract_detections()).
+    static const uint32_t X_INDEX = 0;
+    static const uint32_t Y_INDEX = 1;
+    static const uint32_t W_INDEX = 2;
+    static const uint32_t H_INDEX = 3;
+    static const uint32_t OBJECTNESS_INDEX = 4;
+    static const uint32_t CLASSES_START_INDEX = 5;
+
+
+
+    // Adds a detection for @a class_index if bbox.score passes the configured score threshold.
+    // When masks are in use (yolov5-seg), also gathers the dequantized mask coefficients
+    // scaled by @a objectness; the sigmoid on the mask is deliberately deferred until after
+    // the multiplication with the proto layer.
+    template<typename DstType = float32_t, typename SrcType>
+    void check_threshold_and_add_detection(std::vector<DetectionBbox> &detections,
+        std::vector<uint32_t> &classes_detections_count, hailo_bbox_float32_t bbox, hailo_quant_info_t &quant_info,
+        uint32_t class_index, SrcType* data, uint32_t entry_idx, uint32_t padded_width, DstType objectness)
+    {
+        const auto &nms_config = m_metadata->nms_config();
+        if (bbox.score >= nms_config.nms_score_th) {
+            if (should_add_mask()) {
+                // We will not preform the sigmoid on the mask at this point -
+                // It should happen on the result of the vector mask multiplication with the proto_mask layer.
+                uint32_t mask_index_start_index = CLASSES_START_INDEX + nms_config.number_of_classes;
+                std::vector<float32_t> mask(MASK_COEFFICIENT_SIZE, 0.0f);
+                for (size_t i = 0; i < MASK_COEFFICIENT_SIZE; i++) {
+                    auto mask_offset = entry_idx + (mask_index_start_index + i) * padded_width;
+                    mask[i] = (Quantization::dequantize_output<DstType, SrcType>(data[mask_offset], quant_info) * objectness);
+                }
+                detections.emplace_back(DetectionBbox(bbox, class_index, std::move(mask)));
+            } else {
+                detections.emplace_back(DetectionBbox(bbox, class_index));
+            }
+            classes_detections_count[class_index]++;
+        }
+    }
+
+    // Computes the per-class scores of a single decoded bbox and adds the passing
+    // (class, score) combinations to @a detections.
+    template<typename DstType = float32_t, typename SrcType>
+    void decode_classes_scores(std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count,
+        hailo_bbox_float32_t &bbox, hailo_quant_info_t &quant_info, SrcType* data, uint32_t entry_idx, uint32_t class_start_idx,
+        DstType objectness, uint32_t padded_width)
+    {
+        const auto &nms_config = m_metadata->nms_config();
+
+        if (nms_config.cross_classes) {
+            // Pre-NMS optimization. If NMS checks IoU over different classes, only the maximum class is relevant
+            auto max_id_score_pair = get_max_class<DstType, SrcType>(data, entry_idx, class_start_idx, objectness, quant_info, padded_width);
+            bbox.score = max_id_score_pair.second;
+            check_threshold_and_add_detection(detections, classes_detections_count, bbox, quant_info, max_id_score_pair.first,
+                data, entry_idx, padded_width, objectness);
+        }
+        else {
+            // Per-class NMS: each class whose (class confidence * objectness) passes the
+            // threshold yields its own detection.
+            for (uint32_t class_index = 0; class_index < nms_config.number_of_classes; class_index++) {
+                auto class_entry_idx = entry_idx + ((class_start_idx + class_index) * padded_width);
+                auto class_confidence = dequantize_and_sigmoid<DstType, SrcType>(
+                    data[class_entry_idx], quant_info);
+                bbox.score = class_confidence * objectness;
+                check_threshold_and_add_detection(detections, classes_detections_count, bbox, quant_info, class_index,
+                    data, entry_idx, padded_width, objectness);
+            }
+        }
+    }
+
+    /**
+     * Extract bboxes with confidence level higher than @a confidence_threshold from @a buffer and add them to @a detections.
+     *
+     * @param[in] buffer                        Buffer containing data after inference
+     * @param[in] quant_info                    Quantization info corresponding to the @a buffer layer.
+     * @param[in] shape                         Shape corresponding to the @a buffer layer.
+     * @param[in] padded_shape                  Padded (hw-aligned) shape of the @a buffer layer; used for strides.
+     * @param[in] layer_anchors                 The layer anchors corresponding to layer receiving the @a buffer.
+     *                                          Each anchor is structured as {width, height} pairs.
+     * @param[inout] detections                 A vector of ::DetectionBbox objects, to add the detected bboxes to.
+     * @param[inout] classes_detections_count   A vector of uint32_t, to add count of detections count per class to.
+     *
+     * @return Upon success, returns ::HAILO_SUCCESS. Otherwise, returns a ::hailo_status error.
+    */
+    template<typename DstType = float32_t, typename SrcType>
+    hailo_status extract_detections(const MemoryView &buffer, hailo_quant_info_t quant_info,
+        hailo_3d_image_shape_t shape, hailo_3d_image_shape_t padded_shape,
+        const std::vector<int> &layer_anchors, std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
+    {
+        // Offsets of each bbox term within an entry: every term occupies a full padded row.
+        const uint32_t X_OFFSET = X_INDEX * padded_shape.width;
+        const uint32_t Y_OFFSET = Y_INDEX * padded_shape.width;
+        const uint32_t W_OFFSET = W_INDEX * padded_shape.width;
+        const uint32_t H_OFFSET = H_INDEX * padded_shape.width;
+        const uint32_t OBJECTNESS_OFFSET = OBJECTNESS_INDEX * padded_shape.width;
+
+        const auto &nms_config = m_metadata->nms_config();
+
+        // Each layer anchors vector is structured as {w,h} pairs.
+        // For example, if we have a vector of size 6 (default YOLOv5 vector) then we have 3 anchors for this layer.
+        assert(layer_anchors.size() % 2 == 0);
+        const size_t num_of_anchors = (layer_anchors.size() / 2);
+
+        uint32_t entry_size = get_entry_size();
+        auto number_of_entries = padded_shape.height * padded_shape.width * num_of_anchors;
+
+        // Sanity check: the buffer must exactly hold all entries of the declared shape/type.
+        auto buffer_size = number_of_entries * entry_size * sizeof(SrcType);
+        CHECK(buffer_size == buffer.size(), HAILO_INVALID_ARGUMENT,
+            "Failed to extract_detections, buffer_size should be {}, but is {}", buffer_size, buffer.size());
+
+        // NHCW-style indexing: a full image row spans (padded width * features) elements,
+        // and an anchor's entry starts (anchor * entry_size) padded rows into the features.
+        auto row_size = padded_shape.width * padded_shape.features;
+        SrcType *data = (SrcType*)buffer.data();
+        for (uint32_t row = 0; row < shape.height; row++) {
+            for (uint32_t col = 0; col < shape.width; col++) {
+                for (uint32_t anchor = 0; anchor < num_of_anchors; anchor++) {
+                    auto entry_idx = (row_size * row) + col + ((anchor * entry_size) * padded_shape.width);
+                    auto objectness = dequantize_and_sigmoid<DstType, SrcType>(data[entry_idx + OBJECTNESS_OFFSET], quant_info);
+                    if (objectness < nms_config.nms_score_th) {
+                        // Early-out: class scores are scaled by objectness, so none can pass.
+                        continue;
+                    }
+
+                    auto tx = dequantize_and_sigmoid<DstType, SrcType>(data[entry_idx + X_OFFSET], quant_info);
+                    auto ty = dequantize_and_sigmoid<DstType, SrcType>(data[entry_idx + Y_OFFSET], quant_info);
+                    auto tw = dequantize_and_sigmoid<DstType, SrcType>(data[entry_idx + W_OFFSET], quant_info);
+                    auto th = dequantize_and_sigmoid<DstType, SrcType>(data[entry_idx + H_OFFSET], quant_info);
+                    auto bbox = decode(tx, ty, tw, th, layer_anchors[anchor * 2], layer_anchors[anchor * 2 + 1], col, row,
+                        shape.width, shape.height);
+
+                    decode_classes_scores(detections, classes_detections_count, bbox, quant_info, data, entry_idx,
+                        CLASSES_START_INDEX, objectness, padded_shape.width);
+                }
+            }
+        }
+
+        return HAILO_SUCCESS;
+    }
+private:
+   std::shared_ptr<Yolov5OpMetadata> m_metadata;
+
+};
+
+} // namespace net_flow
+} // namespace hailort
+
+#endif // _HAILO_YOLO_POST_PROCESS_HPP_
diff --git a/hailort/libhailort/src/net_flow/ops/yolov5_seg_post_process.cpp b/hailort/libhailort/src/net_flow/ops/yolov5_seg_post_process.cpp
new file mode 100644 (file)
index 0000000..813abd8
--- /dev/null
@@ -0,0 +1,374 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file yolov5_seg_post_process.cpp
+ * @brief YOLOv5 Instance Segmentation post-process implementation
+ **/
+
+#include "yolov5_seg_post_process.hpp"
+#include "hailo/hailort.h"
+
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable: 4244 4267 4127)
+#else
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion"
+#endif
+#define STB_IMAGE_RESIZE_IMPLEMENTATION
+#include "stb_image_resize.h"
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#else
+#pragma GCC diagnostic pop
+#endif
+
+namespace hailort
+{
+namespace net_flow
+{
+
+// Factory for the YOLOv5-seg metadata: allocates, validates the params, and returns
+// the result as the base OpMetadata type.
+Expected<std::shared_ptr<OpMetadata>> Yolov5SegOpMetadata::create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+    const std::unordered_map<std::string, BufferMetaData> &outputs_metadata, const NmsPostProcessConfig &nms_post_process_config,
+    const YoloPostProcessConfig &yolo_config, const YoloV5SegPostProcessConfig &yolo_seg_config,
+    const std::string &network_name)
+{
+    // nothrow new + explicit null check (instead of make_shared) so OOM is reported as a status
+    auto op_metadata = std::shared_ptr<Yolov5SegOpMetadata>(new (std::nothrow) Yolov5SegOpMetadata(inputs_metadata, outputs_metadata,
+        nms_post_process_config, yolo_config, yolo_seg_config, network_name));
+    CHECK_AS_EXPECTED(op_metadata != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto status = op_metadata->validate_params();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return std::shared_ptr<OpMetadata>(std::move(op_metadata));
+}
+
+// Validates the user-requested input/output formats against what the seg post-process supports.
+hailo_status Yolov5SegOpMetadata::validate_format_info()
+{
+    // Output side: must be the NMS-with-byte-mask order, float32, with no transpose/argmax flags.
+    for (const auto& output_metadata : m_outputs_metadata) {
+        CHECK(HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK == output_metadata.second.format.order, HAILO_INVALID_ARGUMENT,
+            "The given output format order {} is not supported, should be `HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK`",
+            HailoRTCommon::get_format_order_str(output_metadata.second.format.order));
+
+        CHECK(HAILO_FORMAT_TYPE_FLOAT32 == output_metadata.second.format.type, HAILO_INVALID_ARGUMENT,
+            "The given output format type {} is not supported, should be `HAILO_FORMAT_TYPE_FLOAT32`",
+            HailoRTCommon::get_format_type_str(output_metadata.second.format.type));
+
+        CHECK(!(HAILO_FORMAT_FLAGS_TRANSPOSED & output_metadata.second.format.flags), HAILO_INVALID_ARGUMENT,
+            "Output {} is marked as transposed, which is not supported for this model.", output_metadata.first);
+        CHECK(!(HAILO_FORMAT_FLAGS_HOST_ARGMAX & output_metadata.second.format.flags), HAILO_INVALID_ARGUMENT,
+            "Output {} is marked as argmax, which is not supported for this model.", output_metadata.first);
+    }
+
+    // Input side: at least one input; every input must be NHCW with uint8/uint16 elements.
+    assert(1 <= m_inputs_metadata.size());
+    for (const auto& input_metadata : m_inputs_metadata) {
+        CHECK(HAILO_FORMAT_ORDER_NHCW == input_metadata.second.format.order, HAILO_INVALID_ARGUMENT,
+            "The given input format order {} is not supported, should be `HAILO_FORMAT_ORDER_NHCW`",
+            HailoRTCommon::get_format_order_str(input_metadata.second.format.order));
+
+        CHECK((HAILO_FORMAT_TYPE_UINT8 == input_metadata.second.format.type) ||
+            (HAILO_FORMAT_TYPE_UINT16 == input_metadata.second.format.type), HAILO_INVALID_ARGUMENT,
+            "The given input format type {} is not supported, should be `HAILO_FORMAT_TYPE_UINT8` or `HAILO_FORMAT_TYPE_UINT16`",
+            HailoRTCommon::get_format_type_str(input_metadata.second.format.type));
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Human-readable description: the base YOLOv5 description plus the mask threshold.
+std::string Yolov5SegOpMetadata::get_op_description()
+{
+    const auto base_description = Yolov5OpMetadata::get_op_description();
+    return fmt::format("{}, Mask threshold: {:.2f}",
+        base_description, m_yolo_seg_config.mask_threshold);
+}
+
+// Extends the base NMS vstream info with the maximal mask size - a detection's mask can at
+// most cover the whole input image (image_width * image_height pixels).
+Expected<hailo_vstream_info_t> Yolov5SegOpMetadata::get_output_vstream_info()
+{
+    auto vstream_info = NmsOpMetadata::get_output_vstream_info();
+    CHECK_EXPECTED(vstream_info);
+
+    vstream_info->nms_shape.max_mask_size = static_cast<uint32_t>(yolov5_config().image_height * yolov5_config().image_width);
+    return vstream_info.release();
+}
+
+// Factory for the seg post-process op. Pre-allocates all scratch buffers once so that
+// execute() performs no per-frame allocations:
+// - transformed / dequantized proto buffers: float32 copies of the proto (mask prototypes) layer
+// - mask mult result buffer: one float per proto-resolution pixel
+// - resized buffer: the mask upscaled to full input-image resolution
+Expected<std::shared_ptr<Op>> Yolov5SegPostProcess::create(std::shared_ptr<Yolov5SegOpMetadata> metadata)
+{
+    auto status = metadata->validate_format_info();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // Create help buffers
+    assert(contains(metadata->inputs_metadata(), metadata->yolov5seg_config().proto_layer_name));
+    auto proto_layer_metadata = metadata->inputs_metadata().at(metadata->yolov5seg_config().proto_layer_name);
+    auto transformed_proto_layer_frame_size = HailoRTCommon::get_shape_size(proto_layer_metadata.shape) * sizeof(float32_t);
+    auto transformed_proto_buffer = Buffer::create(transformed_proto_layer_frame_size);
+    CHECK_EXPECTED(transformed_proto_buffer);
+    auto dequantized_proto_buffer = Buffer::create(transformed_proto_layer_frame_size);
+    CHECK_EXPECTED(dequantized_proto_buffer);
+    auto mask_mult_result_buffer = Buffer::create(proto_layer_metadata.shape.height * proto_layer_metadata.shape.width * sizeof(float32_t));
+    CHECK_EXPECTED(mask_mult_result_buffer);
+
+    auto image_size = static_cast<uint32_t>(metadata->yolov5_config().image_width) * static_cast<uint32_t>(metadata->yolov5_config().image_height);
+    auto resized_buffer = Buffer::create(image_size * sizeof(float32_t));
+    CHECK_EXPECTED(resized_buffer);
+
+    auto op = std::shared_ptr<Yolov5SegPostProcess>(new (std::nothrow) Yolov5SegPostProcess(std::move(metadata),
+        mask_mult_result_buffer.release(), resized_buffer.release(), transformed_proto_buffer.release(), dequantized_proto_buffer.release()));
+    CHECK_NOT_NULL_AS_EXPECTED(op, HAILO_OUT_OF_HOST_MEMORY);
+
+    return std::shared_ptr<Op>(std::move(op));
+}
+
+// Stores the metadata and takes ownership of the scratch buffers allocated in create().
+Yolov5SegPostProcess::Yolov5SegPostProcess(std::shared_ptr<Yolov5SegOpMetadata> metadata,
+    Buffer &&mask_mult_result_buffer, Buffer &&resized_mask, Buffer &&transformed_proto_buffer, Buffer &&dequantized_proto_buffer)
+    : YOLOv5PostProcessOp(static_cast<std::shared_ptr<Yolov5OpMetadata>>(metadata)), m_metadata(metadata),
+    m_mask_mult_result_buffer(std::move(mask_mult_result_buffer)),
+    m_resized_mask_to_image_dim(std::move(resized_mask)),
+    m_transformed_proto_buffer(std::move(transformed_proto_buffer)),
+    m_dequantized_proto_buffer(std::move(dequantized_proto_buffer))
+{}
+
+// Full seg post-process: transforms the proto (mask prototypes) layer, extracts detections
+// from all other inputs, runs NMS, then serializes the NMS-with-byte-mask output.
+hailo_status Yolov5SegPostProcess::execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
+{
+    const auto &inputs_metadata = m_metadata->inputs_metadata();
+    const auto &yolo_config = m_metadata->yolov5_config();
+    const auto &yolov5seg_config = m_metadata->yolov5seg_config();
+    const auto &nms_config = m_metadata->nms_config();
+
+    std::vector<DetectionBbox> detections;
+    std::vector<uint32_t> classes_detections_count(nms_config.number_of_classes, 0);
+    // Reserve the worst-case count up front to avoid reallocations while extracting.
+    detections.reserve(nms_config.max_proposals_per_class * nms_config.number_of_classes);
+    for (const auto &name_to_input : inputs) {
+        hailo_status status; // always assigned below - the type CHECK guarantees UINT8/UINT16
+        auto &name = name_to_input.first;
+        assert(contains(inputs_metadata, name));
+        auto &input_metadata = inputs_metadata.at(name);
+
+        CHECK(((input_metadata.format.type == HAILO_FORMAT_TYPE_UINT16) || (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT8)),
+            HAILO_INVALID_ARGUMENT, "YOLO post-process received invalid input type {}", input_metadata.format.type);
+
+        // Prepare proto layer
+        if (name == yolov5seg_config.proto_layer_name) {
+            if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT8) {
+                transform_proto_layer<float32_t, uint8_t>((uint8_t*)name_to_input.second.data(), input_metadata.quant_info);
+            } else if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT16) {
+                transform_proto_layer<float32_t, uint16_t>((uint16_t*)name_to_input.second.data(), input_metadata.quant_info);
+            }
+            // Skip bbox extraction if the input is proto layer (the mask layer)
+            continue;
+        }
+
+        // Detection layer: extract candidate bboxes (and their mask coefficients).
+        assert(contains(yolo_config.anchors, name));
+        if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT8) {
+            status = extract_detections<float32_t, uint8_t>(name_to_input.second, input_metadata.quant_info, input_metadata.shape,
+                input_metadata.padded_shape, yolo_config.anchors.at(name), detections, classes_detections_count);
+        } else if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT16) {
+            status = extract_detections<float32_t, uint16_t>(name_to_input.second, input_metadata.quant_info, input_metadata.shape,
+                input_metadata.padded_shape, yolo_config.anchors.at(name), detections, classes_detections_count);
+        }
+        CHECK_SUCCESS(status);
+    }
+
+    // NMS over the accumulated detections, then serialize into the single output buffer.
+    remove_overlapping_boxes(detections, classes_detections_count, m_metadata->nms_config().nms_iou_th);
+    auto status = fill_nms_with_byte_mask_format(outputs.begin()->second, detections, classes_detections_count);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Elements per anchor entry for yolov5-seg: bbox terms + objectness + per-class scores,
+// plus the 32 mask coefficients appended by the seg model.
+uint32_t Yolov5SegPostProcess::get_entry_size()
+{
+    const uint32_t classes_count = m_metadata->nms_config().number_of_classes;
+    return CLASSES_START_INDEX + classes_count + MASK_COEFFICIENT_SIZE;
+}
+
+// Multiplies the detection's mask-coefficients vector with the (transformed) proto layer
+// matrix and applies sigmoid per element, producing a proto-resolution confidence mask in
+// m_mask_mult_result_buffer.
+void Yolov5SegPostProcess::mult_mask_vector_and_proto_matrix(const DetectionBbox &detection)
+{
+    float32_t *proto_layer = (float32_t*)m_transformed_proto_buffer.data();
+    float32_t *mult_result = (float32_t*)m_mask_mult_result_buffer.data();
+
+    auto proto_layer_shape = get_proto_layer_shape();
+    uint32_t mult_size = proto_layer_shape.height * proto_layer_shape.width;
+    for (uint32_t i = 0; i < mult_size; i++) {
+        // Dot product of the mask coefficients with the i'th pixel across all proto features
+        // (the proto buffer is laid out feature-major: feature j's plane starts at j * mult_size).
+        float32_t sum = 0.0f;
+        for (uint32_t j = 0; j < proto_layer_shape.features; j++) {
+            sum += detection.m_mask[j] * proto_layer[j * mult_size + i];
+        }
+        mult_result[i] = sigmoid(sum);
+    }
+}
+
+// Upscales the proto-resolution confidence mask to full image resolution, thresholds it
+// into a binary mask, crops it to the detection's bbox and writes it into @a buffer at
+// @a buffer_offset.
+hailo_status Yolov5SegPostProcess::crop_and_copy_mask(const DetectionBbox &detection, MemoryView &buffer, uint32_t buffer_offset)
+{
+    auto &yolov5_config = m_metadata->yolov5_config();
+    auto mask_threshold = m_metadata->yolov5seg_config().mask_threshold;
+
+    // Based on Bilinear interpolation algorithm
+    // TODO: HRT-11734 - Improve performance by resizing only the mask part if possible
+    auto proto_layer_shape = get_proto_layer_shape();
+    float32_t* resized_mask_to_image_dim_ptr = (float32_t*)m_resized_mask_to_image_dim.data();
+    stbir_resize_float_generic((float32_t*)m_mask_mult_result_buffer.data(), proto_layer_shape.width,
+        proto_layer_shape.height, 0, resized_mask_to_image_dim_ptr, static_cast<uint32_t>(yolov5_config.image_width),
+        static_cast<uint32_t>(yolov5_config.image_height), 0, 1, STBIR_ALPHA_CHANNEL_NONE, 0,
+        STBIR_EDGE_CLAMP, STBIR_FILTER_TRIANGLE, STBIR_COLORSPACE_LINEAR, NULL);
+
+    // Bbox is normalized [0,1]; convert to pixel coordinates in the resized mask.
+    auto x_min = static_cast<uint32_t>(std::round(detection.m_bbox.x_min * yolov5_config.image_width));
+    auto x_max = static_cast<uint32_t>(std::round(detection.m_bbox.x_max * yolov5_config.image_width));
+    auto y_min = static_cast<uint32_t>(std::round(detection.m_bbox.y_min * yolov5_config.image_height));
+    auto y_max = static_cast<uint32_t>(std::round(detection.m_bbox.y_max * yolov5_config.image_height));
+    auto box_width = detection.get_bbox_rounded_width(yolov5_config.image_width);
+
+    // NOTE(review): the loops below are inclusive of x_max/y_max, i.e. (x_max - x_min + 1)
+    // columns per row, while the destination row stride is get_bbox_rounded_width() -
+    // verify these always agree, otherwise rows could straddle in the cropped mask.
+    float32_t *dst_mask = (float32_t*)(buffer.data() + buffer_offset);
+    for (uint32_t i = y_min; i <= y_max; i++) {
+        for (uint32_t j = x_min; j <= x_max; j++) {
+            auto image_mask_idx = (i * static_cast<uint32_t>(yolov5_config.image_width)) + j;
+            auto cropped_mask_idx = ((i-y_min) * box_width) + (j-x_min);
+
+            // Binarize: pixels above the user-configured threshold belong to the mask.
+            if (resized_mask_to_image_dim_ptr[image_mask_idx] > mask_threshold) {
+                dst_mask[cropped_mask_idx] = 1.0f;
+            } else {
+                dst_mask[cropped_mask_idx] = 0.0f;
+            }
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+// Computes the final binary mask of @a detection and writes it into @a buffer at @a buffer_offset.
+hailo_status Yolov5SegPostProcess::calc_and_copy_mask(const DetectionBbox &detection, MemoryView &buffer, uint32_t buffer_offset)
+{
+    // First multiply the mask coefficients with the proto layer, then threshold + crop
+    // the resized result into the output buffer.
+    mult_mask_vector_and_proto_matrix(detection);
+    const auto crop_status = crop_and_copy_mask(detection, buffer, buffer_offset);
+    CHECK_SUCCESS(crop_status);
+
+    return HAILO_SUCCESS;
+}
+
+uint32_t Yolov5SegPostProcess::get_mask_size(const DetectionBbox &detection)
+{
+    auto &yolov5_config = m_metadata->yolov5_config();
+    auto box_height = detection.get_bbox_rounded_height(yolov5_config.image_height);
+    auto box_width = detection.get_bbox_rounded_width(yolov5_config.image_width);
+    auto mask_size = box_width * box_height;
+
+    // Add padding if needed
+    uint32_t remainder = mask_size % 8;
+    uint32_t adjustment = (remainder != 0) ? (8 - remainder) : 0;
+    uint32_t result = static_cast<uint32_t>(mask_size + adjustment);
+    return result;
+}
+
+Expected<uint32_t> Yolov5SegPostProcess::copy_detection_to_result_buffer(MemoryView &buffer, const DetectionBbox &detection,
+    uint32_t buffer_offset, std::vector<uint32_t> &classes_detections_count)
+{
+    auto detection_byte_size = 0;
+    float32_t mask_size_bytes = static_cast<float32_t>(get_mask_size(detection)) * sizeof(float32_t);
+
+    // Copy bbox
+    uint32_t size_to_copy = sizeof(detection.m_bbox);
+    assert((buffer_offset + size_to_copy) <= buffer.size());
+    memcpy((hailo_bbox_float32_t*)(buffer.data() + buffer_offset), &detection.m_bbox, size_to_copy);
+    buffer_offset += size_to_copy;
+    detection_byte_size += size_to_copy;
+
+    // Copy mask size
+    size_to_copy = sizeof(mask_size_bytes);
+    assert((buffer_offset + size_to_copy) <= buffer.size());
+    memcpy((buffer.data() + buffer_offset), &mask_size_bytes, size_to_copy);
+    buffer_offset += size_to_copy;
+    detection_byte_size += size_to_copy;
+
+    // Calc and copy mask
+    auto status = calc_and_copy_mask(detection, buffer, buffer_offset);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    detection_byte_size += static_cast<uint32_t>(mask_size_bytes);
+
+    classes_detections_count[detection.m_class_id]--;
+    return detection_byte_size;
+}
+
+// Writes a single class' detections count into @a buffer (serialized as float32, per the
+// output format). Returns the number of bytes written.
+uint32_t Yolov5SegPostProcess::copy_bbox_count_to_result_buffer(MemoryView &buffer, uint32_t class_detection_count, uint32_t buffer_offset)
+{
+    const auto count_as_float = static_cast<float32_t>(class_detection_count);
+    const auto bytes_to_write = static_cast<uint32_t>(sizeof(count_as_float));
+
+    assert((buffer_offset + bytes_to_write) <= buffer.size());
+    memcpy((buffer.data() + buffer_offset), &count_as_float, bytes_to_write);
+    return bytes_to_write;
+}
+
+// Zeroes out the bbox-count slots (one float32 each) of @a classes_with_zero_detections_count
+// consecutive classes that have no detections. Returns the number of bytes written.
+uint32_t Yolov5SegPostProcess::copy_zero_bbox_count(MemoryView &buffer, uint32_t classes_with_zero_detections_count, uint32_t buffer_offset)
+{
+    const uint32_t bytes_to_write = static_cast<uint32_t>(sizeof(float32_t)) * classes_with_zero_detections_count;
+
+    assert((buffer_offset + bytes_to_write) <= buffer.size());
+    memset((buffer.data() + buffer_offset), 0, bytes_to_write);
+    return bytes_to_write;
+}
+
+hailo_status Yolov5SegPostProcess::fill_nms_with_byte_mask_format(MemoryView &buffer, std::vector<DetectionBbox> &detections,
+    std::vector<uint32_t> &classes_detections_count)
+{
+    // TODO: HRT-11734 - Improve performance by adding a new format that doesn't require the sort
+    // Sort by class_id
+    std::sort(detections.begin(), detections.end(),
+        [](DetectionBbox a, DetectionBbox b)
+        { return (a.m_class_id != b.m_class_id) ? (a.m_class_id < b.m_class_id) : (a.m_bbox.score > b.m_bbox.score); });
+
+    const auto &nms_config = m_metadata->nms_config();
+    uint32_t ignored_detections_count = 0;
+    int curr_class_id = -1;
+    uint32_t buffer_offset = 0;
+    for (auto &detection : detections) {
+        if (REMOVED_CLASS_SCORE == detection.m_bbox.score) {
+            // Detection was removed in remove_overlapping_boxes()
+            continue;
+        }
+        if (0 == classes_detections_count[detection.m_class_id]) {
+            // This class' detections count is higher then m_nms_config.max_proposals_per_class.
+            // This detection is ignored due to having lower score (detections vector is sorted by score).
+            continue;
+        }
+
+        // If class's detections count is higher then max_proposals_per_class we set the detection count of that class to the max
+        // and ignore the rest by reducing the classes_detections_count[detection.m_class_id] after copying the bbox to result buffer.
+        if (nms_config.max_proposals_per_class < classes_detections_count[detection.m_class_id]) {
+            ignored_detections_count += (classes_detections_count[detection.m_class_id] - nms_config.max_proposals_per_class);
+            classes_detections_count[detection.m_class_id] = nms_config.max_proposals_per_class;
+        }
+
+        if (static_cast<int>(detection.m_class_id) == curr_class_id) {
+            auto buffer_offset_expected = copy_detection_to_result_buffer(buffer, detection, buffer_offset, classes_detections_count);
+            CHECK_EXPECTED_AS_STATUS(buffer_offset_expected);
+            buffer_offset += buffer_offset_expected.value();
+        }
+        else if (static_cast<int>(detection.m_class_id) == (curr_class_id + 1)) {
+            buffer_offset += copy_bbox_count_to_result_buffer(buffer, classes_detections_count[detection.m_class_id], buffer_offset);
+            auto buffer_offset_expected = copy_detection_to_result_buffer(buffer, detection, buffer_offset, classes_detections_count);
+            buffer_offset += buffer_offset_expected.value();
+            curr_class_id = detection.m_class_id;
+        }
+        else {
+            // no detections for classes between (curr_class_id, detection.m_class_id)
+            auto zero_detections_classes_count = (detection.m_class_id - curr_class_id);
+            buffer_offset += copy_zero_bbox_count(buffer, zero_detections_classes_count, buffer_offset);
+
+            // Copy the new class box
+            buffer_offset += copy_bbox_count_to_result_buffer(buffer, classes_detections_count[detection.m_class_id], buffer_offset);
+            auto buffer_offset_expected = copy_detection_to_result_buffer(buffer, detection, buffer_offset, classes_detections_count);
+            buffer_offset += buffer_offset_expected.value();
+            curr_class_id = detection.m_class_id;
+        }
+    }
+
+    if (0 != ignored_detections_count) {
+        LOGGER__INFO("{} Detections were ignored, due to `max_bboxes_per_class` defined as {}.",
+            ignored_detections_count, nms_config.max_proposals_per_class);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+} /* namespace net_flow */
+} /* namespace hailort */
diff --git a/hailort/libhailort/src/net_flow/ops/yolov5_seg_post_process.hpp b/hailort/libhailort/src/net_flow/ops/yolov5_seg_post_process.hpp
new file mode 100644 (file)
index 0000000..541edc2
--- /dev/null
@@ -0,0 +1,125 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file yolov5_seg_post_process.hpp
+ * @brief YOLOv5 Instance Segmentation Post-Process
+ **/
+
+#ifndef _HAILO_YOLOV5_SEG_POST_PROCESS_HPP_
+#define _HAILO_YOLOV5_SEG_POST_PROCESS_HPP_
+
+#include "hailo/hailort.h"
+#include "net_flow/ops/yolov5_post_process.hpp"
+#include "transform/transform_internal.hpp"
+
+namespace hailort
+{
+namespace net_flow
+{
+
+struct YoloV5SegPostProcessConfig
+{
+    // User-given mask threshold. A pixel will be considered part of the mask if its value is higher than the mask_threshold.
+    double mask_threshold;
+    std::string proto_layer_name;
+};
+
+class Yolov5SegOpMetadata : public Yolov5OpMetadata
+{
+public:
+    static Expected<std::shared_ptr<OpMetadata>> create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                                                        const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                                                        const NmsPostProcessConfig &nms_post_process_config,
+                                                        const YoloPostProcessConfig &yolov5_config,
+                                                        const YoloV5SegPostProcessConfig &yolov5_seg_config,
+                                                        const std::string &network_name);
+    hailo_status validate_format_info() override;
+    std::string get_op_description() override;
+    YoloV5SegPostProcessConfig &yolov5seg_config() { return m_yolo_seg_config;};
+    virtual Expected<hailo_vstream_info_t> get_output_vstream_info() override;
+
+private:
+    Yolov5SegOpMetadata(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                       const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                       const NmsPostProcessConfig &nms_post_process_config,
+                       const YoloPostProcessConfig &yolo_config,
+                       const YoloV5SegPostProcessConfig &yolo_seg_config,
+                       const std::string &network_name)
+        : Yolov5OpMetadata(inputs_metadata, outputs_metadata, nms_post_process_config, "YOLOv5Seg-Post-Process",
+            network_name, yolo_config, OperationType::YOLOV5SEG),
+        m_yolo_seg_config(yolo_seg_config)
+    {}
+
+    YoloV5SegPostProcessConfig m_yolo_seg_config;
+};
+
+class Yolov5SegPostProcess : public YOLOv5PostProcessOp
+{
+public:
+    static Expected<std::shared_ptr<Op>> create(std::shared_ptr<Yolov5SegOpMetadata> metadata);
+
+    hailo_status execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs) override;
+
+    uint32_t get_entry_size() override;
+
+    virtual bool should_sigmoid()
+    {
+        return true;
+    };
+
+    virtual bool should_add_mask()
+    {
+        return true;
+    };
+
+    const hailo_3d_image_shape_t &get_proto_layer_shape() const
+    {
+        assert(contains(m_metadata->inputs_metadata(), m_metadata->yolov5seg_config().proto_layer_name));
+        return m_metadata->inputs_metadata().at(m_metadata->yolov5seg_config().proto_layer_name).shape;
+    };
+
+    // Transform proto layer - To multiply between the box mask coefficients (of shape (1, 32)), in the proto layer,
+    // we change the proto layer shape to be (features=32, height * width)
+    template<typename DstType = float32_t, typename SrcType>
+    void transform_proto_layer(SrcType *src_buffer, const hailo_quant_info_t &quant_info)
+    {
+        hailo_3d_image_shape_t shape = get_proto_layer_shape();
+
+         // TODO: HRT-11734 Improve performance - Make both funcs in one run?
+        Quantization::dequantize_output_buffer<float32_t, SrcType>(src_buffer, (float32_t*)m_dequantized_proto_buffer.data(),
+            HailoRTCommon::get_shape_size(shape), quant_info);
+        TransformContextUtils::transform__d2h_NHCW_to_NCHW<float32_t>((float32_t*)m_dequantized_proto_buffer.data(), &shape,
+            (float32_t*)m_transformed_proto_buffer.data(), &shape);
+    }
+
+private:
+    Yolov5SegPostProcess(std::shared_ptr<Yolov5SegOpMetadata> metadata, Buffer &&mask_mult_result_buffer,
+        Buffer &&resized_mask, Buffer &&transformed_proto_buffer, Buffer &&dequantized_proto_buffer);
+
+    hailo_status fill_nms_with_byte_mask_format(MemoryView &buffer, std::vector<DetectionBbox> &detections,
+        std::vector<uint32_t> &classes_detections_count);
+    void mult_mask_vector_and_proto_matrix(const DetectionBbox &detection);
+    uint32_t get_mask_size(const DetectionBbox &detection);
+
+    hailo_status calc_and_copy_mask(const DetectionBbox &detection, MemoryView &buffer, uint32_t buffer_offset);
+    hailo_status crop_and_copy_mask(const DetectionBbox &detection, MemoryView &buffer, uint32_t buffer_offset);
+    uint32_t copy_zero_bbox_count(MemoryView &buffer, uint32_t classes_with_zero_detections_count, uint32_t buffer_offset);
+    uint32_t copy_bbox_count_to_result_buffer(MemoryView &buffer, uint32_t class_detection_count, uint32_t buffer_offset);
+    Expected<uint32_t> copy_detection_to_result_buffer(MemoryView &buffer, const DetectionBbox &detection, uint32_t buffer_offset,
+        std::vector<uint32_t> &classes_detections_count);
+
+    std::shared_ptr<Yolov5SegOpMetadata> m_metadata;
+    Buffer m_mask_mult_result_buffer;
+    Buffer m_resized_mask_to_image_dim;
+
+    // TODO: HRT-11734 - Try use one buffer for both actions
+    Buffer m_transformed_proto_buffer;
+    Buffer m_dequantized_proto_buffer;
+};
+
+} /* namespace net_flow */
+} /* namespace hailort */
+
+#endif /* _HAILO_YOLOV5_SEG_POST_PROCESS_HPP_ */
index 23229e811dab7422943ce97dc6c18ca1a70df424..f0febbe8e09d8cc3502928113aedee36c7d04d82 100644 (file)
@@ -15,24 +15,31 @@ namespace hailort
 namespace net_flow
 {
 
-Expected<std::shared_ptr<Op>> YOLOXPostProcessOp::create(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                                                         const std::map<std::string, BufferMetaData> &outputs_metadata,
-                                                         const NmsPostProcessConfig &nms_post_process_config,
-                                                         const YoloxPostProcessConfig &yolox_post_process_config)
+Expected<std::shared_ptr<OpMetadata>> YoloxOpMetadata::create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+    const std::unordered_map<std::string, BufferMetaData> &outputs_metadata, const NmsPostProcessConfig &nms_post_process_config,
+    const YoloxPostProcessConfig &yolox_post_process_config, const std::string &network_name)
 {
-    auto op = std::shared_ptr<YOLOXPostProcessOp>(new (std::nothrow) YOLOXPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config,
-        yolox_post_process_config));
-    CHECK_AS_EXPECTED(op != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+    auto op_metadata = std::shared_ptr<YoloxOpMetadata>(new (std::nothrow) YoloxOpMetadata(inputs_metadata, outputs_metadata, nms_post_process_config,
+        yolox_post_process_config, network_name));
+    CHECK_AS_EXPECTED(op_metadata != nullptr, HAILO_OUT_OF_HOST_MEMORY);
 
-    return std::shared_ptr<Op>(std::move(op));
+    auto status = op_metadata->validate_params();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return std::shared_ptr<OpMetadata>(std::move(op_metadata));
 }
 
-hailo_status YOLOXPostProcessOp::validate_metadata()
+std::string YoloxOpMetadata::get_op_description()
 {
-    auto status = NmsPostProcessOp::validate_metadata();
-    if (HAILO_SUCCESS != status) {
-        return status;
-    }
+    auto nms_config_info = get_nms_config_description();
+    auto config_info = fmt::format("Op {}, Name: {}, {}, Image height: {:.2f}, Image width: {:.2f}",
+                        OpMetadata::get_operation_type_str(m_type), m_name, nms_config_info, m_yolox_config.image_height, m_yolox_config.image_width);
+    return config_info;
+}
+
+hailo_status YoloxOpMetadata::validate_params()
+{
+    CHECK_SUCCESS(NmsOpMetadata::validate_params());
 
     // Validate regs, clss and objs matching layers have same shape
     for (const auto &layer_names : m_yolox_config.input_names) {
@@ -43,8 +50,11 @@ hailo_status YOLOXPostProcessOp::validate_metadata()
         CHECK(contains(m_inputs_metadata, layer_names.obj), HAILO_INVALID_ARGUMENT,
             "YOLOXPostProcessOp: inputs_metadata does not contain obj layer {}", layer_names.obj);
 
+        assert(contains(m_inputs_metadata, layer_names.reg));
         const auto &reg_input_metadata = m_inputs_metadata.at(layer_names.reg);
+        assert(contains(m_inputs_metadata, layer_names.cls));
         const auto &cls_input_metadata = m_inputs_metadata.at(layer_names.cls);
+        assert(contains(m_inputs_metadata, layer_names.obj));
         const auto &obj_input_metadata = m_inputs_metadata.at(layer_names.obj);
 
         // NOTE: padded shape might be different because features might be different,
@@ -68,29 +78,47 @@ hailo_status YOLOXPostProcessOp::validate_metadata()
             && (obj_input_metadata.format.order == reg_input_metadata.format.order),
             HAILO_INVALID_ARGUMENT, "YOLOXPostProcess: reg input {} has different format than obj input {}",
                 layer_names.reg, layer_names.obj);
-
     }
 
     return HAILO_SUCCESS;
 }
 
+hailo_status YoloxOpMetadata::validate_format_info()
+{
+    return NmsOpMetadata::validate_format_info();
+}
+
+Expected<std::shared_ptr<Op>> YOLOXPostProcessOp::create(std::shared_ptr<YoloxOpMetadata> metadata)
+{
+    auto status = metadata->validate_format_info();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    auto op = std::shared_ptr<YOLOXPostProcessOp>(new (std::nothrow) YOLOXPostProcessOp(metadata));
+    CHECK_AS_EXPECTED(op != nullptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    return std::shared_ptr<Op>(std::move(op));
+}
+
 hailo_status YOLOXPostProcessOp::execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs)
 {
+    const auto &yolox_config = m_metadata->yolox_config();
+    const auto &inputs_metadata = m_metadata->inputs_metadata();
+    const auto &nms_config = m_metadata->nms_config();
     std::vector<DetectionBbox> detections;
-    std::vector<uint32_t> classes_detections_count(m_nms_config.number_of_classes, 0);
-    detections.reserve(m_nms_config.max_proposals_per_class * m_nms_config.number_of_classes);
-    for (const auto &layers_names_triplet : m_yolox_config.input_names) {
+    std::vector<uint32_t> classes_detections_count(nms_config.number_of_classes, 0);
+    detections.reserve(nms_config.max_proposals_per_class * nms_config.number_of_classes);
+    for (const auto &layers_names_triplet : yolox_config.input_names) {
         hailo_status status;
         assert(contains(inputs, layers_names_triplet.cls));
         assert(contains(inputs, layers_names_triplet.obj));
         assert(contains(inputs, layers_names_triplet.reg));
 
-        auto &input_metadata = m_inputs_metadata[layers_names_triplet.reg];
+        auto &input_metadata = inputs_metadata.at(layers_names_triplet.reg);
         if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT8) {
-            status = extract_detections<float32_t, uint8_t>(layers_names_triplet, inputs.at(layers_names_triplet.reg), inputs.at(layers_names_triplet.cls), 
+            status = extract_detections<float32_t, uint8_t>(layers_names_triplet, inputs.at(layers_names_triplet.reg), inputs.at(layers_names_triplet.cls),
                 inputs.at(layers_names_triplet.obj), detections, classes_detections_count);
         } else if (input_metadata.format.type == HAILO_FORMAT_TYPE_UINT16) {
-            status = extract_detections<float32_t, uint16_t>(layers_names_triplet, inputs.at(layers_names_triplet.reg), inputs.at(layers_names_triplet.cls), 
+            status = extract_detections<float32_t, uint16_t>(layers_names_triplet, inputs.at(layers_names_triplet.reg), inputs.at(layers_names_triplet.cls),
                 inputs.at(layers_names_triplet.obj), detections, classes_detections_count);
         } else {
             CHECK_SUCCESS(HAILO_INVALID_ARGUMENT, "YOLO post-process received invalid input type {}", input_metadata.format.type);
@@ -120,13 +148,5 @@ hailo_bbox_float32_t YOLOXPostProcessOp::decode(float32_t tx, float32_t ty, floa
     return hailo_bbox_float32_t{y_min, x_min, (y_min+h), (x_min+w), 0};
 }
 
-std::string YOLOXPostProcessOp::get_op_description()
-{
-    auto nms_config_info = get_nms_config_description();
-    auto config_info = fmt::format("Name: {}, {}, Image height: {:.2f}, Image width: {:.2f}",
-                        m_name, nms_config_info, m_yolox_config.image_height, m_yolox_config.image_width);
-    return config_info;
-}
-
 }
 }
index de5d2680672ccddb748302cce9abb1a72dc1c4f3..d0be5cf002023a1f0955f56d38c90f4a92aba880 100644 (file)
@@ -12,6 +12,7 @@
 #define _HAILO_YOLOX_POST_PROCESS_HPP_
 
 #include "net_flow/ops/nms_post_process.hpp"
+#include "net_flow/ops/op_metadata.hpp"
 
 namespace hailort
 {
@@ -42,30 +43,64 @@ struct YoloxPostProcessConfig
     std::vector<MatchingLayersNames> input_names;
 };
 
+class YoloxOpMetadata : public NmsOpMetadata
+{
+public:
+    static Expected<std::shared_ptr<OpMetadata>> create(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                                                        const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                                                        const NmsPostProcessConfig &nms_post_process_config,
+                                                        const YoloxPostProcessConfig &yolox_post_process_config,
+                                                        const std::string &network_name);
+    hailo_status validate_format_info() override;
+    std::string get_op_description() override;
+    YoloxPostProcessConfig &yolox_config() { return m_yolox_config;};
+
+private:
+    YoloxPostProcessConfig m_yolox_config;
+    YoloxOpMetadata(const std::unordered_map<std::string, BufferMetaData> &inputs_metadata,
+                       const std::unordered_map<std::string, BufferMetaData> &outputs_metadata,
+                       const NmsPostProcessConfig &nms_post_process_config,
+                       const YoloxPostProcessConfig &yolox_post_process_config,
+                       const std::string &network_name)
+        : NmsOpMetadata(inputs_metadata, outputs_metadata, nms_post_process_config, "YOLOX-Post-Process", network_name, OperationType::YOLOX)
+        , m_yolox_config(yolox_post_process_config)
+    {}
+
+    hailo_status validate_params() override;
+};
+
 class YOLOXPostProcessOp : public NmsPostProcessOp
 {
 public:
-    static Expected<std::shared_ptr<Op>> create(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                                                const std::map<std::string, BufferMetaData> &outputs_metadata,
-                                                const NmsPostProcessConfig &nms_post_process_config,
-                                                const YoloxPostProcessConfig &yolo_post_process_config);
+    static Expected<std::shared_ptr<Op>> create(std::shared_ptr<YoloxOpMetadata> metadata);
 
     hailo_status execute(const std::map<std::string, MemoryView> &inputs, std::map<std::string, MemoryView> &outputs) override;
-    std::string get_op_description() override;
-    hailo_status validate_metadata() override;// TODO: HRT-10676
 
 private:
-    template<typename HostType = float32_t, typename DeviceType>
+    std::shared_ptr<YoloxOpMetadata> m_metadata;
+
+    YOLOXPostProcessOp(std::shared_ptr<YoloxOpMetadata> metadata)
+        : NmsPostProcessOp(static_cast<std::shared_ptr<NmsOpMetadata>>(metadata))
+        , m_metadata(metadata)
+    {}
+
+    template<typename DstType = float32_t, typename SrcType>
     hailo_status extract_detections(const MatchingLayersNames &layers_names, const MemoryView &reg_buffer, const MemoryView &cls_buffer,
         const MemoryView &obj_buffer, std::vector<DetectionBbox> &detections, std::vector<uint32_t> &classes_detections_count)
     {
-        const auto &reg_shape = m_inputs_metadata[layers_names.reg].shape;
-        const auto &reg_padded_shape = m_inputs_metadata[layers_names.reg].padded_shape;
-        const auto &cls_padded_shape = m_inputs_metadata[layers_names.cls].padded_shape;
-        const auto &obj_padded_shape = m_inputs_metadata[layers_names.obj].padded_shape;
-        const auto &reg_quant_info = m_inputs_metadata[layers_names.reg].quant_info;
-        const auto &cls_quant_info = m_inputs_metadata[layers_names.cls].quant_info;
-        const auto &obj_quant_info = m_inputs_metadata[layers_names.obj].quant_info;
+        const auto &inputs_metadata = m_metadata->inputs_metadata();
+        const auto &nms_config = m_metadata->nms_config();
+
+        assert(contains(inputs_metadata, layers_names.reg));
+        assert(contains(inputs_metadata, layers_names.cls));
+        assert(contains(inputs_metadata, layers_names.obj));
+        const auto &reg_shape = inputs_metadata.at(layers_names.reg).shape;
+        const auto &reg_padded_shape = inputs_metadata.at(layers_names.reg).padded_shape;
+        const auto &cls_padded_shape = inputs_metadata.at(layers_names.cls).padded_shape;
+        const auto &obj_padded_shape = inputs_metadata.at(layers_names.obj).padded_shape;
+        const auto &reg_quant_info = inputs_metadata.at(layers_names.reg).quant_info;
+        const auto &cls_quant_info = inputs_metadata.at(layers_names.cls).quant_info;
+        const auto &obj_quant_info = inputs_metadata.at(layers_names.obj).quant_info;
 
         static const uint32_t X_INDEX = 0;
         static const uint32_t Y_INDEX = 1;
@@ -82,21 +117,21 @@ private:
         // Validate regression buffer size
         static const uint32_t reg_entry_size = 4;
         auto number_of_entries = reg_padded_shape.height * reg_padded_shape.width;
-        auto buffer_size = number_of_entries * reg_entry_size * sizeof(DeviceType);
+        auto buffer_size = number_of_entries * reg_entry_size * sizeof(SrcType);
         CHECK(buffer_size == reg_buffer.size(), HAILO_INVALID_ARGUMENT,
             "Failed to extract_detections, reg {} buffer_size should be {}, but is {}", layers_names.reg, buffer_size, reg_buffer.size());
 
         // Validate classes buffer size
-        const uint32_t cls_entry_size = m_nms_config.number_of_classes;
+        const uint32_t cls_entry_size = nms_config.number_of_classes;
         number_of_entries = cls_padded_shape.height * cls_padded_shape.width;
-        buffer_size = number_of_entries * cls_entry_size * sizeof(DeviceType);
+        buffer_size = number_of_entries * cls_entry_size * sizeof(SrcType);
         CHECK(buffer_size == cls_buffer.size(), HAILO_INVALID_ARGUMENT,
             "Failed to extract_detections, cls {} buffer_size should be {}, but is {}", layers_names.cls, buffer_size, cls_buffer.size());
 
         // Validate objectness buffer size
         static const uint32_t obj_entry_size = 1;
         number_of_entries = obj_padded_shape.height * obj_padded_shape.width;
-        buffer_size = number_of_entries * obj_entry_size * sizeof(DeviceType);
+        buffer_size = number_of_entries * obj_entry_size * sizeof(SrcType);
         CHECK(buffer_size == obj_buffer.size(), HAILO_INVALID_ARGUMENT,
             "Failed to extract_detections, obj {} buffer_size should be {}, but is {}", layers_names.obj, buffer_size, obj_buffer.size());
 
@@ -104,44 +139,45 @@ private:
         auto cls_row_size = cls_padded_shape.width * cls_padded_shape.features;
         auto obj_row_size = obj_padded_shape.width * obj_padded_shape.features;
 
-        DeviceType *reg_data = (DeviceType*)reg_buffer.data();
-        DeviceType *obj_data = (DeviceType*)obj_buffer.data();
-        DeviceType *cls_data = (DeviceType*)cls_buffer.data();
+        SrcType *reg_data = (SrcType*)reg_buffer.data();
+        SrcType *obj_data = (SrcType*)obj_buffer.data();
+        SrcType *cls_data = (SrcType*)cls_buffer.data();
+
 
         for (uint32_t row = 0; row < reg_shape.height; row++) {
             for (uint32_t col = 0; col < reg_shape.width; col++) {
                 auto obj_idx = (obj_row_size * row) + col;
-                auto objectness = Quantization::dequantize_output<HostType, DeviceType>(obj_data[obj_idx], obj_quant_info);
+                auto objectness = Quantization::dequantize_output<DstType, SrcType>(obj_data[obj_idx], obj_quant_info);
 
-                if (objectness < m_nms_config.nms_score_th) {
+                if (objectness < nms_config.nms_score_th) {
                     continue;
                 }
 
                 auto reg_idx = (reg_row_size * row) + col;
                 auto cls_idx = (cls_row_size * row) + col;
 
-                auto tx = Quantization::dequantize_output<HostType, DeviceType>(reg_data[reg_idx + X_OFFSET], reg_quant_info);
-                auto ty = Quantization::dequantize_output<HostType, DeviceType>(reg_data[reg_idx + Y_OFFSET], reg_quant_info);
-                auto tw = Quantization::dequantize_output<HostType, DeviceType>(reg_data[reg_idx + W_OFFSET], reg_quant_info);
-                auto th = Quantization::dequantize_output<HostType, DeviceType>(reg_data[reg_idx + H_OFFSET], reg_quant_info);
+                auto tx = Quantization::dequantize_output<DstType, SrcType>(reg_data[reg_idx + X_OFFSET], reg_quant_info);
+                auto ty = Quantization::dequantize_output<DstType, SrcType>(reg_data[reg_idx + Y_OFFSET], reg_quant_info);
+                auto tw = Quantization::dequantize_output<DstType, SrcType>(reg_data[reg_idx + W_OFFSET], reg_quant_info);
+                auto th = Quantization::dequantize_output<DstType, SrcType>(reg_data[reg_idx + H_OFFSET], reg_quant_info);
                 auto bbox = decode(tx, ty, tw, th, col, row, static_cast<float32_t>(reg_shape.width), static_cast<float32_t>(reg_shape.height));
 
-                if (m_nms_config.cross_classes) {
-                    // Pre-NMS optimization. If NMS checks IOU over different classes, only the maximum class is relevant
-                    auto max_id_score_pair = get_max_class<HostType, DeviceType>(cls_data, cls_idx, CLASSES_START_INDEX, objectness, cls_quant_info, cls_padded_shape.width);
+                if (nms_config.cross_classes) {
+                    // Pre-NMS optimization. If NMS checks IoU over different classes, only the maximum class is relevant
+                    auto max_id_score_pair = get_max_class<DstType, SrcType>(cls_data, cls_idx, CLASSES_START_INDEX, objectness, cls_quant_info, cls_padded_shape.width);
                     bbox.score = max_id_score_pair.second;
-                    if (max_id_score_pair.second >= m_nms_config.nms_score_th) {
+                    if (max_id_score_pair.second >= nms_config.nms_score_th) {
                         detections.emplace_back(DetectionBbox(bbox, max_id_score_pair.first));
                         classes_detections_count[max_id_score_pair.first]++;
                     }
                 }
                 else {
-                    for (uint32_t curr_class_idx = 0; curr_class_idx < m_nms_config.number_of_classes; curr_class_idx++) {
+                    for (uint32_t curr_class_idx = 0; curr_class_idx < nms_config.number_of_classes; curr_class_idx++) {
                         auto class_entry_idx = cls_idx + (curr_class_idx * cls_padded_shape.width);
-                        auto class_confidence = Quantization::dequantize_output<HostType, DeviceType>(
+                        auto class_confidence = Quantization::dequantize_output<DstType, SrcType>(
                             cls_data[class_entry_idx], cls_quant_info);
                         auto class_score = class_confidence * objectness;
-                        if (class_score >= m_nms_config.nms_score_th) {
+                        if (class_score >= nms_config.nms_score_th) {
                             bbox.score = class_score;
                             detections.emplace_back(DetectionBbox(bbox, curr_class_idx));
                             classes_detections_count[curr_class_idx]++;
@@ -157,16 +193,6 @@ private:
     virtual hailo_bbox_float32_t decode(float32_t tx, float32_t ty, float32_t tw, float32_t th,
         uint32_t col, uint32_t row, float32_t w_stride, float32_t h_stride) const;
 
-    YoloxPostProcessConfig m_yolox_config;
-
-    YOLOXPostProcessOp(const std::map<std::string, BufferMetaData> &inputs_metadata,
-                       const std::map<std::string, BufferMetaData> &outputs_metadata,
-                       const NmsPostProcessConfig &nms_post_process_config,
-                       const YoloxPostProcessConfig &yolo_post_process_config)
-        : NmsPostProcessOp(inputs_metadata, outputs_metadata, nms_post_process_config, "YOLOX-Post-Process")
-        , m_yolox_config(yolo_post_process_config)
-    {}
-
 };
 
 } // namespace net_flow
diff --git a/hailort/libhailort/src/net_flow/pipeline/async_infer_runner.cpp b/hailort/libhailort/src/net_flow/pipeline/async_infer_runner.cpp
new file mode 100644 (file)
index 0000000..04bb899
--- /dev/null
@@ -0,0 +1,1100 @@
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file async_infer_runner.cpp
+ * @brief Implementation of the async HL infer
+ **/
+
+#include <iostream>
+
+#include "common/utils.hpp"
+#include "common/os_utils.hpp"
+#include "hailo/event.hpp"
+#include "hailo/hailort_defaults.hpp"
+#include "hailo/hailort_common.hpp"
+#include "net_flow/pipeline/async_infer_runner_internal.hpp"
+#include "net_flow/pipeline/pipeline.hpp"
+#include "net_flow/ops/op_metadata.hpp"
+
+namespace hailort
+{
+
+Expected<AsyncPipeline> AsyncPipeline::create()
+{
+    return AsyncPipeline();
+}
+
+AsyncPipeline::AsyncPipeline() {}
+
+void AsyncPipeline::add_element_to_pipeline(std::shared_ptr<PipelineElement> pipeline_element)
+{
+    m_pipeline_elements.push_back(pipeline_element);
+}
+
+void AsyncPipeline::set_async_hw_element(std::shared_ptr<AsyncHwElement> async_hw_element)
+{
+    m_async_hw_element = async_hw_element;
+}
+
+void AsyncPipeline::add_entry_element(std::shared_ptr<PipelineElement> pipeline_element, const std::string &input_name)
+{
+    assert(!contains(m_entry_elements, input_name));
+    m_entry_elements[input_name] = pipeline_element;
+}
+
+void AsyncPipeline::add_last_element(std::shared_ptr<PipelineElement> pipeline_element, const std::string &output_name)
+{
+    assert(!contains(m_last_elements, output_name));
+    m_last_elements[output_name] = pipeline_element;
+}
+
+void AsyncPipeline::set_build_params(ElementBuildParams &build_params)
+{
+    m_build_params = build_params;
+}
+
+const std::vector<std::shared_ptr<PipelineElement>>& AsyncPipeline::get_pipeline() const
+{
+    return m_pipeline_elements;
+}
+
+const std::unordered_map<std::string, std::shared_ptr<PipelineElement>>& AsyncPipeline::get_entry_elements() const
+{
+    return m_entry_elements;
+}
+
+const std::unordered_map<std::string, std::shared_ptr<PipelineElement>>& AsyncPipeline::get_last_elements() const
+{
+    return m_last_elements;
+}
+
+const std::shared_ptr<AsyncHwElement> AsyncPipeline::get_async_hw_element()
+{
+    return m_async_hw_element;
+}
+
+const ElementBuildParams AsyncPipeline::get_build_params()
+{
+    return m_build_params;
+}
+
+Expected<std::shared_ptr<AsyncInferRunnerInternal>> AsyncInferRunnerInternal::create(ConfiguredNetworkGroupBase &net_group,
+        const std::unordered_map<std::string, hailo_format_t> &inputs_formats, const std::unordered_map<std::string, hailo_format_t> &outputs_formats)
+{
+    auto async_infer_runner = AsyncInferRunnerImpl::create(net_group, inputs_formats, outputs_formats);
+    CHECK_EXPECTED(async_infer_runner);
+
+    auto async_infer_runner_ptr = std::shared_ptr<AsyncInferRunnerInternal>(async_infer_runner.release());
+    CHECK_NOT_NULL_AS_EXPECTED(async_infer_runner_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    return async_infer_runner_ptr;
+}
+
+AsyncInferRunnerInternal::AsyncInferRunnerInternal() :
+    m_pipeline_status(make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS))
+{}
+
+Expected<std::shared_ptr<AsyncInferRunnerImpl>> AsyncInferRunnerImpl::create(ConfiguredNetworkGroupBase &net_group,
+    const std::unordered_map<std::string, hailo_format_t> &inputs_formats, const std::unordered_map<std::string, hailo_format_t> &outputs_formats,
+    const uint32_t timeout)
+{
+    auto async_pipeline_expected = create_pipeline(net_group, inputs_formats, outputs_formats, timeout);
+    CHECK_EXPECTED(async_pipeline_expected);
+
+    auto async_infer_runner_ptr = make_shared_nothrow<AsyncInferRunnerImpl>(async_pipeline_expected.release());
+    CHECK_NOT_NULL_AS_EXPECTED(async_infer_runner_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    auto status = async_infer_runner_ptr->start_pipeline();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return async_infer_runner_ptr;
+}
+
+AsyncInferRunnerImpl::AsyncInferRunnerImpl(AsyncPipeline &&async_pipeline) :
+    AsyncInferRunnerInternal(),
+    m_async_pipeline(std::move(async_pipeline)),
+    m_is_activated(false),
+    m_is_aborted(false)
+{}
+
+AsyncInferRunnerImpl::~AsyncInferRunnerImpl()
+{
+    (void)stop_pipeline();
+}
+
+hailo_status AsyncInferRunnerImpl::stop_pipeline()
+{
+    hailo_status status = HAILO_SUCCESS;
+    if (m_is_activated) {
+        m_is_activated = false;
+        for (auto &entry_element : m_async_pipeline.get_entry_elements()) {
+            status = entry_element.second->deactivate();
+            if (HAILO_SUCCESS != status) {
+                LOGGER__WARNING("Failed deactivate of element {} status {}", entry_element.second->name(), status);
+            }
+
+            auto should_clear_abort = (!m_is_aborted);
+            status = entry_element.second->post_deactivate(should_clear_abort);
+            if (HAILO_SUCCESS != status) {
+                LOGGER__WARNING("Failed post deactivate of element {} status {}", entry_element.second->name(), status);
+            }
+        }
+    }
+    return status;
+}
+
+hailo_status AsyncInferRunnerImpl::start_pipeline()
+{
+    hailo_status status = HAILO_SUCCESS;
+    for (auto &entry_element : m_async_pipeline.get_entry_elements()) {
+        status = entry_element.second->activate();
+        CHECK_SUCCESS(status);
+    }
+
+    return status;
+}
+
+hailo_status AsyncInferRunnerImpl::async_infer()
+{
+    hailo_status status = m_async_pipeline.get_build_params().pipeline_status->load();
+    CHECK(HAILO_SUCCESS == status, HAILO_INVALID_OPERATION, "Can't handle infer request since Pipeline status is {}.", status);
+
+    for (auto &last_element : m_async_pipeline.get_last_elements()) {
+        auto buffers_are_full = last_element.second->are_buffer_pools_full();
+        CHECK_EXPECTED_AS_STATUS(buffers_are_full);
+        if (buffers_are_full.release()) {
+            LOGGER__ERROR("Can't handle infer request since queue is full.");
+            return HAILO_QUEUE_IS_FULL;
+        }
+    }
+
+    for (auto &last_element : m_async_pipeline.get_last_elements()) {
+        assert(contains(m_output_buffers, last_element.first));
+        auto output_buffer = m_output_buffers.at(last_element.first);
+        auto read_done = m_read_dones.at(last_element.first);
+        // TODO: handle the non-recoverable case where one buffer is enqueued successfully and the second isn't (HRT-11783)
+        status = last_element.second->enqueue_execution_buffer(output_buffer, read_done);
+        CHECK_SUCCESS(status);
+    }
+
+    for (auto &entry_element : m_async_pipeline.get_entry_elements()) {
+        assert(contains(m_input_buffers, entry_element.first));
+        auto input_buffer = m_input_buffers.at(entry_element.first);
+        auto write_done = m_write_dones.at(entry_element.first);
+        entry_element.second->sinks()[0].run_push_async(PipelineBuffer(input_buffer, write_done));
+    }
+    return HAILO_SUCCESS;
+}
+
+// Adds an intermediate element to the async pipeline (delegates to AsyncPipeline).
+void AsyncInferRunnerImpl::add_element_to_pipeline(std::shared_ptr<PipelineElement> pipeline_element)
+{
+    m_async_pipeline.add_element_to_pipeline(pipeline_element);
+}
+
+// Registers pipeline_element as the pipeline's entry point for the given input
+// (vstream) name (delegates to AsyncPipeline).
+void AsyncInferRunnerImpl::add_entry_element(std::shared_ptr<PipelineElement> pipeline_element, const std::string &input_name)
+{
+    m_async_pipeline.add_entry_element(pipeline_element, input_name);
+}
+
+// Registers pipeline_element as the pipeline's terminal element for the given output
+// name (delegates to AsyncPipeline).
+void AsyncInferRunnerImpl::add_last_element(std::shared_ptr<PipelineElement> pipeline_element, const std::string &output_name)
+{
+    m_async_pipeline.add_last_element(pipeline_element, output_name);
+}
+
+// Returns the input-name -> entry-element map. Note: returned by value (a copy of
+// the map; the shared_ptr elements themselves are shared).
+std::unordered_map<std::string, std::shared_ptr<PipelineElement>> AsyncInferRunnerImpl::get_entry_elements()
+{
+    return m_async_pipeline.get_entry_elements();
+}
+
+// Returns the output-name -> terminal-element map. Note: returned by value (a copy
+// of the map; the shared_ptr elements themselves are shared).
+std::unordered_map<std::string, std::shared_ptr<PipelineElement>> AsyncInferRunnerImpl::get_last_elements()
+{
+    return m_async_pipeline.get_last_elements();
+}
+
+// Stores the user's input buffer view and write-completion callback for the given
+// input name, to be consumed by the next async_infer() call. Overwrites any
+// previously registered entry for the same name.
+void AsyncInferRunnerImpl::set_input(const std::string &input_name, MemoryView &&input_buffer, TransferDoneCallbackAsyncInfer &write_done)
+{
+    m_input_buffers[input_name] = std::move(input_buffer);
+    m_write_dones[input_name] = write_done;  // callback is copied, not moved
+}
+
+// Stores the user's output buffer view and read-completion callback for the given
+// output name, to be consumed by the next async_infer() call. Overwrites any
+// previously registered entry for the same name.
+void AsyncInferRunnerImpl::set_output(const std::string &output_name, MemoryView &&output_buffer, TransferDoneCallbackAsyncInfer &read_done)
+{
+    m_output_buffers[output_name] = std::move(output_buffer);
+    m_read_dones[output_name] = read_done;  // callback is copied, not moved
+}
+
+Expected<size_t> AsyncInferRunnerImpl::get_min_buffer_pool_size(ConfiguredNetworkGroupBase &net_group)
+{
+    uint32_t buffer_pool_size = UINT32_MAX;
+
+    auto input_streams = net_group.get_input_streams();
+    for (const auto &input_stream : input_streams) {
+        auto async_max_queue_size = input_stream.get().get_async_max_queue_size();
+        CHECK_EXPECTED(async_max_queue_size);
+        if (buffer_pool_size > async_max_queue_size.value()) {
+            buffer_pool_size = static_cast<uint32_t>(async_max_queue_size.value());
+        }
+    }
+
+    auto output_streams = net_group.get_output_streams();
+    for (const auto &output_stream : output_streams) {
+        auto async_max_queue_size = output_stream.get().get_async_max_queue_size();
+        CHECK_EXPECTED(async_max_queue_size);
+        if (buffer_pool_size > async_max_queue_size.value()) {
+            buffer_pool_size = static_cast<uint32_t>(async_max_queue_size.value());
+        }
+    }
+
+    return buffer_pool_size;
+}
+
+Expected<std::unordered_map<std::string, hailo_format_t>> AsyncInferRunnerImpl::expand_auto_input_formats(ConfiguredNetworkGroupBase &net_group,
+    const std::unordered_map<std::string, hailo_format_t> &inputs_formats)
+{
+    std::unordered_map<std::string, hailo_format_t> expanded_input_format;
+    for (auto &input_format : inputs_formats) {
+        auto input_streams_names = net_group.get_stream_names_from_vstream_name(input_format.first);
+        CHECK_EXPECTED(input_streams_names);
+
+        // TODO: Taking data from the first ll stream will not work in multi-planar work
+        auto shared_stream_ptr = net_group.get_shared_input_stream_by_name(input_streams_names.value()[0]);
+        CHECK_EXPECTED(shared_stream_ptr);
+
+        expanded_input_format[input_format.first] = HailoRTDefaults::expand_auto_format(input_format.second,
+            shared_stream_ptr.value()->get_info().format);
+    }
+    return expanded_input_format;
+}
+
+Expected<std::unordered_map<std::string, hailo_format_t>> AsyncInferRunnerImpl::expand_auto_output_formats(ConfiguredNetworkGroupBase &net_group,
+    const std::unordered_map<std::string, hailo_format_t> &outputs_formats)
+{
+    std::unordered_map<std::string, hailo_format_t> expanded_output_format;
+    for (auto &output_format : outputs_formats) {
+        auto output_streams_names = net_group.get_stream_names_from_vstream_name(output_format.first);
+        CHECK_EXPECTED(output_streams_names);
+
+        // TODO: Taking data from the first ll stream will not work in multi-planar work
+        auto shared_stream_ptr = net_group.get_shared_output_stream_by_name(output_streams_names.value()[0]);
+        CHECK_EXPECTED(shared_stream_ptr);
+
+        expanded_output_format[output_format.first] = HailoRTDefaults::expand_auto_format(output_format.second,
+            shared_stream_ptr.value()->get_info().format);
+    }
+    return expanded_output_format;
+}
+
+Expected<std::unordered_map<std::string, std::shared_ptr<InputStream>>> AsyncInferRunnerImpl::get_input_streams_from_net_group(ConfiguredNetworkGroupBase &net_group,
+    const std::unordered_map<std::string, hailo_format_t> &inputs_formats)
+{
+    std::unordered_map<std::string, std::shared_ptr<InputStream>> input_streams;
+    for (auto &input_format : inputs_formats) {
+        auto input_streams_names = net_group.get_stream_names_from_vstream_name(input_format.first);
+        CHECK_EXPECTED(input_streams_names);
+
+        for (auto &input_stream_name : input_streams_names.release()) {
+            auto shared_stream_ptr = net_group.get_shared_input_stream_by_name(input_stream_name);
+            CHECK_EXPECTED(shared_stream_ptr);
+
+            input_streams[input_stream_name] = shared_stream_ptr.release();
+        }
+    }
+    return input_streams;
+}
+
+Expected<std::unordered_map<std::string, std::shared_ptr<OutputStream>>> AsyncInferRunnerImpl::get_output_streams_from_net_group(ConfiguredNetworkGroupBase &net_group,
+    const std::unordered_map<std::string, hailo_format_t> &outputs_formats)
+{
+    std::unordered_map<std::string, std::shared_ptr<OutputStream>> output_streams;
+    for (auto &output_format : outputs_formats) {
+        auto output_streams_names = net_group.get_stream_names_from_vstream_name(output_format.first);
+        CHECK_EXPECTED(output_streams_names);
+
+        for (auto &output_stream_name : output_streams_names.release()) {
+            auto shared_stream_ptr = net_group.get_shared_output_stream_by_name(output_stream_name);
+            CHECK_EXPECTED(shared_stream_ptr);
+
+            output_streams[output_stream_name] = shared_stream_ptr.release();
+        }
+    }
+    return output_streams;
+}
+
+hailo_status AsyncInferRunnerImpl::create_pre_async_hw_elements(ConfiguredNetworkGroupBase &net_group,
+        std::unordered_map<std::string, std::shared_ptr<InputStream>> &input_streams,
+        const std::unordered_map<std::string, hailo_format_t> &inputs_formats, AsyncPipeline &async_pipeline)
+{
+    bool is_dma_able = true;
+    for (auto &input_stream_pair : input_streams) {
+        auto input_stream = input_stream_pair.second;
+        auto input_stream_name = input_stream_pair.first;
+        auto input_stream_base = std::static_pointer_cast<InputStreamBase>(input_stream);
+        auto input_stream_info = input_stream->get_info();
+        auto vstream_names = net_group.get_vstream_names_from_stream_name(input_stream_name);
+        CHECK_EXPECTED_AS_STATUS(vstream_names);
+
+        auto sink_index = async_pipeline.get_async_hw_element()->get_sink_index_from_input_stream_name(input_stream_name);
+        CHECK_EXPECTED_AS_STATUS(sink_index);
+
+        auto should_transform = InputTransformContext::is_transformation_required(input_stream_info.shape,
+            inputs_formats.at(input_stream_name), input_stream_info.hw_shape, input_stream_info.format,
+            input_stream_base->get_quant_infos());
+        CHECK_EXPECTED_AS_STATUS(should_transform);
+
+        auto entry_queue_elem = add_push_queue_element(PipelineObject::create_element_name("EntryPushQueueElement", input_stream_info.name, input_stream_info.index),
+            async_pipeline, nullptr);
+        CHECK_EXPECTED_AS_STATUS(entry_queue_elem);
+
+        if (should_transform.value()) {
+            auto pre_infer_elem = PreInferElement::create(input_stream_info.shape, inputs_formats.at(input_stream_name),
+                input_stream_info.hw_shape, input_stream_info.format, input_stream_base->get_quant_infos(),
+                PipelineObject::create_element_name("PreInferElement", input_stream_info.name, input_stream_info.index),
+                async_pipeline.get_build_params(), PipelineDirection::PUSH, is_dma_able);
+            CHECK_EXPECTED_AS_STATUS(pre_infer_elem);
+            async_pipeline.add_element_to_pipeline(pre_infer_elem.value());
+            CHECK_SUCCESS(PipelinePad::link_pads(entry_queue_elem.value(), pre_infer_elem.value()));
+
+            auto queue_elem = add_push_queue_element(PipelineObject::create_element_name("PushQueueElement", input_stream_info.name, input_stream_info.index),
+                async_pipeline, pre_infer_elem.value());
+            CHECK_EXPECTED_AS_STATUS(queue_elem);
+
+            CHECK_SUCCESS(PipelinePad::link_pads(queue_elem.value(), async_pipeline.get_async_hw_element(), 0, sink_index.value()));
+        } else {
+            CHECK_SUCCESS(PipelinePad::link_pads(entry_queue_elem.value(), async_pipeline.get_async_hw_element(), 0, sink_index.value()));
+        }
+
+        for (auto &vstream_name : vstream_names.release()) {
+            if (!contains(async_pipeline.get_entry_elements(), vstream_name)) {
+                async_pipeline.add_entry_element(entry_queue_elem.release(), vstream_name);
+            }
+        }
+    }
+    return HAILO_SUCCESS;
+}
+
+Expected<std::shared_ptr<PostInferElement>> AsyncInferRunnerImpl::add_post_infer_element(const hailo_format_t &output_format,
+    const hailo_nms_info_t &nms_info, AsyncPipeline &async_pipeline, const hailo_3d_image_shape_t &src_image_shape,
+    const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape, const std::vector<hailo_quant_info_t> &dst_quant_infos,
+    bool is_last_copy_element, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index)
+{
+    auto queue_elem = add_push_queue_element(PipelineObject::create_element_name("PushQueueElement", final_elem->name(), static_cast<uint8_t>(final_elem_source_index)),
+        async_pipeline, final_elem, final_elem_source_index);
+    CHECK_EXPECTED(queue_elem);
+
+    auto post_infer_elem = PostInferElement::create(src_image_shape, src_format, dst_image_shape, output_format,
+        dst_quant_infos, nms_info, PipelineObject::create_element_name("PostInferElement",
+        final_elem->name(), static_cast<uint8_t>(final_elem_source_index)), async_pipeline.get_build_params(),
+        PipelineDirection::PUSH, is_last_copy_element);
+    CHECK_EXPECTED(post_infer_elem);
+
+    async_pipeline.add_element_to_pipeline(post_infer_elem.value());
+
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(queue_elem.value(), post_infer_elem.value()));
+    return post_infer_elem.release();
+}
+
+Expected<std::shared_ptr<AsyncPushQueueElement>> AsyncInferRunnerImpl::add_push_queue_element(const std::string &queue_name, AsyncPipeline &async_pipeline,
+    std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index)
+{
+    auto push_queue_elem = AsyncPushQueueElement::create(queue_name, async_pipeline.get_build_params(), PipelineDirection::PUSH);
+    CHECK_EXPECTED(push_queue_elem);
+
+    async_pipeline.add_element_to_pipeline(push_queue_elem.value());
+
+    // final elem will be nullptr in case it's the first element in pipeline
+    if (final_elem) {
+        CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(final_elem, push_queue_elem.value(), final_elem_source_index, 0));
+    }
+
+    return push_queue_elem.release();
+}
+
+Expected<std::shared_ptr<ConvertNmsToDetectionsElement>> AsyncInferRunnerImpl::add_nms_to_detections_convert_element(AsyncPipeline &async_pipeline,
+    std::shared_ptr<OutputStream> output_stream, const std::string &element_name, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+    const bool is_last_copy_element, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_index)
+{
+    auto metadata = std::dynamic_pointer_cast<net_flow::NmsOpMetadata>(op_metadata);
+    assert(nullptr != metadata);
+
+    auto nms_to_detections_element = ConvertNmsToDetectionsElement::create(metadata->nms_info(),
+        PipelineObject::create_element_name(element_name, output_stream->name(), output_stream->get_info().index),
+        async_pipeline.get_build_params(), PipelineDirection::PUSH, is_last_copy_element);
+    CHECK_EXPECTED(nms_to_detections_element);
+
+    async_pipeline.add_element_to_pipeline(nms_to_detections_element.value());
+
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(final_elem, nms_to_detections_element.value(), final_elem_index, 0));
+    return nms_to_detections_element.release();
+}
+
+Expected<std::shared_ptr<RemoveOverlappingBboxesElement>> AsyncInferRunnerImpl::add_remove_overlapping_bboxes_element(AsyncPipeline &async_pipeline,
+    std::shared_ptr<OutputStream> output_stream, const std::string &element_name, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+    const bool is_last_copy_element, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_index)
+{
+    auto metadata = std::dynamic_pointer_cast<net_flow::NmsOpMetadata>(op_metadata);
+    assert(nullptr != metadata);
+
+    auto remove_overlapping_bboxes_element = RemoveOverlappingBboxesElement::create(metadata->nms_config(),
+        PipelineObject::create_element_name(element_name, output_stream->name(), output_stream->get_info().index),
+        async_pipeline.get_build_params(), PipelineDirection::PUSH, is_last_copy_element);
+    CHECK_EXPECTED(remove_overlapping_bboxes_element);
+
+    async_pipeline.add_element_to_pipeline(remove_overlapping_bboxes_element.value());
+
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(final_elem, remove_overlapping_bboxes_element.value(), final_elem_index, 0));
+    return remove_overlapping_bboxes_element;
+}
+
+Expected<std::shared_ptr<FillNmsFormatElement>> AsyncInferRunnerImpl::add_fill_nms_format_element(AsyncPipeline &async_pipeline,
+    std::shared_ptr<OutputStream> output_stream, const std::string &element_name, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+    const hailo_format_t &output_format, const bool is_last_copy_element, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_index)
+{
+    auto metadata = std::dynamic_pointer_cast<net_flow::NmsOpMetadata>(op_metadata);
+    assert(nullptr != metadata);
+
+    auto fill_nms_format_element = FillNmsFormatElement::create(metadata->nms_info(), output_format, metadata->nms_config(),
+        PipelineObject::create_element_name(element_name, output_stream->name(), output_stream->get_info().index),
+        async_pipeline.get_build_params(), PipelineDirection::PUSH, is_last_copy_element);
+    CHECK_EXPECTED(fill_nms_format_element);
+
+    async_pipeline.add_element_to_pipeline(fill_nms_format_element.value());
+
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(final_elem, fill_nms_format_element.value(), final_elem_index, 0));
+    return fill_nms_format_element;
+}
+
+// Creates the terminal LastAsyncElement of an output flow, links it after final_elem's
+// chosen source pad, and registers it under the user-facing output name.
+Expected<std::shared_ptr<LastAsyncElement>> AsyncInferRunnerImpl::add_last_async_element(AsyncPipeline &async_pipeline,
+    const std::string &output_format_name, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index)
+{
+    auto last_elem = LastAsyncElement::create(PipelineObject::create_element_name("LastAsyncElement",
+        final_elem->name(), static_cast<uint8_t>(final_elem_source_index)), async_pipeline.get_build_params());
+    CHECK_EXPECTED(last_elem);
+
+    async_pipeline.add_element_to_pipeline(last_elem.value());
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(final_elem, last_elem.value(), final_elem_source_index, 0));
+
+    async_pipeline.add_last_element(last_elem.value(), output_format_name);
+
+    return last_elem.release();
+}
+
+// Looks up the user-requested output format whose name matches the given demuxed
+// edge name; returns HAILO_NOT_FOUND if no such entry exists.
+Expected<std::pair<std::string, hailo_format_t>> AsyncInferRunnerImpl::get_output_format_from_edge_info_name(std::string edge_info_name,
+    const std::unordered_map<std::string, hailo_format_t> &outputs_formats)
+{
+    for (const auto &format_pair : outputs_formats) {
+        if (edge_info_name == format_pair.first) {
+            return std::pair<std::string, hailo_format_t>(format_pair);
+        }
+    }
+    return make_unexpected(HAILO_NOT_FOUND);
+}
+
+// Splits a muxed output stream into its per-edge outputs through a
+// TransformDemuxElement: each demuxed edge gets a push queue, an optional post-infer
+// transformation, and a terminal LastAsyncElement registered under the edge's name.
+hailo_status AsyncInferRunnerImpl::add_output_demux_flow(std::shared_ptr<OutputStreamBase> &output_stream,
+    AsyncPipeline &async_pipeline, const std::unordered_map<std::string, hailo_format_t> &outputs_formats)
+{
+    const bool is_dma_able_hw_async = true;
+    auto status = async_pipeline.get_async_hw_element()->fill_buffer_pools(is_dma_able_hw_async);
+    CHECK_SUCCESS(status);
+
+    auto expected_demuxer = OutputDemuxer::create(*output_stream);
+    CHECK_EXPECTED_AS_STATUS(expected_demuxer);
+
+    std::shared_ptr<OutputDemuxer> demuxer_ptr = expected_demuxer.release();
+    CHECK_ARG_NOT_NULL(demuxer_ptr);
+
+    status = output_stream->set_timeout(HAILO_INFINITE_TIMEOUT);
+    CHECK_SUCCESS(status);
+
+    auto demux_elem = TransformDemuxElement::create(demuxer_ptr,
+        PipelineObject::create_element_name("TransformDemuxElement", output_stream->name(), output_stream->get_info().index),
+        async_pipeline.get_build_params(), PipelineDirection::PUSH);
+    CHECK_EXPECTED_AS_STATUS(demux_elem);
+    async_pipeline.add_element_to_pipeline(demux_elem.value());
+
+    // Link the HW element's source for this stream into the demuxer.
+    auto output_index = async_pipeline.get_async_hw_element()->get_source_index_from_output_stream_name(output_stream->name());
+    CHECK_EXPECTED_AS_STATUS(output_index);
+    CHECK_SUCCESS(PipelinePad::link_pads(async_pipeline.get_async_hw_element(), demux_elem.value(), output_index.value(), 0));
+
+    // 'i' serves both as the demuxer source-pad index and as a uniquifier in names.
+    uint8_t i = 0;
+    for (auto &edge_info : demuxer_ptr->get_edges_stream_info()) {
+        auto output_format_expected = get_output_format_from_edge_info_name(edge_info.name, outputs_formats);
+        CHECK_EXPECTED_AS_STATUS(output_format_expected);
+
+        auto demux_queue_elem = add_push_queue_element(PipelineObject::create_element_name("PushQueueElement_demux", edge_info.name, i), async_pipeline,
+            demux_elem.value(), i);
+        CHECK_EXPECTED_AS_STATUS(demux_queue_elem);
+
+        auto should_transform = OutputTransformContext::is_transformation_required(edge_info.hw_shape, 
+            edge_info.format, edge_info.shape, output_format_expected.value().second, std::vector<hailo_quant_info_t>{edge_info.quant_info}); // TODO: Get quant vector (HRT-11077)
+        CHECK_EXPECTED_AS_STATUS(should_transform);
+
+        if (should_transform.value()) {
+            status = demux_elem.value()->fill_buffer_pool(false, i);
+            CHECK_SUCCESS(status);
+
+            auto post_infer_elem = add_post_infer_element(output_format_expected.value().second, edge_info.nms_info,
+                async_pipeline, edge_info.hw_shape, edge_info.format, edge_info.shape, {edge_info.quant_info}, true, demux_queue_elem.value());
+            CHECK_EXPECTED_AS_STATUS(post_infer_elem);
+
+            auto last_async_element = add_last_async_element(async_pipeline, output_format_expected.value().first, post_infer_elem.value());
+            CHECK_EXPECTED_AS_STATUS(last_async_element);
+        } else {
+            // The edge is already in the requested format - terminate after the queue.
+            auto last_async_element = add_last_async_element(async_pipeline, output_format_expected.value().first, demux_queue_elem.value());
+            CHECK_EXPECTED_AS_STATUS(last_async_element);
+        }
+        i++;
+    }
+    return HAILO_SUCCESS;
+}
+
+// TODO: remove this function as part of HRT-11667
+hailo_status AsyncInferRunnerImpl::finalize_output_flow(std::shared_ptr<OutputStreamBase> &output_stream_base,
+    const std::pair<std::string, hailo_format_t> &output_format, const hailo_nms_info_t &nms_info, const bool is_dma_able,
+    AsyncPipeline &async_pipeline, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index)
+{
+    auto stream_info = output_stream_base->get_info();
+    auto stream_quant_infos = output_stream_base->get_quant_infos();
+    auto should_transform = OutputTransformContext::is_transformation_required(stream_info.hw_shape,
+        stream_info.format, stream_info.shape, output_format.second, stream_quant_infos);
+    CHECK_EXPECTED_AS_STATUS(should_transform);
+
+    if (should_transform.value()) {
+        hailo_status status = final_elem->fill_buffer_pools(is_dma_able);
+        CHECK_SUCCESS(status);
+
+        auto post_infer_elem = add_post_infer_element(output_format.second, nms_info, async_pipeline,
+            stream_info.hw_shape, stream_info.format, stream_info.shape, stream_quant_infos, true, final_elem, final_elem_source_index);
+        CHECK_EXPECTED_AS_STATUS(post_infer_elem);
+
+        auto last_async_element = add_last_async_element(async_pipeline, output_format.first, post_infer_elem.value());
+        CHECK_EXPECTED_AS_STATUS(last_async_element);
+    } else {
+        auto last_async_element = add_last_async_element(async_pipeline, output_format.first, final_elem, final_elem_source_index);
+        CHECK_EXPECTED_AS_STATUS(last_async_element);
+    }
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncInferRunnerImpl::add_nms_fuse_flow(OutputStreamPtrVector &output_streams,
+    const std::pair<std::string, hailo_format_t> &output_format, AsyncPipeline &async_pipeline)
+{
+    const bool is_dma_able_hw_async = true;
+    auto status = async_pipeline.get_async_hw_element()->fill_buffer_pools(is_dma_able_hw_async);
+    CHECK_SUCCESS(status);
+
+    std::vector<hailo_nms_info_t> nms_infos;
+    nms_infos.reserve(output_streams.size());
+    for (const auto &out_stream : output_streams) {
+        CHECK(out_stream->get_info().nms_info.defuse_info.class_group_index <= output_streams.size(),
+            HAILO_INVALID_ARGUMENT, "Not all defused nms outputs were grouped correctly!");
+        nms_infos.emplace_back(out_stream->get_info().nms_info);
+    }
+
+    // To get the fused layer name and src stream format, we use the stream info of one of the defuses
+    auto first_defused_stream_info = output_streams[0]->get_info();
+    auto fused_layer_name = first_defused_stream_info.nms_info.defuse_info.original_name;
+
+    bool is_last_copy_element = true;
+    auto nms_elem = NmsMuxElement::create(nms_infos, PipelineObject::create_element_name("NmsMuxElement", fused_layer_name, 0),
+        async_pipeline.get_build_params(), PipelineDirection::PUSH, is_last_copy_element);
+    CHECK_EXPECTED_AS_STATUS(nms_elem);
+
+    async_pipeline.add_element_to_pipeline(nms_elem.value());
+
+    uint32_t i = 0;
+    for (auto &output_stream :  output_streams) {
+        const auto &curr_stream_info = output_stream->get_info();
+        output_stream->set_timeout(HAILO_INFINITE_TIMEOUT);
+
+        auto output_index = async_pipeline.get_async_hw_element()->get_source_index_from_output_stream_name(output_stream->name());
+        CHECK_EXPECTED_AS_STATUS(output_index);
+
+        auto queue_elem = add_push_queue_element(PipelineObject::create_element_name("PushQueueElement_nms_source", curr_stream_info.name, curr_stream_info.index),
+            async_pipeline, async_pipeline.get_async_hw_element(), output_index.value());
+        CHECK_EXPECTED_AS_STATUS(queue_elem);
+
+        CHECK_SUCCESS(PipelinePad::link_pads(queue_elem.value(), nms_elem.value(), 0, i));
+        i++;
+    }
+
+    auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_streams[0]);
+    auto fused_layer_nms_info = nms_elem.value()->get_fused_nms_info();
+    const bool is_dma_able_nms_mux = false;
+    const uint32_t final_elem_source_index = 0;
+    status = finalize_output_flow(output_stream_base, output_format, fused_layer_nms_info,
+        is_dma_able_nms_mux, async_pipeline, nms_elem.value(), final_elem_source_index);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncInferRunnerImpl::add_softmax_flow(AsyncPipeline &async_pipeline, OutputStreamPtrVector &output_streams,
+    const std::pair<std::string, hailo_format_t> &output_format, const net_flow::PostProcessOpMetadataPtr &softmax_op_metadata)
+{
+    assert(output_streams.size() == 1);
+    auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_streams[0]);
+    auto hw_async_elem_index = async_pipeline.get_async_hw_element()->get_source_index_from_output_stream_name(output_stream_base->name());
+    CHECK_EXPECTED_AS_STATUS(hw_async_elem_index);
+
+    auto op_input_format = softmax_op_metadata->inputs_metadata().begin()->second.format;
+    auto output_format_expanded = net_flow::SoftmaxOpMetadata::expand_output_format_autos(output_format.second, op_input_format);
+
+    auto stream_info = output_stream_base->get_info();
+    auto stream_quant_infos = output_stream_base->get_quant_infos();
+    auto post_infer_elem = add_post_infer_element(output_format_expanded, {}, async_pipeline, stream_info.hw_shape, stream_info.format,
+        stream_info.shape, output_stream_base->get_quant_infos(), false, async_pipeline.get_async_hw_element(), hw_async_elem_index.value());
+    CHECK_EXPECTED_AS_STATUS(post_infer_elem);
+
+    auto queue_elem = add_push_queue_element(PipelineObject::create_element_name("PushQueueElement_softmax", async_pipeline.get_async_hw_element()->name(),
+        static_cast<uint8_t>(hw_async_elem_index.value())), async_pipeline, post_infer_elem.value());
+    CHECK_EXPECTED_AS_STATUS(queue_elem);
+
+    // Updating metadata according to user request
+    // Currently softmax only supports inputs to be float32 and order NHWC or NC
+    auto updated_inputs_metadata = softmax_op_metadata.get()->inputs_metadata();
+    updated_inputs_metadata.begin()->second.format = output_format_expanded;
+    auto updated_outputs_metadata = softmax_op_metadata.get()->outputs_metadata();
+    updated_outputs_metadata.begin()->second.format = output_format_expanded;
+    auto metadata = std::dynamic_pointer_cast<net_flow::SoftmaxOpMetadata>(softmax_op_metadata);
+    assert(nullptr != metadata);
+    metadata->set_outputs_metadata(updated_outputs_metadata);
+    metadata->set_inputs_metadata(updated_inputs_metadata);
+    CHECK_SUCCESS(metadata->validate_format_info());
+
+    auto op_expected = net_flow::SoftmaxPostProcessOp::create(metadata);
+    CHECK_EXPECTED_AS_STATUS(op_expected);
+
+    auto softmax_op = op_expected.release();
+    auto softmax_element = SoftmaxPostProcessElement::create(softmax_op,
+        PipelineObject::create_element_name("SoftmaxPostProcessElement", output_stream_base->name(), stream_info.index),
+        async_pipeline.get_build_params(), PipelineDirection::PUSH, true);
+    CHECK_EXPECTED_AS_STATUS(softmax_element);
+
+    async_pipeline.add_element_to_pipeline(softmax_element.value());
+    CHECK_SUCCESS(PipelinePad::link_pads(queue_elem.value(), softmax_element.value()));
+
+    auto last_async_element = add_last_async_element(async_pipeline, output_format.first, softmax_element.value());
+    CHECK_EXPECTED_AS_STATUS(last_async_element);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncInferRunnerImpl::add_argmax_flow(AsyncPipeline &async_pipeline, OutputStreamPtrVector &output_streams,
+    const std::pair<std::string, hailo_format_t> &output_format, const net_flow::PostProcessOpMetadataPtr &argmax_op_metadata)
+{
+    assert(output_streams.size() == 1);
+    auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_streams[0]);
+    auto hw_async_elem_index = async_pipeline.get_async_hw_element()->get_source_index_from_output_stream_name(output_stream_base->name());
+    CHECK_EXPECTED_AS_STATUS(hw_async_elem_index);
+
+    auto queue_elem = add_push_queue_element(PipelineObject::create_element_name("PushQueueElement_argmax", async_pipeline.get_async_hw_element()->name(),
+        static_cast<uint8_t>(hw_async_elem_index.value())), async_pipeline, async_pipeline.get_async_hw_element());
+    CHECK_EXPECTED_AS_STATUS(queue_elem);
+
+    // Updating metadata according to user request
+    auto op_input_format = argmax_op_metadata->inputs_metadata().begin()->second.format;
+    auto updated_outputs_metadata = argmax_op_metadata.get()->outputs_metadata();
+    updated_outputs_metadata.begin()->second.format = net_flow::ArgmaxOpMetadata::expand_output_format_autos(output_format.second, op_input_format);;
+    auto metadata = std::dynamic_pointer_cast<net_flow::ArgmaxOpMetadata>(argmax_op_metadata);
+    assert(nullptr != metadata);
+    metadata->set_outputs_metadata(updated_outputs_metadata);
+    CHECK_SUCCESS(metadata->validate_format_info());
+
+    auto op_expected = net_flow::ArgmaxPostProcessOp::create(metadata);
+    CHECK_EXPECTED_AS_STATUS(op_expected);
+    auto argmax_op = op_expected.release();
+    bool is_last_copy_element = true;
+
+    auto argmax_element = ArgmaxPostProcessElement::create(argmax_op,
+        PipelineObject::create_element_name("ArgmaxPostProcessElement", output_stream_base->name(), output_stream_base->get_info().index),
+        async_pipeline.get_build_params(), PipelineDirection::PUSH, is_last_copy_element);
+    CHECK_EXPECTED_AS_STATUS(argmax_element);
+
+    async_pipeline.add_element_to_pipeline(argmax_element.value());
+    CHECK_SUCCESS(PipelinePad::link_pads(queue_elem.value(), argmax_element.value()));
+
+    auto last_async_element = add_last_async_element(async_pipeline, output_format.first, argmax_element.value());
+    CHECK_EXPECTED_AS_STATUS(last_async_element);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncInferRunnerImpl::add_nms_flow(AsyncPipeline &async_pipeline, OutputStreamPtrVector &output_streams,
+    const std::pair<std::string, hailo_format_t> &output_format, const std::shared_ptr<hailort::net_flow::Op> &nms_op,
+    const hailo_vstream_info_t &vstream_info)
+{
+    auto first_stream_info = output_streams[0]->get_info();
+    CHECK(output_format.second.type == HAILO_FORMAT_TYPE_FLOAT32, HAILO_INVALID_ARGUMENT,
+        "NMS output format type must be HAILO_FORMAT_TYPE_FLOAT32");
+    CHECK(HailoRTCommon::is_nms(output_format.second.order), HAILO_INVALID_ARGUMENT,
+        "NMS output format order must be HAILO_FORMAT_ORDER_HAILO_NMS or HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK");
+
+    std::unordered_map<std::string, net_flow::BufferMetaData> inputs_metadata;
+    std::unordered_map<std::string, net_flow::BufferMetaData> outputs_metadata;
+    for (uint32_t i = 0; i < output_streams.size(); ++i) {
+        const auto &curr_stream_info = output_streams[i]->get_info();
+        net_flow::BufferMetaData input_metadata = {
+            curr_stream_info.shape,
+            curr_stream_info.hw_shape,
+            curr_stream_info.format,
+            curr_stream_info.quant_info
+        };
+        inputs_metadata.insert({curr_stream_info.name, input_metadata});
+    }
+
+    assert(nms_op->outputs_metadata().size() == 1);
+
+    net_flow::BufferMetaData output_metadata = {
+        vstream_info.shape,
+        vstream_info.shape,
+        vstream_info.format,
+        vstream_info.quant_info
+    };
+    outputs_metadata.insert({nms_op->outputs_metadata().begin()->first, output_metadata});
+
+    auto nms_elem = NmsPostProcessMuxElement::create(nms_op, PipelineObject::create_element_name("NmsPostProcessMuxElement", nms_op->get_name(), 0),
+        async_pipeline.get_build_params(), PipelineDirection::PUSH, true);
+    CHECK_EXPECTED_AS_STATUS(nms_elem);
+
+    async_pipeline.add_element_to_pipeline(nms_elem.value());
+
+    hailo_format_t nms_src_format;
+    nms_src_format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;
+    nms_src_format.order = HAILO_FORMAT_ORDER_NHCW;
+    nms_src_format.type = first_stream_info.format.type;
+
+    for (uint32_t i = 0; i < output_streams.size(); ++i) {
+        const auto &curr_stream_info = output_streams[i]->get_info();
+        output_streams[i]->set_timeout(HAILO_INFINITE_TIMEOUT); // TODO: Check with Salem/Kimel if can be removed
+
+        auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_streams[i]);
+        auto should_transform = OutputTransformContext::is_transformation_required(curr_stream_info.hw_shape, curr_stream_info.format,
+            curr_stream_info.hw_shape, nms_src_format, output_stream_base->get_quant_infos());
+        CHECK_EXPECTED_AS_STATUS(should_transform);
+
+        CHECK(!(should_transform.value()), HAILO_INVALID_ARGUMENT, "Unexpected transformation required for {}", curr_stream_info.name);
+
+        auto source_id = async_pipeline.get_async_hw_element()->get_source_index_from_output_stream_name(output_stream_base->name());
+        CHECK_EXPECTED_AS_STATUS(source_id);
+
+        auto nms_source_queue_elem = add_push_queue_element(PipelineObject::create_element_name("PullQueueElement_nms_source", curr_stream_info.name, curr_stream_info.index),
+            async_pipeline, async_pipeline.get_async_hw_element(), source_id.value());
+        CHECK_EXPECTED_AS_STATUS(nms_source_queue_elem);
+
+        CHECK_SUCCESS(PipelinePad::link_pads(nms_source_queue_elem.value(), nms_elem.value(), 0, i));
+        nms_elem.value()->add_sink_name(curr_stream_info.name);
+    }
+    auto last_async_element = add_last_async_element(async_pipeline, output_format.first, nms_elem.value());
+    CHECK_EXPECTED_AS_STATUS(last_async_element);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncInferRunnerImpl::add_iou_flow( AsyncPipeline &async_pipeline, OutputStreamPtrVector &output_streams,
+    const std::pair<std::string, hailo_format_t> &output_format, const net_flow::PostProcessOpMetadataPtr &iou_op_metadata)
+{
+    assert(output_streams.size() == 1);
+    auto output_stream = output_streams[0];
+
+    auto output_index = async_pipeline.get_async_hw_element()->get_source_index_from_output_stream_name(output_stream->name());
+        CHECK_EXPECTED_AS_STATUS(output_index);
+
+    auto hw_read_queue_element = add_push_queue_element(PipelineObject::create_element_name("PushQueueElement_hw_read", output_stream->name(), output_stream->get_info().index),
+        async_pipeline, async_pipeline.get_async_hw_element() , output_index.value());
+    CHECK_EXPECTED_AS_STATUS(hw_read_queue_element);
+
+    auto &stream_info = output_stream->get_info();
+    auto &stream_quant_infos = output_stream->get_quant_infos();
+
+    auto post_infer_element = add_post_infer_element(output_format.second, stream_info.nms_info,
+        async_pipeline, stream_info.hw_shape, stream_info.format, stream_info.shape, stream_quant_infos, false, hw_read_queue_element.value());
+    CHECK_EXPECTED_AS_STATUS(post_infer_element);
+
+    auto pre_nms_convert_queue_element = add_push_queue_element(PipelineObject::create_element_name("PullQueueElement_pre_nms_convert", output_stream->name(), output_stream->get_info().index),
+        async_pipeline, post_infer_element.value());
+    CHECK_EXPECTED_AS_STATUS(pre_nms_convert_queue_element);
+
+    auto nms_to_detections_element = add_nms_to_detections_convert_element(async_pipeline, output_stream, "NmsFormatToDetectionsElement", iou_op_metadata,
+        false, pre_nms_convert_queue_element.value());
+    CHECK_EXPECTED_AS_STATUS(nms_to_detections_element);
+
+    auto pre_remove_overlapping_bboxes_element_queue_element = add_push_queue_element(PipelineObject::create_element_name("PullQueueElement_pre_bboxes_removing", output_stream->name(), output_stream->get_info().index),
+        async_pipeline, nms_to_detections_element.value());
+    CHECK_EXPECTED_AS_STATUS(pre_remove_overlapping_bboxes_element_queue_element);
+
+    auto remove_overlapping_bboxes_element = add_remove_overlapping_bboxes_element(async_pipeline, output_stream, "RemoveOverlappingBboxesElement", iou_op_metadata,
+        false, pre_remove_overlapping_bboxes_element_queue_element.value());
+    CHECK_EXPECTED_AS_STATUS(remove_overlapping_bboxes_element);
+
+    auto pre_fill_nms_format_element_queue_element = add_push_queue_element(PipelineObject::create_element_name("PullQueueElement_pre_fill_nms_format", output_stream->name(), output_stream->get_info().index),
+        async_pipeline, remove_overlapping_bboxes_element.value());
+    CHECK_EXPECTED_AS_STATUS(pre_fill_nms_format_element_queue_element);
+
+    auto fill_nms_format_element = add_fill_nms_format_element(async_pipeline, output_stream, "FillNmsFormatElement", iou_op_metadata,
+        output_format.second, true, pre_fill_nms_format_element_queue_element.value());
+    CHECK_EXPECTED_AS_STATUS(fill_nms_format_element);
+
+    auto last_async_element = add_last_async_element(async_pipeline, output_format.first, fill_nms_format_element.value());
+    CHECK_EXPECTED_AS_STATUS(last_async_element);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncInferRunnerImpl::add_nms_flows(AsyncPipeline &async_pipeline, OutputStreamPtrVector &output_streams,
+    const std::pair<std::string, hailo_format_t> &output_format, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+    const std::vector<hailo_vstream_info_t> &vstreams_infos)
+{
+    assert(1 <= op_metadata->outputs_metadata().size());
+    auto updated_outputs_metadata = op_metadata->outputs_metadata();
+    std::pair<std::string, hailo_format_t> expanded_output_format = {output_format.first,
+        net_flow::NmsOpMetadata::expand_output_format_autos_by_op_type(output_format.second, op_metadata->type())};
+    updated_outputs_metadata.begin()->second.format = expanded_output_format.second;
+
+    if (HAILO_FORMAT_FLAGS_QUANTIZED & updated_outputs_metadata.begin()->second.format.flags) {
+        updated_outputs_metadata.begin()->second.format.flags &= ~HAILO_FORMAT_FLAGS_QUANTIZED;
+        // TODO: Delete override when changing CLI default flags
+        // TODO: check with Kimel/Salem of this warning is still needed
+        LOGGER__WARNING("The output_vstream {} format flag is marked as quantized, which is not supported with {}. "
+            "flag has been automatically set to False.", updated_outputs_metadata.begin()->first, op_metadata->get_name());
+    }
+
+    op_metadata->set_outputs_metadata(updated_outputs_metadata);
+    CHECK_SUCCESS(op_metadata->validate_format_info());
+    std::shared_ptr<hailort::net_flow::Op> op;
+
+    switch (op_metadata->type()) {
+    case net_flow::OperationType::IOU:
+        return add_iou_flow(async_pipeline, output_streams, expanded_output_format, op_metadata);
+
+    case net_flow::OperationType::YOLOX:
+    {
+        auto metadata = std::dynamic_pointer_cast<net_flow::YoloxOpMetadata>(op_metadata);
+        assert(nullptr != metadata);
+        auto op_expected = net_flow::YOLOXPostProcessOp::create(metadata);
+        CHECK_EXPECTED_AS_STATUS(op_expected);
+        op = op_expected.release();
+        break;
+    }
+    case net_flow::OperationType::YOLOV5:
+    {
+        auto metadata = std::dynamic_pointer_cast<net_flow::Yolov5OpMetadata>(op_metadata);
+        assert(nullptr != metadata);
+        auto op_expected = net_flow::YOLOv5PostProcessOp::create(metadata);
+        CHECK_EXPECTED_AS_STATUS(op_expected);
+        op = op_expected.release();
+        break;
+    }
+    case net_flow::OperationType::SSD:
+    {
+        auto metadata = std::dynamic_pointer_cast<net_flow::SSDOpMetadata>(op_metadata);
+        assert(nullptr != metadata);
+        auto op_expected = net_flow::SSDPostProcessOp::create(metadata);
+        CHECK_EXPECTED_AS_STATUS(op_expected);
+        op = op_expected.release();
+        break;
+    }
+    default:
+        break;
+    }
+    hailo_vstream_info_t output_vstream_info;
+    for (auto &current_output_vstream_info : vstreams_infos) {
+        if (current_output_vstream_info.name == op->outputs_metadata().begin()->first) {
+            output_vstream_info = current_output_vstream_info;
+        }
+    }
+    return add_nms_flow(async_pipeline, output_streams, expanded_output_format, op, output_vstream_info);
+}
+
+// Dispatches a post-process op to the pipeline-building flow matching its type.
+// The async HW element's buffer pools are filled first, since every flow hangs off of that element.
+hailo_status AsyncInferRunnerImpl::add_ops_flows(AsyncPipeline &async_pipeline,
+    const std::pair<std::string, hailo_format_t> &output_format, net_flow::PostProcessOpMetadataPtr &op_metadata,
+    OutputStreamPtrVector &output_streams, const std::vector<hailo_vstream_info_t> &vstreams_infos)
+{
+    const bool is_dma_able_hw_async = true;
+    auto status = async_pipeline.get_async_hw_element()->fill_buffer_pools(is_dma_able_hw_async);
+    CHECK_SUCCESS(status);
+
+    switch (op_metadata->type()) {
+    // All NMS-family ops (including IOU) share add_nms_flows, which sub-dispatches internally.
+    case net_flow::OperationType::YOLOX:
+    case net_flow::OperationType::SSD:
+    case net_flow::OperationType::YOLOV5:
+    case net_flow::OperationType::IOU:
+    // TODO: add support for YOLOV5SEG
+        return add_nms_flows(async_pipeline, output_streams, output_format, op_metadata, vstreams_infos);
+
+    case net_flow::OperationType::ARGMAX:
+        return add_argmax_flow(async_pipeline, output_streams, output_format, op_metadata);
+
+    case net_flow::OperationType::SOFTMAX:
+        return add_softmax_flow(async_pipeline, output_streams, output_format, op_metadata);
+
+    default:
+        LOGGER__ERROR("op type {} of op {} is not in any of the supported post process OP types", net_flow::OpMetadata::get_operation_type_str(op_metadata->type()), op_metadata->get_name());
+        return HAILO_INVALID_OPERATION;
+    }
+}
+
+// Builds the output-side of the pipeline: for every output vstream format, selects one of four flows —
+// post-process op, NMS fuse (defused NMS), demux, or plain transformation — and wires it after the async HW element.
+hailo_status AsyncInferRunnerImpl::create_post_async_hw_elements(ConfiguredNetworkGroupBase &net_group,
+        const std::unordered_map<std::string, hailo_format_t> &expanded_outputs_formats, std::unordered_map<std::string, hailo_format_t> &original_outputs_formats,
+        AsyncPipeline &async_pipeline)
+{
+    // streams_added is a vector which holds all stream names which vstreams connected to them were already added (for demux cases)
+    std::vector<std::string> streams_added;
+
+    // Building DBs that connect output_vstreams, output_streams and ops.
+    // Note: Assuming each post process op has a unique output streams.
+    //       In other words, not possible for an output stream to be connected to more than one op
+    std::unordered_map<std::string, net_flow::PostProcessOpMetadataPtr> post_process_metadata;
+    std::unordered_map<stream_name_t, op_name_t> op_inputs_to_op_name;
+    for (auto &metadata : net_group.get_ops_metadata().release()) {
+        post_process_metadata.insert({metadata->get_name(), metadata});
+        for (auto &input_name : metadata->get_input_names()) {
+            op_inputs_to_op_name.insert({input_name, metadata->get_name()});
+        }
+    }
+
+    for (auto &output_format : expanded_outputs_formats) {
+        auto output_streams_expected = net_group.get_output_streams_by_vstream_name(output_format.first);
+        CHECK_EXPECTED_AS_STATUS(output_streams_expected);
+
+        // Several vstreams can share the same underlying stream (demux) — process each stream group once.
+        auto first_stream_info = output_streams_expected.value()[0]->get_info();
+        if (contains(streams_added, static_cast<std::string>(first_stream_info.name))) {
+            continue;
+        }
+        for (auto &output_stream : output_streams_expected.value()) {
+            streams_added.push_back(output_stream->get_info().name);
+        }
+
+        if (contains(op_inputs_to_op_name, static_cast<std::string>(first_stream_info.name))) {
+            // Case: the stream feeds a post-process op (NMS / argmax / softmax / ...).
+            auto &op_name = op_inputs_to_op_name.at(first_stream_info.name);
+            auto &op_metadata = post_process_metadata.at(op_name);
+
+            auto output_vstreams_infos = net_group.get_output_vstream_infos();
+            CHECK_EXPECTED_AS_STATUS(output_vstreams_infos);
+
+            // The op flows need the original (pre-expansion) user format for their own format expansion.
+            std::pair<std::string, hailo_format_t> original_output_format = {output_format.first, original_outputs_formats.at(output_format.first)};
+
+            hailo_status status = add_ops_flows(async_pipeline, original_output_format,
+                op_metadata, output_streams_expected.value(), output_vstreams_infos.value());
+            CHECK_SUCCESS(status);
+
+        } else if ((HAILO_FORMAT_ORDER_HAILO_NMS == first_stream_info.format.order) &&
+            (first_stream_info.nms_info.is_defused)) {
+            // Case defuse NMS
+            hailo_status status = add_nms_fuse_flow(output_streams_expected.value(), output_format, async_pipeline);
+            CHECK_SUCCESS(status);
+        } else if (first_stream_info.is_mux) {
+            // case demux in output from NN core (only one output stream is currently supported)
+            hailo_status status = add_output_demux_flow(output_streams_expected.value()[0], async_pipeline, expanded_outputs_formats);
+            CHECK_SUCCESS(status);
+        } else {
+            // case simple and single output from NN core to user (and transformation at best)
+            auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_streams_expected.value()[0]);
+            const bool is_dma_able = true;
+            auto final_elem_source_index = async_pipeline.get_async_hw_element()->get_source_index_from_output_stream_name(output_stream_base->name());
+            CHECK_EXPECTED_AS_STATUS(final_elem_source_index);
+
+            hailo_status status = finalize_output_flow(output_stream_base, output_format, {}, is_dma_able, async_pipeline,
+                async_pipeline.get_async_hw_element(), final_elem_source_index.value());
+            CHECK_SUCCESS(status);
+        }
+    }
+    return HAILO_SUCCESS;
+}
+
+// Constructs the full async-infer pipeline for a configured network group:
+// resolves streams and formats, creates the central AsyncHwElement, then builds the
+// input-side (pre) and output-side (post) element chains around it.
+// `timeout` is in milliseconds and is applied to all pipeline build params.
+Expected<AsyncPipeline> AsyncInferRunnerImpl::create_pipeline(ConfiguredNetworkGroupBase &net_group,
+    const std::unordered_map<std::string, hailo_format_t> &inputs_formats,
+    const std::unordered_map<std::string, hailo_format_t> &outputs_formats,
+    const uint32_t timeout)
+{
+    std::unordered_map<std::string, std::shared_ptr<PipelineElement>> entry_elements;
+    std::unordered_map<std::string, std::shared_ptr<PipelineElement>> last_elements;
+
+    ElementBuildParams build_params;
+
+    // buffer_pool_size should be the minimum of the maximum queue size of all LL streams (input and output)
+    auto buffer_pool_size_expected = get_min_buffer_pool_size(net_group);
+    CHECK_EXPECTED(buffer_pool_size_expected);
+    build_params.buffer_pool_size = buffer_pool_size_expected.release();
+    build_params.elem_stats_flags = HAILO_PIPELINE_ELEM_STATS_NONE;
+    build_params.vstream_stats_flags = HAILO_VSTREAM_STATS_NONE;
+
+    auto async_pipeline_expected = AsyncPipeline::create();
+    CHECK_EXPECTED(async_pipeline_expected);
+    auto async_pipeline = async_pipeline_expected.release();
+
+    auto input_streams_expected = get_input_streams_from_net_group(net_group, inputs_formats);
+    CHECK_EXPECTED(input_streams_expected);
+
+    // Expand HAILO_FORMAT_*_AUTO fields of the user-supplied formats to concrete values.
+    auto input_expanded_format = expand_auto_input_formats(net_group, inputs_formats);
+    CHECK_EXPECTED(input_expanded_format);
+
+    std::vector<std::shared_ptr<InputStream>> input_streams_list;
+    input_streams_list.reserve(input_streams_expected.value().size());
+    for (auto &input_stream : input_streams_expected.value()) {
+        input_streams_list.push_back(input_stream.second);
+    }
+
+    auto output_streams_expected = get_output_streams_from_net_group(net_group, outputs_formats);
+    CHECK_EXPECTED(output_streams_expected);
+
+    auto output_expanded_format = expand_auto_output_formats(net_group, outputs_formats);
+    CHECK_EXPECTED(output_expanded_format);
+
+    auto outputs_original_formats = outputs_formats;  // The original formats is needed for specific format expanding (required for PP OPs, like argmax)
+
+    std::vector<std::shared_ptr<OutputStream>> output_streams_list;
+    output_streams_list.reserve(output_streams_expected.value().size());
+    for (auto &output_stream : output_streams_expected.value()) {
+        output_streams_list.push_back(output_stream.second);
+    }
+
+    auto shutdown_event_expected = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED(shutdown_event_expected);
+
+    build_params.shutdown_event = shutdown_event_expected.release();
+    build_params.pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
+    CHECK_ARG_NOT_NULL_AS_EXPECTED(build_params.pipeline_status);
+    build_params.timeout = std::chrono::milliseconds(timeout);
+
+    // Build params must be set before any element-creation helper reads them via get_build_params().
+    async_pipeline.set_build_params(build_params);
+
+    // all elements in async pipeline start as last elements, and in the end of this func all non-last-copy elements will be added buffers
+    bool is_last_copy_element = true;
+
+    auto async_hw_elem = AsyncHwElement::create(input_streams_list, output_streams_list, build_params.timeout,
+        build_params.buffer_pool_size, build_params.elem_stats_flags,
+        build_params.vstream_stats_flags, build_params.shutdown_event,
+        "AsyncHwElement", build_params.pipeline_status, PipelineDirection::PUSH, is_last_copy_element);
+    CHECK_EXPECTED(async_hw_elem);
+    async_pipeline.add_element_to_pipeline(async_hw_elem.value());
+    async_pipeline.set_async_hw_element(async_hw_elem.release());
+
+    // TODO: HRT-11759
+    hailo_status status = create_pre_async_hw_elements(net_group, input_streams_expected.value(), input_expanded_format.value(),
+        async_pipeline);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    status = create_post_async_hw_elements(net_group, output_expanded_format.value(), outputs_original_formats, async_pipeline);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return async_pipeline;
+}
+
+// Returns (a copy of) the list of elements that make up the underlying async pipeline.
+std::vector<std::shared_ptr<PipelineElement>> AsyncInferRunnerImpl::get_pipeline() const
+{
+    auto pipeline_elements = m_async_pipeline.get_pipeline();
+    return pipeline_elements;
+}
+
+// Builds a single human-readable string describing every pipeline element, in order.
+std::string AsyncInferRunnerImpl::get_pipeline_description() const
+{
+    std::string description = "Async infer pipeline description:\n";
+    for (const auto &elem : get_pipeline()) {
+        description += " >> " + elem->description();
+    }
+    return description;
+}
+
+} /* namespace hailort */
diff --git a/hailort/libhailort/src/net_flow/pipeline/async_infer_runner_internal.hpp b/hailort/libhailort/src/net_flow/pipeline/async_infer_runner_internal.hpp
new file mode 100644 (file)
index 0000000..f07316e
--- /dev/null
@@ -0,0 +1,184 @@
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file async_infer_runner_internal.hpp
+ * @brief Implementation of the async HL infer
+ **/
+
+#ifndef _HAILO_ASYNC_INFER_RUNNER_INTERNAL_HPP_
+#define _HAILO_ASYNC_INFER_RUNNER_INTERNAL_HPP_
+
+#include "network_group/network_group_internal.hpp"
+#include "net_flow/pipeline/pipeline.hpp"
+#include "net_flow/pipeline/vstream_internal.hpp"
+#include "net_flow/ops/argmax_post_process.hpp"
+#include "net_flow/ops/softmax_post_process.hpp"
+#include "net_flow/ops/yolox_post_process.hpp"
+#include "net_flow/ops/ssd_post_process.hpp"
+#include "net_flow/ops/op.hpp"
+
+namespace hailort
+{
+// Container for the elements of one async-infer pipeline: the list of all elements,
+// the central AsyncHwElement, entry/last element lookups by stream name, and shared build params.
+// NOTE(review): copy-assignment is deleted but the copy constructor is not — presumably
+// intentional (create_pipeline returns by value); confirm, otherwise delete both.
+class AsyncPipeline
+{
+public:
+    static Expected<AsyncPipeline> create();
+    AsyncPipeline &operator=(const AsyncPipeline &) = delete;
+
+    virtual ~AsyncPipeline() = default;
+
+    void add_element_to_pipeline(std::shared_ptr<PipelineElement> pipeline_element);
+    void set_async_hw_element(std::shared_ptr<AsyncHwElement> async_hw_element);
+    // Registers an element as the pipeline's entry (keyed by input name) / exit (keyed by output name).
+    void add_entry_element(std::shared_ptr<PipelineElement> pipeline_element, const std::string &input_name);
+    void add_last_element(std::shared_ptr<PipelineElement> pipeline_element, const std::string &output_name);
+    void set_build_params(ElementBuildParams &build_params);
+
+    const std::vector<std::shared_ptr<PipelineElement>>& get_pipeline() const;
+    const std::unordered_map<std::string, std::shared_ptr<PipelineElement>>& get_entry_elements() const;
+    const std::unordered_map<std::string, std::shared_ptr<PipelineElement>>& get_last_elements() const;
+    const std::shared_ptr<AsyncHwElement> get_async_hw_element();
+    const ElementBuildParams get_build_params();
+
+private:
+    AsyncPipeline();
+
+    std::vector<std::shared_ptr<PipelineElement>> m_pipeline_elements;       // all elements, in creation order
+    std::shared_ptr<AsyncHwElement> m_async_hw_element;                      // the central HW inference element
+    std::unordered_map<std::string, std::shared_ptr<PipelineElement>> m_entry_elements;  // input name -> entry element
+    std::unordered_map<std::string, std::shared_ptr<PipelineElement>> m_last_elements;   // output name -> last element
+    ElementBuildParams m_build_params;                                       // shared params for element creation
+};
+
+// Abstract base for async-infer runners: movable, non-copyable (implicitly — copy ops are
+// suppressed by the user-declared move ops), with the pure-virtual async_infer() entry point.
+class AsyncInferRunnerInternal
+{
+public:
+    static Expected<std::shared_ptr<AsyncInferRunnerInternal>> create(ConfiguredNetworkGroupBase &net_group,
+        const std::unordered_map<std::string, hailo_format_t> &inputs_formats, const std::unordered_map<std::string, hailo_format_t> &outputs_formats);
+    AsyncInferRunnerInternal(AsyncInferRunnerInternal &&other) noexcept = default;
+    AsyncInferRunnerInternal &operator=(AsyncInferRunnerInternal &&other) noexcept = default;
+    virtual ~AsyncInferRunnerInternal() = default;
+
+    // Triggers one asynchronous inference over the pipeline; implemented by concrete runners.
+    virtual hailo_status async_infer() = 0;
+    virtual std::string get_pipeline_description() const = 0;
+    virtual std::vector<std::shared_ptr<PipelineElement>> get_pipeline() const = 0;
+
+protected:
+    AsyncInferRunnerInternal();
+    // Shared sticky error state of the pipeline (first failure wins).
+    std::shared_ptr<std::atomic<hailo_status>> m_pipeline_status;
+
+};
+
+
+// Concrete async-infer runner. Owns the AsyncPipeline and the per-call user buffers/callbacks.
+// Non-copyable and non-movable; constructed via create() which builds the whole pipeline.
+class AsyncInferRunnerImpl : public AsyncInferRunnerInternal
+{
+public:
+    static Expected<std::shared_ptr<AsyncInferRunnerImpl>> create(ConfiguredNetworkGroupBase &net_group,
+        const std::unordered_map<std::string, hailo_format_t> &inputs_formats, const std::unordered_map<std::string, hailo_format_t> &outputs_formats,
+        const uint32_t timeout = HAILO_DEFAULT_VSTREAM_TIMEOUT_MS);
+    AsyncInferRunnerImpl(AsyncInferRunnerImpl &&) = delete;
+    AsyncInferRunnerImpl(const AsyncInferRunnerImpl &) = delete;
+    AsyncInferRunnerImpl &operator=(AsyncInferRunnerImpl &&) = delete;
+    AsyncInferRunnerImpl &operator=(const AsyncInferRunnerImpl &) = delete;
+    virtual ~AsyncInferRunnerImpl();
+    AsyncInferRunnerImpl(AsyncPipeline &&async_pipeline);
+
+    virtual hailo_status async_infer() override;
+
+    // TODO: consider removing the methods below (needed for unit testing)
+    void add_element_to_pipeline(std::shared_ptr<PipelineElement> pipeline_element);
+    void add_entry_element(std::shared_ptr<PipelineElement> pipeline_element, const std::string &input_name);
+    void add_last_element(std::shared_ptr<PipelineElement> pipeline_element, const std::string &output_name);
+
+    // Registers the user buffer + completion callback for the next async_infer() call.
+    void set_input(const std::string &input_name, MemoryView &&input_buffer, TransferDoneCallbackAsyncInfer &write_done);
+    void set_output(const std::string &output_name, MemoryView &&output_buffer, TransferDoneCallbackAsyncInfer &read_done);
+
+    std::unordered_map<std::string, std::shared_ptr<PipelineElement>> get_entry_elements();
+    std::unordered_map<std::string, std::shared_ptr<PipelineElement>> get_last_elements();
+
+    virtual std::vector<std::shared_ptr<PipelineElement>> get_pipeline() const override;
+    virtual std::string get_pipeline_description() const override;
+
+    static Expected<size_t> get_min_buffer_pool_size(ConfiguredNetworkGroupBase &net_group);
+
+protected:
+    // --- Pipeline construction (static builders used by create()) ---
+    static Expected<AsyncPipeline> create_pipeline(ConfiguredNetworkGroupBase &net_group, const std::unordered_map<std::string, hailo_format_t> &inputs_formats,
+        const std::unordered_map<std::string, hailo_format_t> &outputs_formats, const uint32_t timeout);
+
+    hailo_status start_pipeline();
+    hailo_status stop_pipeline();
+
+    // --- Stream/format resolution helpers ---
+    static Expected<std::unordered_map<std::string, std::shared_ptr<InputStream>>> get_input_streams_from_net_group(ConfiguredNetworkGroupBase &net_group,
+        const std::unordered_map<std::string, hailo_format_t> &inputs_formats);
+    static Expected<std::unordered_map<std::string, std::shared_ptr<OutputStream>>> get_output_streams_from_net_group(ConfiguredNetworkGroupBase &net_group,
+        const std::unordered_map<std::string, hailo_format_t> &outputs_formats);
+    static Expected<std::unordered_map<std::string, hailo_format_t>> expand_auto_input_formats(ConfiguredNetworkGroupBase &net_group,
+        const std::unordered_map<std::string, hailo_format_t> &inputs_formats);
+    static Expected<std::unordered_map<std::string, hailo_format_t>> expand_auto_output_formats(ConfiguredNetworkGroupBase &net_group,
+        const std::unordered_map<std::string, hailo_format_t> &outputs_formats);
+    static Expected<std::pair<std::string, hailo_format_t>> get_output_format_from_edge_info_name(std::string edge_info_name,
+        const std::unordered_map<std::string, hailo_format_t> &outputs_formats);
+
+    // --- Input-side / output-side chain builders ---
+    static hailo_status create_pre_async_hw_elements(ConfiguredNetworkGroupBase &net_group,
+        std::unordered_map<std::string, std::shared_ptr<InputStream>> &input_streams,
+        const std::unordered_map<std::string, hailo_format_t> &inputs_formats, AsyncPipeline &async_pipeline);
+    static hailo_status create_post_async_hw_elements(ConfiguredNetworkGroupBase &net_group,
+        const std::unordered_map<std::string, hailo_format_t> &expanded_outputs_formats, std::unordered_map<std::string, hailo_format_t> &original_outputs_formats,
+        AsyncPipeline &async_pipeline);
+
+    // --- Per-op-type output flows ---
+    static hailo_status add_argmax_flow(AsyncPipeline &async_pipeline, OutputStreamPtrVector &output_streams,
+        const std::pair<std::string, hailo_format_t> &output_format, const net_flow::PostProcessOpMetadataPtr &argmax_op_metadata);
+    static hailo_status add_softmax_flow(AsyncPipeline &async_pipeline, OutputStreamPtrVector &output_streams,
+        const std::pair<std::string, hailo_format_t> &output_format, const net_flow::PostProcessOpMetadataPtr &softmax_op_metadata);
+    static hailo_status add_ops_flows(AsyncPipeline &async_pipeline,
+        const std::pair<std::string, hailo_format_t> &output_format, net_flow::PostProcessOpMetadataPtr &op_metadata,
+        OutputStreamPtrVector &output_streams, const std::vector<hailo_vstream_info_t> &vstreams_infos);
+    static hailo_status add_output_demux_flow(std::shared_ptr<OutputStreamBase> &output_stream,
+        AsyncPipeline &async_pipeline, const std::unordered_map<std::string, hailo_format_t> &outputs_formats);
+    static hailo_status add_nms_fuse_flow(OutputStreamPtrVector &output_streams, const std::pair<std::string, hailo_format_t> &output_format,
+        AsyncPipeline &async_pipeline);
+    static hailo_status add_nms_flow(AsyncPipeline &async_pipeline, OutputStreamPtrVector &output_streams,
+        const std::pair<std::string, hailo_format_t> &output_format, const std::shared_ptr<hailort::net_flow::Op> &nms_op,
+        const hailo_vstream_info_t &vstream_info);
+    static hailo_status add_iou_flow(AsyncPipeline &async_pipeline, OutputStreamPtrVector &output_streams,
+        const std::pair<std::string, hailo_format_t> &output_format, const net_flow::PostProcessOpMetadataPtr &iou_op_metadata);
+    static hailo_status add_nms_flows(AsyncPipeline &async_pipeline, OutputStreamPtrVector &output_streams,
+        const std::pair<std::string, hailo_format_t> &output_format, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+        const std::vector<hailo_vstream_info_t> &vstreams_infos);
+
+
+    // --- Single-element builders (each links a new element after `final_elem`) ---
+    static Expected<std::shared_ptr<PostInferElement>> add_post_infer_element(const hailo_format_t &output_format, const hailo_nms_info_t &nms_info,
+        AsyncPipeline &async_pipeline, const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+        const hailo_3d_image_shape_t &dst_image_shape, const std::vector<hailo_quant_info_t> &dst_quant_infos, bool is_last_copy_element,
+        std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index = 0);
+    static Expected<std::shared_ptr<LastAsyncElement>> add_last_async_element(AsyncPipeline &async_pipeline,
+        const std::string &output_format_name, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index = 0);
+    static Expected<std::shared_ptr<AsyncPushQueueElement>> add_push_queue_element(const std::string &queue_name, AsyncPipeline &async_pipeline,
+        std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index = 0);
+    static Expected<std::shared_ptr<ConvertNmsToDetectionsElement>> add_nms_to_detections_convert_element(AsyncPipeline &async_pipeline,
+        std::shared_ptr<OutputStream> output_stream, const std::string &element_name, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+        const bool is_last_copy_element, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index = 0);
+    static Expected<std::shared_ptr<RemoveOverlappingBboxesElement>> add_remove_overlapping_bboxes_element(AsyncPipeline &async_pipeline,
+        std::shared_ptr<OutputStream> output_stream, const std::string &element_name, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+        const bool is_last_copy_element, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index = 0);
+    static Expected<std::shared_ptr<FillNmsFormatElement>> add_fill_nms_format_element(AsyncPipeline &async_pipeline,
+        std::shared_ptr<OutputStream> output_stream, const std::string &element_name, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+        const hailo_format_t &output_format, const bool is_last_copy_element, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index = 0);
+
+    static hailo_status finalize_output_flow(std::shared_ptr<OutputStreamBase> &output_stream_base,
+        const std::pair<std::string, hailo_format_t> &output_format, const hailo_nms_info_t &nms_info, const bool is_dma_able,
+        AsyncPipeline &async_pipeline, std::shared_ptr<PipelineElement> final_elem, const uint32_t final_elem_source_index = 0);
+
+    AsyncPipeline m_async_pipeline;
+    // Per-call user buffers and completion callbacks, keyed by stream name.
+    std::unordered_map<std::string, MemoryView> m_input_buffers;
+    std::unordered_map<std::string, TransferDoneCallbackAsyncInfer> m_write_dones;
+    std::unordered_map<std::string, MemoryView> m_output_buffers;
+    std::unordered_map<std::string, TransferDoneCallbackAsyncInfer> m_read_dones;
+    // NOTE(review): `volatile` does not provide thread synchronization — if these flags are
+    // shared between threads, std::atomic<bool> would be the correct type; confirm usage.
+    volatile bool m_is_activated;
+    volatile bool m_is_aborted;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_ASYNC_INFER_RUNNER_INTERNAL_HPP_ */
diff --git a/hailort/libhailort/src/net_flow/pipeline/infer_model.cpp b/hailort/libhailort/src/net_flow/pipeline/infer_model.cpp
new file mode 100644 (file)
index 0000000..3cbf267
--- /dev/null
@@ -0,0 +1,506 @@
+/**
+ * Copyright (c) 2020-2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file infer_model.cpp
+ * @brief Implementation of the async HL infer
+ **/
+
+#include <iostream>
+
+#include "common/utils.hpp"
+#include "hailo/hailort_common.hpp"
+#include "hailo/vdevice.hpp"
+#include "hailo/infer_model.hpp"
+#include "net_flow/pipeline/infer_model_internal.hpp"
+#include "net_flow/pipeline/async_infer_runner_internal.hpp"
+
+#define WAIT_FOR_ASYNC_IN_DTOR_TIMEOUT (10000)
+
+namespace hailort
+{
+
+// Stream name as reported by the vstream info (fixed-size char array in the C struct).
+std::string InferModel::InferStream::Impl::name() const
+{
+    return m_vstream_info.name;
+}
+
+// Frame size in bytes; depends on both the HW vstream info and the user-selected format.
+size_t InferModel::InferStream::Impl::get_frame_size() const
+{
+    return HailoRTCommon::get_frame_size(m_vstream_info, m_user_buffer_format);
+}
+
+// Sets the user buffer data type. FLOAT32 implies de-quantized data, so the
+// quantized flag is cleared; any other type keeps data quantized.
+void InferModel::InferStream::Impl::set_format_type(hailo_format_type_t type)
+{
+    m_user_buffer_format.type = type;
+    if (HAILO_FORMAT_TYPE_FLOAT32 == type) {
+        m_user_buffer_format.flags = HAILO_FORMAT_FLAGS_NONE;
+    } else {
+        m_user_buffer_format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;
+    }
+}
+
+void InferModel::InferStream::Impl::set_format_order(hailo_format_order_t order)
+{
+    m_user_buffer_format.order = order;
+}
+
+hailo_format_t InferModel::InferStream::Impl::get_user_buffer_format()
+{
+    return m_user_buffer_format;
+}
+
+// InferStream is a thin pimpl wrapper; all calls forward to the shared Impl,
+// so copies of an InferStream share the same underlying format state.
+InferModel::InferStream::InferStream(std::shared_ptr<InferModel::InferStream::Impl> pimpl) : m_pimpl(pimpl)
+{
+}
+
+const std::string InferModel::InferStream::name() const
+{
+    return m_pimpl->name();
+}
+
+size_t InferModel::InferStream::get_frame_size() const
+{
+    return m_pimpl->get_frame_size();
+}
+
+void InferModel::InferStream::set_format_type(hailo_format_type_t type)
+{
+    m_pimpl->set_format_type(type);
+}
+
+void InferModel::InferStream::set_format_order(hailo_format_order_t order)
+{
+    m_pimpl->set_format_order(order);
+}
+
+hailo_format_t InferModel::InferStream::get_user_buffer_format()
+{
+    return m_pimpl->get_user_buffer_format();
+}
+
+// Builds the model wrapper. The vectors and name lists are materialized once here
+// so that inputs()/outputs()/get_*_names() can return cheap const references later.
+InferModel::InferModel(VDevice &vdevice, Hef &&hef, std::unordered_map<std::string, InferModel::InferStream> &&inputs,
+        std::unordered_map<std::string, InferModel::InferStream> &&outputs)
+    : m_vdevice(vdevice), m_hef(std::move(hef)), m_inputs(std::move(inputs)), m_outputs(std::move(outputs))
+{
+    m_inputs_vector.reserve(m_inputs.size());
+    m_input_names.reserve(m_inputs.size());
+    for (const auto &pair : m_inputs) {
+        m_inputs_vector.push_back(pair.second);
+        m_input_names.push_back(pair.first);
+    }
+
+    m_outputs_vector.reserve(m_outputs.size());
+    m_output_names.reserve(m_outputs.size());
+    for (const auto &pair : m_outputs) {
+        m_outputs_vector.push_back(pair.second);
+        m_output_names.push_back(pair.first);
+    }
+}
+
+// Move constructor: m_vdevice is a reference_wrapper, so "moving" it just copies the reference.
+InferModel::InferModel(InferModel &&other) :
+    m_vdevice(std::move(other.m_vdevice)),
+    m_hef(std::move(other.m_hef)),
+    m_inputs(std::move(other.m_inputs)),
+    m_outputs(std::move(other.m_outputs)),
+    m_inputs_vector(std::move(other.m_inputs_vector)),
+    m_outputs_vector(std::move(other.m_outputs_vector)),
+    m_input_names(std::move(other.m_input_names)),
+    m_output_names(std::move(other.m_output_names))
+{
+}
+
+// TODO: document that this will check validity of format types/orders
+// Configures the HEF on the vdevice for async inference and wires the user-selected
+// buffer formats into an AsyncInferRunnerImpl. Only the first (single) network group is used.
+Expected<ConfiguredInferModel> InferModel::configure(const std::string &network_name)
+{
+    CHECK_AS_EXPECTED("" == network_name, HAILO_NOT_IMPLEMENTED, "Passing network name is not supported yet!");
+
+    auto configure_params = m_vdevice.get().create_configure_params(m_hef);
+    CHECK_EXPECTED(configure_params);
+
+    // Force every stream into async mode - the runner only works with async streams.
+    for (auto &network_group_name_params_pair : *configure_params) {
+        for (auto &stream_params_name_pair : network_group_name_params_pair.second.stream_params_by_name) {
+            stream_params_name_pair.second.flags = HAILO_STREAM_FLAGS_ASYNC;
+        }
+    }
+
+    auto network_groups = m_vdevice.get().configure(m_hef, configure_params.value());
+    CHECK_EXPECTED(network_groups);
+
+    std::unordered_map<std::string, hailo_format_t> inputs_formats;
+    std::unordered_map<std::string, hailo_format_t> outputs_formats;
+
+    auto input_vstream_infos = network_groups.value()[0]->get_input_vstream_infos();
+    CHECK_EXPECTED(input_vstream_infos);
+
+    // Collect the formats the user configured on each InferStream (set via set_format_type/order).
+    for (const auto &vstream_info : input_vstream_infos.value()) {
+        inputs_formats[vstream_info.name] = m_inputs.at(vstream_info.name).get_user_buffer_format();
+    }
+
+    auto output_vstream_infos = network_groups.value()[0]->get_output_vstream_infos();
+    CHECK_EXPECTED(output_vstream_infos);
+
+    for (const auto &vstream_info : output_vstream_infos.value()) {
+        outputs_formats[vstream_info.name] = m_outputs.at(vstream_info.name).get_user_buffer_format();
+    }
+
+    // downcasting from ConfiguredNetworkGroup to ConfiguredNetworkGroupBase since we need some functions from ConfiguredNetworkGroupBase
+    std::shared_ptr<ConfiguredNetworkGroupBase> configured_net_group_base = std::dynamic_pointer_cast<ConfiguredNetworkGroupBase>(network_groups.value()[0]);
+    CHECK_NOT_NULL_AS_EXPECTED(configured_net_group_base, HAILO_INTERNAL_FAILURE);
+
+    auto async_infer_runner = AsyncInferRunnerImpl::create(*configured_net_group_base, inputs_formats, outputs_formats);
+    CHECK_EXPECTED(async_infer_runner);
+
+    auto configured_infer_model_pimpl = make_shared_nothrow<ConfiguredInferModelImpl>(network_groups.value()[0], async_infer_runner.release(),
+        get_input_names(), get_output_names());
+    CHECK_NOT_NULL_AS_EXPECTED(configured_infer_model_pimpl, HAILO_OUT_OF_HOST_MEMORY);
+
+    return ConfiguredInferModel(configured_infer_model_pimpl);
+}
+
+Expected<InferModel::InferStream> InferModel::input()
+{
+    CHECK_AS_EXPECTED(1 == m_inputs.size(), HAILO_INVALID_OPERATION, "Model has more than one input!");
+    auto copy = m_inputs.begin()->second;
+    return copy;
+}
+
+Expected<InferModel::InferStream> InferModel::output()
+{
+    CHECK_AS_EXPECTED(1 == m_outputs.size(), HAILO_INVALID_OPERATION, "Model has more than one output!");
+    auto copy = m_outputs.begin()->second;
+    return copy;
+}
+
+Expected<InferModel::InferStream> InferModel::input(const std::string &name)
+{
+    CHECK_AS_EXPECTED(contains(m_inputs, name), HAILO_NOT_FOUND, "Input {} not found!", name);
+    auto copy = m_inputs.at(name);
+    return copy;
+}
+
+Expected<InferModel::InferStream> InferModel::output(const std::string &name)
+{
+    CHECK_AS_EXPECTED(contains(m_outputs, name), HAILO_NOT_FOUND, "Output {}, not found!", name);
+    auto copy = m_outputs.at(name);
+    return copy;
+}
+
+// Accessors below return references to vectors built once in the constructor,
+// so they are cheap and stable for the lifetime of the InferModel.
+const std::vector<InferModel::InferStream> &InferModel::inputs() const
+{
+    return m_inputs_vector;
+}
+
+const std::vector<InferModel::InferStream> &InferModel::outputs() const
+{
+    return m_outputs_vector;
+}
+
+const std::vector<std::string> &InferModel::get_input_names() const
+{
+    return m_input_names;
+}
+
+const std::vector<std::string> &InferModel::get_output_names() const
+{
+    return m_output_names;
+}
+
+// ConfiguredInferModel is a pimpl facade; every call forwards to ConfiguredInferModelImpl.
+ConfiguredInferModel::ConfiguredInferModel(std::shared_ptr<ConfiguredInferModelImpl> pimpl) : m_pimpl(pimpl)
+{
+}
+
+Expected<ConfiguredInferModel::Bindings> ConfiguredInferModel::create_bindings()
+{
+    return m_pimpl->create_bindings();
+}
+
+hailo_status ConfiguredInferModel::wait_for_async_ready(std::chrono::milliseconds timeout)
+{
+    return m_pimpl->wait_for_async_ready(timeout);
+}
+
+hailo_status ConfiguredInferModel::activate()
+{
+    return m_pimpl->activate();
+}
+
+void ConfiguredInferModel::deactivate()
+{
+    m_pimpl->deactivate();
+}
+
+// Synchronous convenience wrapper around run_async() + wait().
+hailo_status ConfiguredInferModel::run(ConfiguredInferModel::Bindings bindings, std::chrono::milliseconds timeout)
+{
+    return m_pimpl->run(bindings, timeout);
+}
+
+Expected<AsyncInferJob> ConfiguredInferModel::run_async(ConfiguredInferModel::Bindings bindings,
+    std::function<void(const CompletionInfoAsyncInfer &)> callback)
+{
+    return m_pimpl->run_async(bindings, callback);
+}
+
+// Holds the configured network group, the async runner, and the stream-name lists
+// used to bind user buffers on each run. m_ongoing_parallel_transfers starts at 0.
+ConfiguredInferModelImpl::ConfiguredInferModelImpl(std::shared_ptr<ConfiguredNetworkGroup> cng,
+    std::shared_ptr<AsyncInferRunnerImpl> async_infer_runner, 
+    const std::vector<std::string> &input_names,
+    const std::vector<std::string> &output_names) : m_cng(cng), m_async_infer_runner(async_infer_runner),
+    m_ongoing_parallel_transfers(0), m_input_names(input_names), m_output_names(output_names)
+{
+}
+
+// Creates a fresh Bindings object with one (initially buffer-less) InferStream
+// per input and output vstream of the configured network group.
+Expected<ConfiguredInferModel::Bindings> ConfiguredInferModelImpl::create_bindings()
+{
+    std::unordered_map<std::string, ConfiguredInferModel::Bindings::InferStream> inputs;
+    std::unordered_map<std::string, ConfiguredInferModel::Bindings::InferStream> outputs;
+
+    auto input_vstream_infos = m_cng->get_input_vstream_infos();
+    CHECK_EXPECTED(input_vstream_infos);
+
+    for (const auto &vstream_info : input_vstream_infos.value()) {
+        auto pimpl = make_shared_nothrow<ConfiguredInferModel::Bindings::InferStream::Impl>(vstream_info);
+        CHECK_NOT_NULL_AS_EXPECTED(pimpl, HAILO_OUT_OF_HOST_MEMORY);
+
+        ConfiguredInferModel::Bindings::InferStream stream(pimpl);
+        inputs.emplace(vstream_info.name, std::move(stream));
+    }
+
+    auto output_vstream_infos = m_cng->get_output_vstream_infos();
+    CHECK_EXPECTED(output_vstream_infos);
+
+    for (const auto &vstream_info : output_vstream_infos.value()) {
+        auto pimpl = make_shared_nothrow<ConfiguredInferModel::Bindings::InferStream::Impl>(vstream_info);
+        CHECK_NOT_NULL_AS_EXPECTED(pimpl, HAILO_OUT_OF_HOST_MEMORY);
+
+        ConfiguredInferModel::Bindings::InferStream stream(pimpl);
+        outputs.emplace(vstream_info.name, std::move(stream));
+    }
+
+    return ConfiguredInferModel::Bindings(std::move(inputs), std::move(outputs));
+}
+
+// Blocks until the number of in-flight inferences drops below the smallest low-level
+// buffer pool size, i.e. until run_async() can be called without queueing indefinitely.
+// Returns HAILO_TIMEOUT if the condition is not met within 'timeout'.
+hailo_status ConfiguredInferModelImpl::wait_for_async_ready(std::chrono::milliseconds timeout)
+{
+    std::unique_lock<std::mutex> lock(m_mutex);
+
+    // downcasting from ConfiguredNetworkGroup to ConfiguredNetworkGroupBase since we need some functions from ConfiguredNetworkGroupBase
+    std::shared_ptr<ConfiguredNetworkGroupBase> configured_net_group_base = std::dynamic_pointer_cast<ConfiguredNetworkGroupBase>(m_cng);
+    CHECK_NOT_NULL(configured_net_group_base, HAILO_INTERNAL_FAILURE);
+
+    // NOTE(review): queried under m_mutex; assumed cheap/non-blocking - confirm it cannot deadlock
+    // against the completion path that also takes m_mutex.
+    auto low_level_queue_size = m_async_infer_runner->get_min_buffer_pool_size(*configured_net_group_base);
+    CHECK_EXPECTED_AS_STATUS(low_level_queue_size);
+
+    bool was_successful = m_cv.wait_for(lock, timeout, [this, low_level_queue_size  = low_level_queue_size.value()] () -> bool {
+        return m_ongoing_parallel_transfers < low_level_queue_size;
+    });
+    CHECK(was_successful, HAILO_TIMEOUT);
+
+    return HAILO_SUCCESS;
+}
+
+// Activates the underlying network group and keeps the activation handle alive
+// (in m_ang) until deactivate() is called.
+hailo_status ConfiguredInferModelImpl::activate()
+{
+    auto activated_ng = m_cng->activate();
+    CHECK_EXPECTED_AS_STATUS(activated_ng);
+
+    m_ang = activated_ng.release();
+    return HAILO_SUCCESS;
+}
+
+// Dropping the activation handle deactivates the network group (RAII).
+void ConfiguredInferModelImpl::deactivate()
+{
+    m_ang = nullptr;
+}
+
+// Synchronous inference: submits an async job with a no-op callback and waits
+// for it to complete, up to 'timeout'.
+hailo_status ConfiguredInferModelImpl::run(ConfiguredInferModel::Bindings bindings, std::chrono::milliseconds timeout)
+{
+    auto job = run_async(bindings, [] (const CompletionInfoAsyncInfer &) {});
+    CHECK_EXPECTED_AS_STATUS(job);
+
+    auto status = job->wait(timeout);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Submits one asynchronous inference over the buffers currently set in 'bindings'.
+// The returned job completes (and the user callback fires once) after every input
+// and output transfer is done. 'bindings' is captured by value so the user buffers'
+// binding objects stay alive for the duration of the transfer.
+Expected<AsyncInferJob> ConfiguredInferModelImpl::run_async(ConfiguredInferModel::Bindings bindings,
+    std::function<void(const CompletionInfoAsyncInfer &)> callback)
+{
+    // The job tracks one pending transfer per input and per output stream.
+    auto job_pimpl = make_shared_nothrow<AsyncInferJob::Impl>(static_cast<uint32_t>(m_input_names.size() + m_output_names.size()));
+    CHECK_NOT_NULL_AS_EXPECTED(job_pimpl, HAILO_OUT_OF_HOST_MEMORY);
+    AsyncInferJob job(job_pimpl);
+
+    // Shared per-stream completion callback; the last stream to finish triggers
+    // the user callback and releases one parallel-transfer slot.
+    TransferDoneCallbackAsyncInfer transfer_done = [this, bindings, job_pimpl, callback]
+    (const CompletionInfoAsyncInferInternal &internal_completion_info) {
+        bool should_call_callback = job_pimpl->stream_done();
+        if (should_call_callback) {
+            {
+                std::unique_lock<std::mutex> lock(m_mutex);
+                m_ongoing_parallel_transfers--;
+            }
+            m_cv.notify_all();
+
+            CompletionInfoAsyncInfer completion_info(bindings, internal_completion_info.status);
+            callback(completion_info);
+        }
+    };
+
+    for (const auto &input_name : m_input_names) {
+        m_async_infer_runner->set_input(input_name, bindings.input(input_name)->get_buffer(), transfer_done);
+    }
+
+    for (const auto &output_name : m_output_names) {
+        m_async_infer_runner->set_output(output_name, bindings.output(output_name)->get_buffer(), transfer_done);
+    }
+
+    {
+        std::unique_lock<std::mutex> lock(m_mutex);
+        m_ongoing_parallel_transfers++;
+    }
+    m_cv.notify_all();
+
+    // NOTE(review): if async_infer() fails here, m_ongoing_parallel_transfers is not
+    // decremented back - verify whether the runner still invokes transfer_done on failure.
+    auto status = m_async_infer_runner->async_infer();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return job;
+}
+
+// A freshly-created job waits for completion in its destructor unless wait()
+// succeeded or detach() was called.
+AsyncInferJob::AsyncInferJob(std::shared_ptr<Impl> pimpl) : m_pimpl(pimpl), m_should_wait_in_dtor(true)
+{
+}
+
+// Move transfers the wait-in-dtor responsibility to the new object.
+AsyncInferJob::AsyncInferJob(AsyncInferJob &&other) :
+    m_pimpl(std::move(other.m_pimpl)),
+    m_should_wait_in_dtor(std::exchange(other.m_should_wait_in_dtor, false))
+{
+}
+
+// NOTE(review): move-assignment silently drops the overwritten job without waiting
+// on it - confirm callers never overwrite a still-pending job.
+AsyncInferJob &AsyncInferJob::operator=(AsyncInferJob &&other)
+{
+    m_pimpl = std::move(other.m_pimpl);
+    m_should_wait_in_dtor = std::exchange(other.m_should_wait_in_dtor, false);
+    return *this;
+}
+
+// Blocks until completion (bounded by WAIT_FOR_ASYNC_IN_DTOR_TIMEOUT) so the
+// buffers referenced by the in-flight transfer are not freed prematurely.
+AsyncInferJob::~AsyncInferJob()
+{
+    if (m_should_wait_in_dtor) {
+        auto status = wait(std::chrono::milliseconds(WAIT_FOR_ASYNC_IN_DTOR_TIMEOUT));
+        if (HAILO_SUCCESS != status) {
+            LOGGER__CRITICAL("Could not finish async infer request! status = {}", status);
+        }
+    }
+}
+
+// Waits for all stream transfers of this job; on success the destructor no longer waits.
+hailo_status AsyncInferJob::wait(std::chrono::milliseconds timeout)
+{
+    auto status = m_pimpl->wait(timeout);
+    CHECK_SUCCESS(status);
+
+    m_should_wait_in_dtor = false;
+    return HAILO_SUCCESS;
+}
+
+// Gives up ownership of the wait: the destructor will not block on this job.
+void AsyncInferJob::detach()
+{
+    m_should_wait_in_dtor = false;
+}
+
+// Counts down one pending transfer per stream; reaching zero means the job is done.
+AsyncInferJob::Impl::Impl(uint32_t streams_count)
+{
+    m_ongoing_transfers = streams_count;
+}
+
+// Blocks until all transfers completed or 'timeout' elapsed (HAILO_TIMEOUT).
+hailo_status AsyncInferJob::Impl::wait(std::chrono::milliseconds timeout)
+{
+    std::unique_lock<std::mutex> lock(m_mutex);
+    bool was_successful = m_cv.wait_for(lock, timeout, [this] () -> bool {
+        return (0 == m_ongoing_transfers);
+    });
+    CHECK(was_successful, HAILO_TIMEOUT);
+
+    return HAILO_SUCCESS;
+}
+
+// Called from each stream's completion path; returns true exactly once -
+// for the stream that brings the pending count to zero - so the user callback
+// is invoked a single time per job.
+bool AsyncInferJob::Impl::stream_done()
+{
+    bool should_call_callback = false;
+    {
+        std::unique_lock<std::mutex> lock(m_mutex);
+        m_ongoing_transfers--;
+        should_call_callback = (0 == m_ongoing_transfers);
+    }
+    m_cv.notify_all();
+    return should_call_callback;
+}
+
+ConfiguredInferModel::Bindings::Bindings(std::unordered_map<std::string, Bindings::InferStream> &&inputs,
+        std::unordered_map<std::string, Bindings::InferStream> &&outputs) :
+    m_inputs(std::move(inputs)), m_outputs(std::move(outputs))
+{
+}
+
+Expected<ConfiguredInferModel::Bindings::InferStream> ConfiguredInferModel::Bindings::input()
+{
+    CHECK_AS_EXPECTED(1 == m_inputs.size(), HAILO_INVALID_OPERATION, "Model has more than one input!");
+    auto copy = m_inputs.begin()->second;
+    return copy;
+}
+
+Expected<ConfiguredInferModel::Bindings::InferStream> ConfiguredInferModel::Bindings::output()
+{
+    CHECK_AS_EXPECTED(1 == m_outputs.size(), HAILO_INVALID_OPERATION, "Model has more than one output!");
+    auto copy = m_outputs.begin()->second;
+    return copy;
+}
+
+Expected<ConfiguredInferModel::Bindings::InferStream> ConfiguredInferModel::Bindings::input(const std::string &name)
+{
+    CHECK_AS_EXPECTED(contains(m_inputs, name), HAILO_NOT_FOUND, "Input {} not found!", name);
+    auto copy = m_inputs.at(name);
+    return copy;
+}
+
+Expected<ConfiguredInferModel::Bindings::InferStream> ConfiguredInferModel::Bindings::output(const std::string &name)
+{
+    CHECK_AS_EXPECTED(contains(m_outputs, name), HAILO_NOT_FOUND, "Output {}, not found!", name);
+    auto copy = m_outputs.at(name);
+    return copy;
+}
+
+// Per-stream binding state; created with an empty (null) buffer view until
+// the user calls set_buffer().
+ConfiguredInferModel::Bindings::InferStream::Impl::Impl(const hailo_vstream_info_t &vstream_info) : m_name(vstream_info.name)
+{
+}
+
+// Binds the user buffer for the next inference. The view is not copied -
+// the memory must stay valid until the transfer completes.
+hailo_status ConfiguredInferModel::Bindings::InferStream::Impl::set_buffer(MemoryView view)
+{
+    m_view = view;
+    return HAILO_SUCCESS;
+}
+
+MemoryView ConfiguredInferModel::Bindings::InferStream::Impl::get_buffer()
+{
+    return m_view;
+}
+
+void ConfiguredInferModel::Bindings::InferStream::Impl::set_stream_callback(TransferDoneCallbackAsyncInfer callback)
+{
+    m_stream_callback = callback;
+}
+
+// Public pimpl wrapper - copies share the same underlying binding state.
+ConfiguredInferModel::Bindings::InferStream::InferStream(std::shared_ptr<Bindings::InferStream::Impl> pimpl) : m_pimpl(pimpl)
+{
+}
+
+hailo_status ConfiguredInferModel::Bindings::InferStream::set_buffer(MemoryView view)
+{
+    return m_pimpl->set_buffer(view);
+}
+
+MemoryView ConfiguredInferModel::Bindings::InferStream::get_buffer()
+{
+    return m_pimpl->get_buffer();
+}
+
+} /* namespace hailort */
diff --git a/hailort/libhailort/src/net_flow/pipeline/infer_model_internal.hpp b/hailort/libhailort/src/net_flow/pipeline/infer_model_internal.hpp
new file mode 100644 (file)
index 0000000..2f81dc8
--- /dev/null
@@ -0,0 +1,95 @@
+/**
+ * Copyright (c) 2020-2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file infer_model_internal.hpp
+ * @brief Implementation of the infer model
+ **/
+
+#ifndef _HAILO_INFER_MODEL_INTERNAL_HPP_
+#define _HAILO_INFER_MODEL_INTERNAL_HPP_
+
+#include "hailo/vstream.hpp"
+#include "net_flow/pipeline/async_infer_runner_internal.hpp"
+
+namespace hailort
+{
+
+// Per-stream binding state: the user buffer bound for the next inference and
+// an optional transfer-done callback. Shared (via shared_ptr) by all copies
+// of the public InferStream wrapper.
+class ConfiguredInferModel::Bindings::InferStream::Impl
+{
+public:
+    Impl(const hailo_vstream_info_t &vstream_info);
+    hailo_status set_buffer(MemoryView view);
+    MemoryView get_buffer();
+    void set_stream_callback(TransferDoneCallbackAsyncInfer callback);
+
+private:
+    std::string m_name;
+    MemoryView m_view;
+    TransferDoneCallbackAsyncInfer m_stream_callback;
+};
+
+// Holds a vstream's info plus the user-selected buffer format.
+// Defaults to AUTO order/type with quantized data until overridden.
+class InferModel::InferStream::Impl
+{
+public:
+    Impl(const hailo_vstream_info_t &vstream_info) : m_vstream_info(vstream_info)
+    {
+        m_user_buffer_format.order = HAILO_FORMAT_ORDER_AUTO;
+        m_user_buffer_format.type = HAILO_FORMAT_TYPE_AUTO;
+        m_user_buffer_format.flags = HAILO_FORMAT_FLAGS_QUANTIZED;
+    }
+
+    std::string name() const;
+    size_t get_frame_size() const;
+    void set_format_type(hailo_format_type_t type);
+    void set_format_order(hailo_format_order_t order);
+    hailo_format_t get_user_buffer_format();
+
+private:
+    hailo_vstream_info_t m_vstream_info;
+    hailo_format_t m_user_buffer_format;
+};
+
+// Tracks the remaining stream transfers of one async inference job.
+// wait() blocks until the count hits zero; stream_done() decrements it.
+class AsyncInferJob::Impl
+{
+public:
+    Impl(uint32_t streams_count);
+    hailo_status wait(std::chrono::milliseconds timeout);
+    bool stream_done();
+
+private:
+    std::condition_variable m_cv;
+    std::mutex m_mutex;
+    std::atomic_uint32_t m_ongoing_transfers;
+};
+
+// Backend of ConfiguredInferModel: owns the configured network group, its
+// activation handle, the async runner, and the in-flight transfer counter
+// used by wait_for_async_ready().
+class ConfiguredInferModelImpl
+{
+public:
+    ConfiguredInferModelImpl(std::shared_ptr<ConfiguredNetworkGroup> cng,
+        std::shared_ptr<AsyncInferRunnerImpl> async_infer_runner,
+        const std::vector<std::string> &input_names,
+        const std::vector<std::string> &output_names);
+    Expected<ConfiguredInferModel::Bindings> create_bindings();
+    hailo_status wait_for_async_ready(std::chrono::milliseconds timeout);
+    hailo_status activate();
+    void deactivate();
+    hailo_status run(ConfiguredInferModel::Bindings bindings, std::chrono::milliseconds timeout);
+    Expected<AsyncInferJob> run_async(ConfiguredInferModel::Bindings bindings,
+        std::function<void(const CompletionInfoAsyncInfer &)> callback);
+
+private:
+    std::shared_ptr<ConfiguredNetworkGroup> m_cng;
+    std::unique_ptr<ActivatedNetworkGroup> m_ang;        // non-null only while activated
+    std::shared_ptr<AsyncInferRunnerImpl> m_async_infer_runner;
+    uint32_t m_ongoing_parallel_transfers;               // guarded by m_mutex
+    std::mutex m_mutex;
+    std::condition_variable m_cv;
+    std::vector<std::string> m_input_names;
+    std::vector<std::string> m_output_names;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_INFER_MODEL_INTERNAL_HPP_ */
index f619982e16104ce028ed361b115779b1d780b92f..0cc4ad603a616c76513d091af973b1ae35bb2641 100644 (file)
@@ -335,4 +335,58 @@ std::vector<std::reference_wrapper<OutputVStream>> InferVStreams::get_output_vst
     return vsterams_refs;
 }
 
+hailo_status InferVStreams::set_nms_score_threshold(float32_t threshold)
+{
+    // Check that we have NMS outputs in the model
+    auto has_nms_output = std::any_of(m_outputs.begin(), m_outputs.end(), [](const auto &vs)
+    {
+        return HailoRTCommon::is_nms(vs.get_info());
+    });
+    CHECK(has_nms_output, HAILO_INVALID_OPERATION, "'set_nms_score_threshold()' is called, but there is no NMS output in this model.");
+
+    for (auto &ouput_vstream : m_outputs) {
+        if (HailoRTCommon::is_nms(ouput_vstream.get_info())) {
+            CHECK_SUCCESS(ouput_vstream.set_nms_score_threshold(threshold));
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status InferVStreams::set_nms_iou_threshold(float32_t threshold)
+{
+    // Check that we have NMS outputs in the model
+    auto has_nms_output = std::any_of(m_outputs.begin(), m_outputs.end(), [](const auto &vs)
+    {
+        return HailoRTCommon::is_nms(vs.get_info());
+    });
+    CHECK(has_nms_output, HAILO_INVALID_OPERATION, "'set_nms_iou_threshold()' is called, but there is no NMS output in this model.");
+
+    for (auto &ouput_vstream : m_outputs) {
+        if (HailoRTCommon::is_nms(ouput_vstream.get_info())) {
+            CHECK_SUCCESS(ouput_vstream.set_nms_iou_threshold(threshold));
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status InferVStreams::set_nms_max_proposals_per_class(uint32_t max_proposals_per_class)
+{
+    // Check that we have NMS outputs in the model
+    auto has_nms_output = std::any_of(m_outputs.begin(), m_outputs.end(), [](const auto &vs)
+    {
+        return HailoRTCommon::is_nms(vs.get_info());
+    });
+    CHECK(has_nms_output, HAILO_INVALID_OPERATION, "'set_nms_max_proposals_per_class()' is called, but there is no NMS output in this model.");
+
+    for (auto &ouput_vstream : m_outputs) {
+        if (HailoRTCommon::is_nms(ouput_vstream.get_info())) {
+            CHECK_SUCCESS(ouput_vstream.set_nms_max_proposals_per_class(max_proposals_per_class));
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
 } /* namespace hailort */
index 521cc867042779415096268d58a16dd09545e445..c903be58944508d07e41208be6bab42044f89802 100644 (file)
 #include "common/runtime_statistics_internal.hpp"
 #include "common/os_utils.hpp"
 
+#include "hailo/expected.hpp"
+#include "hailo/hailort.h"
 #include "net_flow/pipeline/pipeline.hpp"
-
+#include <cstdint>
 
 namespace hailort
 {
 
+#define NUMBER_OF_PLANES_NV12_NV21 2
+#define NUMBER_OF_PLANES_I420 3
+
 PipelineBuffer::Metadata::Metadata(PipelineTimePoint start_time) :
     m_start_time(start_time)
 {}
@@ -41,60 +46,84 @@ PipelineBuffer::PipelineBuffer() :
 
 PipelineBuffer::PipelineBuffer(Type type) :
     m_type(type),
-    m_buffer(),
-    m_should_release_buffer(false),
     m_pool(nullptr),
     m_view(),
-    m_metadata()
+    m_exec_done([](CompletionInfoAsyncInferInternal /*completion_info*/) {}),
+    m_metadata(),
+    m_is_user_buffer(false),
+    m_action_status(HAILO_SUCCESS)
 {}
 
-PipelineBuffer::PipelineBuffer(MemoryView view, bool should_measure) :
+PipelineBuffer::PipelineBuffer(hailo_status action_status) :
     m_type(Type::DATA),
-    m_buffer(),
-    m_should_release_buffer(false),
     m_pool(nullptr),
+    m_view(),
+    m_exec_done([](CompletionInfoAsyncInferInternal /*completion_info*/) {}),
+    m_metadata(),
+    m_is_user_buffer(false),
+    m_action_status(action_status)
+{}
+
+PipelineBuffer::PipelineBuffer(MemoryView view, bool is_user_buffer, BufferPoolPtr pool, bool should_measure, hailo_status action_status) :
+    m_type(Type::DATA),
+    m_pool(pool),
     m_view(view),
-    m_metadata(Metadata(add_timestamp(should_measure)))
+    m_exec_done([](CompletionInfoAsyncInferInternal /*completion_info*/) {}),
+    m_metadata(Metadata(add_timestamp(should_measure))),
+    m_is_user_buffer(is_user_buffer),
+    m_action_status(action_status)
 {}
 
-PipelineBuffer::PipelineBuffer(Buffer &&buffer, BufferPoolPtr pool, bool should_measure) :
+PipelineBuffer::PipelineBuffer(MemoryView view, const TransferDoneCallbackAsyncInfer &exec_done, bool is_user_buffer, BufferPoolPtr pool, bool should_measure,
+    hailo_status action_status) :
     m_type(Type::DATA),
-    m_buffer(std::move(buffer)),
-    m_should_release_buffer(true),
     m_pool(pool),
-    m_view(m_buffer),
-    m_metadata(Metadata(add_timestamp(should_measure)))
+    m_view(view),
+    m_exec_done(exec_done),
+    m_metadata(Metadata(add_timestamp(should_measure))),
+    m_is_user_buffer(is_user_buffer),
+    m_action_status(action_status)
 {}
 
+PipelineBuffer::PipelineBuffer(hailo_pix_buffer_t buffer) :
+    m_type(Type::DATA),
+    m_pool(nullptr),
+    m_view(),
+    m_metadata(),
+    m_is_user_buffer(false)
+{
+    set_additional_data(std::make_shared<PixBufferPipelineData>(buffer));
+}
+
 PipelineBuffer::PipelineBuffer(PipelineBuffer &&other) :
     m_type(other.m_type),
-    m_buffer(std::move(other.m_buffer)),
-    m_should_release_buffer(std::exchange(other.m_should_release_buffer, false)),
     m_pool(std::move(other.m_pool)),
     m_view(std::move(other.m_view)),
-    m_metadata(std::move(other.m_metadata))
+    m_exec_done(std::move(other.m_exec_done)),
+    m_metadata(std::move(other.m_metadata)),
+    m_is_user_buffer(std::move(other.m_is_user_buffer)),
+    m_action_status(std::move(other.m_action_status))
 {}
 
 PipelineBuffer &PipelineBuffer::operator=(PipelineBuffer &&other)
 {
-    m_type = other.m_type,
-    m_buffer = std::move(other.m_buffer);
-    m_should_release_buffer = std::exchange(other.m_should_release_buffer, false);
+    m_type = other.m_type;
     m_pool = std::move(other.m_pool);
     m_view = std::move(other.m_view);
+    m_exec_done = std::move(other.m_exec_done);
     m_metadata = std::move(other.m_metadata);
+    m_is_user_buffer = std::move(other.m_is_user_buffer);
+    m_action_status = std::move(other.m_action_status);
     return *this;
 }
 
 PipelineBuffer::~PipelineBuffer()
 {
-    if (!m_should_release_buffer) {
-        return;
-    }
-
-    hailo_status status = m_pool->release_buffer(std::move(m_buffer));
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Releasing buffer in buffer pool failed! status = {}", status);
+    if ((nullptr != m_pool) && (!m_is_user_buffer)) {
+        hailo_status status = m_pool->release_buffer(m_view);
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Releasing buffer in buffer pool failed! status = {}", status);
+        }
     }
 }
 
@@ -128,18 +157,100 @@ PipelineBuffer::Metadata PipelineBuffer::get_metadata() const
     return m_metadata;
 }
 
-void PipelineBuffer::set_metadata(Metadata &&val) 
+// Returns this buffer as a planar pix-buffer for the given YUV order.
+// If a PixBufferPipelineData was attached (multi-planar user input), it is validated
+// against 'order' and returned as-is; otherwise the single contiguous view is split
+// into planes assuming tightly-packed Y then chroma data.
+Expected<hailo_pix_buffer_t> PipelineBuffer::as_hailo_pix_buffer(hailo_format_order_t order)
+{
+    auto pix_buffer = get_metadata().get_additional_data<PixBufferPipelineData>();
+
+    if (nullptr == pix_buffer) {
+        switch(order){
+            case HAILO_FORMAT_ORDER_NV12:
+            case HAILO_FORMAT_ORDER_NV21: {
+                CHECK_AS_EXPECTED(0 == (m_view.size() % 3), HAILO_INVALID_ARGUMENT, "buffer size must be divisible by 3");
+
+                // NV12/NV21: Y plane is 2/3 of the buffer, interleaved UV plane is 1/3.
+                auto y_plane_size = m_view.size() * 2 / 3;
+                auto uv_plane_size = m_view.size() * 1 / 3;
+
+                auto uv_data_ptr = reinterpret_cast<uint8_t*>(m_view.data()) + y_plane_size;
+
+                // Plane stride is set equal to the plane size (no padding assumed).
+                hailo_pix_buffer_plane_t y {uint32_t(y_plane_size), uint32_t(y_plane_size), m_view.data()};
+                hailo_pix_buffer_plane_t uv {uint32_t(uv_plane_size), uint32_t(uv_plane_size), uv_data_ptr};
+                hailo_pix_buffer_t buffer{0, {y, uv}, NUMBER_OF_PLANES_NV12_NV21};
+
+                return buffer;
+            }
+            case HAILO_FORMAT_ORDER_I420: {
+                CHECK_AS_EXPECTED(0 == (m_view.size() % 6), HAILO_INVALID_ARGUMENT, "buffer size must be divisible by 6");
+
+                // I420: Y plane is 2/3 of the buffer, U and V planes are 1/6 each.
+                auto y_plane_size = m_view.size() * 2 / 3;
+                auto u_plane_size = m_view.size() * 1 / 6;
+                auto v_plane_size = m_view.size() * 1 / 6;
+
+                auto u_data_ptr = (char*)m_view.data() + y_plane_size;
+                auto v_data_ptr = u_data_ptr + u_plane_size;
+
+                hailo_pix_buffer_plane_t y {uint32_t(y_plane_size), uint32_t(y_plane_size), m_view.data()};
+                hailo_pix_buffer_plane_t u {uint32_t(u_plane_size), uint32_t(u_plane_size), u_data_ptr};
+                hailo_pix_buffer_plane_t v {uint32_t(v_plane_size), uint32_t(v_plane_size), v_data_ptr};
+                hailo_pix_buffer_t buffer{0, {y, u, v}, NUMBER_OF_PLANES_I420};
+
+                return buffer;
+            }
+            default: {
+                CHECK_AS_EXPECTED(false, HAILO_INTERNAL_FAILURE, "unsupported format order");
+            }
+        }
+    } else {
+        // Pre-attached pix buffer: only verify the plane count matches the requested order.
+        uint32_t expected_number_of_planes;
+        switch(order){
+            case HAILO_FORMAT_ORDER_NV12:
+            case HAILO_FORMAT_ORDER_NV21: {
+                expected_number_of_planes = NUMBER_OF_PLANES_NV12_NV21;
+                break;
+            }
+            case HAILO_FORMAT_ORDER_I420: {
+                expected_number_of_planes = NUMBER_OF_PLANES_I420;
+                break;
+            }
+            default: {
+                CHECK_AS_EXPECTED(false, HAILO_INTERNAL_FAILURE, "unsupported format order");
+            }
+        }
+        CHECK_AS_EXPECTED(pix_buffer->m_pix_buffer.number_of_planes == expected_number_of_planes, HAILO_INVALID_ARGUMENT,
+            "number of planes in the pix buffer ({}) doesn't match the order ({})",
+            pix_buffer->m_pix_buffer.number_of_planes, expected_number_of_planes);
+
+        return std::move(pix_buffer->m_pix_buffer);
+    }
+}
+
+void PipelineBuffer::set_metadata(Metadata &&val)
 {
     m_metadata = std::move(val);
 }
 
+// Returns the transfer-done callback attached to this buffer (no-op by default).
+TransferDoneCallbackAsyncInfer PipelineBuffer::get_exec_done_cb() const
+{
+    return m_exec_done;
+}
+
 PipelineTimePoint PipelineBuffer::add_timestamp(bool should_measure)
 {
     return should_measure ? std::chrono::steady_clock::now() : PipelineTimePoint{};
 }
 
+// Status of the pipeline action that produced this buffer (HAILO_SUCCESS by default).
+hailo_status PipelineBuffer::action_status()
+{
+    return m_action_status;
+}
+
+void PipelineBuffer::set_action_status(hailo_status status)
+{
+    m_action_status = status;
+}
+
 Expected<BufferPoolPtr> BufferPool::create(size_t buffer_size, size_t buffer_count, EventPtr shutdown_event,
-                                           hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags)
+                                           hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags,
+                                           bool is_empty, bool is_dma_able)
 {
     AccumulatorPtr queue_size_accumulator = nullptr;
     if ((elem_flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE) != 0) {
@@ -148,28 +259,47 @@ Expected<BufferPoolPtr> BufferPool::create(size_t buffer_size, size_t buffer_cou
     }
     const bool measure_vstream_latency = (vstream_flags & HAILO_VSTREAM_STATS_MEASURE_LATENCY) != 0;
 
-    auto free_buffers = SpscQueue<Buffer>::create(buffer_count, shutdown_event, BUFFER_POOL_DEFAULT_QUEUE_TIMEOUT);
-    CHECK_EXPECTED(free_buffers);
+    auto free_mem_views = SpscQueue<MemoryView>::create(buffer_count, shutdown_event, BUFFER_POOL_DEFAULT_QUEUE_TIMEOUT);
+    CHECK_EXPECTED(free_mem_views);
 
-    for (size_t i = 0; i < buffer_count; i++) {
-        auto buffer = Buffer::create(buffer_size);
-        CHECK_EXPECTED(buffer);
+    auto done_cbs = SpscQueue<TransferDoneCallbackAsyncInfer>::create(buffer_count, shutdown_event, BUFFER_POOL_DEFAULT_QUEUE_TIMEOUT);
+    CHECK_EXPECTED(done_cbs);
 
-        hailo_status status = free_buffers->enqueue(buffer.release());
-        CHECK_SUCCESS_AS_EXPECTED(status);
+    std::vector<Buffer> buffers;
+    if (!is_empty) {
+        buffers.reserve(buffer_count);
+        for (size_t i = 0; i < buffer_count; i++) {
+            BufferStorageParams buffer_storage_params;
+            if (is_dma_able) {
+                buffer_storage_params = BufferStorageParams::create_dma();
+            }
+            auto buffer = Buffer::create(buffer_size, buffer_storage_params);
+            CHECK_EXPECTED(buffer);
+
+            auto status = free_mem_views->enqueue(MemoryView(buffer.value()));
+            CHECK_SUCCESS_AS_EXPECTED(status);
+
+            buffers.emplace_back(buffer.release());
+        }
     }
 
-    auto buffer_pool_ptr = make_shared_nothrow<BufferPool>(buffer_size, measure_vstream_latency,
-        free_buffers.release(), std::move(queue_size_accumulator));
+    auto buffer_pool_ptr = make_shared_nothrow<BufferPool>(buffer_size, is_empty, measure_vstream_latency, std::move(buffers),
+        free_mem_views.release(), done_cbs.release(), std::move(queue_size_accumulator), buffer_count);
     CHECK_AS_EXPECTED(nullptr != buffer_pool_ptr, HAILO_OUT_OF_HOST_MEMORY);
 
     return buffer_pool_ptr;
 }
 
// Constructs a buffer pool.
// - is_holding_user_buffers: when true the pool starts "empty" and is filled by
//   user calls to enqueue_buffer() (each paired with a done-callback).
// - buffers: pool-owned backing storage (empty when holding user buffers).
// - max_buffer_count: capacity used by is_full()/allocate_buffers().
// NOTE(review): member-initializer execution follows member *declaration*
// order (outside this view) - keep this list in sync with the header.
BufferPool::BufferPool(size_t buffer_size, bool is_holding_user_buffers, bool measure_vstream_latency, std::vector<Buffer> &&buffers,
        SpscQueue<MemoryView> &&free_mem_views, SpscQueue<TransferDoneCallbackAsyncInfer> &&done_cbs, AccumulatorPtr &&queue_size_accumulator,
        size_t max_buffer_count) :
    m_buffer_size(buffer_size),
    m_is_holding_user_buffers(is_holding_user_buffers),
    m_max_buffer_count(max_buffer_count),
    m_measure_vstream_latency(measure_vstream_latency),
    m_buffers(std::move(buffers)),
    m_free_mem_views(std::move(free_mem_views)),
    m_done_cbs(std::move(done_cbs)),
    m_queue_size_accumulator(std::move(queue_size_accumulator))
{}
 
@@ -178,23 +308,132 @@ size_t BufferPool::buffer_size()
     return m_buffer_size;
 }
 
+hailo_status BufferPool::enqueue_buffer(MemoryView mem_view)
+{
+    CHECK(mem_view.size() == m_buffer_size, HAILO_INTERNAL_FAILURE, "Buffer size is not the same as expected for pool! ({} != {})", mem_view.size(), m_buffer_size);
+
+    auto status = m_free_mem_views.enqueue(mem_view);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status BufferPool::enqueue_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done)
+{
+    auto status = enqueue_buffer(mem_view);
+    CHECK_SUCCESS(status);
+
+    status = m_done_cbs.enqueue(exec_done);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+bool BufferPool::is_full() {
+    return (m_max_buffer_count - m_buffers.size() == 0);
+}
+
+hailo_status BufferPool::allocate_buffers(bool is_dma_able)
+{
+    m_is_holding_user_buffers = false;
+    size_t buffer_count = m_max_buffer_count - m_buffers.size();
+    for (size_t i = 0; i < buffer_count; i++) {
+        BufferStorageParams buffer_storage_params;
+        if (is_dma_able) {
+            buffer_storage_params = BufferStorageParams::create_dma();
+        }
+        auto buffer = Buffer::create(m_buffer_size, buffer_storage_params);
+        CHECK_EXPECTED_AS_STATUS(buffer);
+
+        auto status = m_free_mem_views.enqueue(MemoryView(buffer.value()));
+        CHECK_SUCCESS(status);
+        m_buffers.emplace_back(buffer.release());
+    }
+    return HAILO_SUCCESS;
+}
+
 Expected<PipelineBuffer> BufferPool::acquire_buffer(std::chrono::milliseconds timeout)
+{
+    auto mem_view = acquire_free_mem_view(timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == mem_view.status()) {
+        return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
+    }
+    CHECK_EXPECTED(mem_view);
+
+    if (m_is_holding_user_buffers) {
+        auto done_cb = acquire_on_done_cb(timeout);
+        if (HAILO_SHUTDOWN_EVENT_SIGNALED == done_cb.status()) {
+            return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
+        }
+        CHECK_EXPECTED(done_cb);
+
+        return PipelineBuffer(mem_view.release(), done_cb.release(), m_is_holding_user_buffers, shared_from_this(), m_measure_vstream_latency);
+    }
+
+    return PipelineBuffer(mem_view.release(), m_is_holding_user_buffers, shared_from_this(), m_measure_vstream_latency);
+}
+
+Expected<std::shared_ptr<PipelineBuffer>> BufferPool::acquire_buffer_ptr(std::chrono::milliseconds timeout)
+{
+    auto mem_view = acquire_free_mem_view(timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == mem_view.status()) {
+        return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
+    }
+    CHECK_EXPECTED(mem_view);
+
+    std::shared_ptr<PipelineBuffer> ptr = nullptr;
+    if (m_is_holding_user_buffers) {
+        auto done_cb = acquire_on_done_cb(timeout);
+        if (HAILO_SHUTDOWN_EVENT_SIGNALED == done_cb.status()) {
+            return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
+        }
+        CHECK_EXPECTED(done_cb);
+
+        ptr = make_shared_nothrow<PipelineBuffer>(mem_view.release(), done_cb.release(), m_is_holding_user_buffers, shared_from_this(), m_measure_vstream_latency);
+    } else {
+        ptr = make_shared_nothrow<PipelineBuffer>(mem_view.release(), m_is_holding_user_buffers, shared_from_this(), m_measure_vstream_latency);
+    }
+
+    CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
+    return ptr;
+}
+
+Expected<MemoryView> BufferPool::acquire_free_mem_view(std::chrono::milliseconds timeout)
 {
     if (nullptr != m_queue_size_accumulator) {
-        m_queue_size_accumulator->add_data_point(static_cast<double>(m_free_buffers.size_approx()));
+        m_queue_size_accumulator->add_data_point(static_cast<double>(m_free_mem_views.size_approx()));
     }
-    auto buffer = m_free_buffers.dequeue(timeout);
-    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
-        return make_unexpected(buffer.status());
+
+    auto mem_view = m_free_mem_views.dequeue(timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == mem_view.status()) {
+        return make_unexpected(mem_view.status());
     }
-    else if (HAILO_TIMEOUT == buffer.status()) {
+    else if (HAILO_TIMEOUT == mem_view.status()) {
         LOGGER__WARNING(
             "Failed to acquire buffer because the buffer pool is empty. This could be caused by uneven reading and writing speeds, with a short user-defined timeout. (timeout={}ms)",
             timeout.count());
-        return make_unexpected(buffer.status());
+        return make_unexpected(mem_view.status());
     }
-    CHECK_EXPECTED(buffer);
-    return PipelineBuffer(buffer.release(), shared_from_this(), m_measure_vstream_latency);
+    CHECK_EXPECTED(mem_view);
+
+    return mem_view.release();
+}
+
+Expected<TransferDoneCallbackAsyncInfer> BufferPool::acquire_on_done_cb(std::chrono::milliseconds timeout)
+{
+    auto done_cb = m_done_cbs.dequeue(timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == done_cb.status()) {
+        return make_unexpected(done_cb.status());
+    }
+    else if (HAILO_TIMEOUT == done_cb.status()) {
+        LOGGER__WARNING(
+            "Failed to acquire buffer because the buffer pool is empty. This could be caused by uneven reading and writing speeds, with a short user-defined timeout. (timeout={}ms)",
+            timeout.count());
+        return make_unexpected(done_cb.status());
+    }
+    CHECK_EXPECTED(done_cb);
+
+    return done_cb.release();
 }
 
 AccumulatorPtr BufferPool::get_queue_size_accumulator()
@@ -210,7 +449,7 @@ Expected<PipelineBuffer> BufferPool::get_available_buffer(PipelineBuffer &&optio
             optional.size(), buffer_size());
         return std::move(optional);
     }
-    
+
     auto acquired_buffer = acquire_buffer(timeout);
     if (HAILO_SHUTDOWN_EVENT_SIGNALED == acquired_buffer.status()) {
         return make_unexpected(acquired_buffer.status());
@@ -219,11 +458,11 @@ Expected<PipelineBuffer> BufferPool::get_available_buffer(PipelineBuffer &&optio
     return acquired_buffer.release();
 }
 
-hailo_status BufferPool::release_buffer(Buffer &&buffer)
+hailo_status BufferPool::release_buffer(MemoryView mem_view)
 {
     std::unique_lock<std::mutex> lock(m_release_buffer_mutex);
     // This can be called after the shutdown event was signaled so we ignore it here
-    return m_free_buffers.enqueue(std::move(buffer), true);
+    return m_free_mem_views.enqueue(std::move(mem_view), true);
 }
 
 Expected<DurationCollector> DurationCollector::create(hailo_pipeline_elem_stats_flags_t flags,
@@ -340,6 +579,7 @@ hailo_status PipelinePad::link_pads(PipelineElement &left, PipelineElement &righ
     CHECK(left_source_index < left.sources().size(), HAILO_INVALID_ARGUMENT,
         "Cannot link source pad #{} for PipelineElement '{}', it has only {} source pads.",
         left_source_index, left.name(), left.sources().size());
+
     CHECK(right_sink_index < right.sinks().size(), HAILO_INVALID_ARGUMENT,
         "Cannot link sink pad #{} for PipelineElement '{}', it has only {} sink pads.",
         right_sink_index, right.name(), right.sinks().size());
@@ -415,12 +655,24 @@ hailo_status PipelinePad::run_push(PipelineBuffer &&buffer)
 {
     if (m_push_complete_callback) {
         auto metadata = buffer.get_metadata();
-        const auto status = m_element.run_push(std::move(buffer));
+        const auto status = m_element.run_push(std::move(buffer), *this);
         m_push_complete_callback(metadata);
         return status;
     }
 
-    return m_element.run_push(std::move(buffer));
+    return m_element.run_push(std::move(buffer), *this);
+}
+
+void PipelinePad::run_push_async(PipelineBuffer &&buffer)
+{
+    if (m_push_complete_callback) {
+        auto metadata = buffer.get_metadata();
+        m_element.run_push_async(std::move(buffer), *this);
+        m_push_complete_callback(metadata);
+        return;
+    }
+
+    return m_element.run_push_async(std::move(buffer), *this);
 }
 
 Expected<PipelineBuffer> PipelinePad::run_pull(PipelineBuffer &&optional)
@@ -484,8 +736,9 @@ const PipelineElement &PipelinePad::element() const
 }
 
// An element with a single source pad (pipeline entry point on the pull side).
// The pad is created here and shares the element's name.
SourceElement::SourceElement(const std::string &name, DurationCollector &&duration_collector,
                             std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
                             PipelineDirection pipeline_direction) :
    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction)
{
    m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
}
@@ -496,8 +749,9 @@ PipelinePad &SourceElement::source()
 }
 
// An element with a single sink pad (pipeline endpoint on the push side).
// The pad is created here and shares the element's name.
SinkElement::SinkElement(const std::string &name, DurationCollector &&duration_collector,
                         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
                         PipelineDirection pipeline_direction) :
    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction)
{
    m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
}
@@ -508,8 +762,9 @@ PipelinePad &SinkElement::sink()
 }
 
 IntermediateElement::IntermediateElement(const std::string &name, DurationCollector &&duration_collector,
-                                         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
-    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status))
+                                         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                                         PipelineDirection pipeline_direction) :
+    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction)
 {
     m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
     m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
@@ -522,12 +777,14 @@ std::vector<PipelinePad*> IntermediateElement::execution_pads()
 }
 
// Base pipeline element: owns its pads, duration statistics, and a pointer to
// the shared pipeline-wide status (used to latch async errors).
// NOTE(review): initializer order must match member declaration order in the
// header, which is outside this view.
PipelineElement::PipelineElement(const std::string &name, DurationCollector &&duration_collector,
                                 std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
                                 PipelineDirection pipeline_direction) :
    PipelineObject(name),
    m_duration_collector(std::move(duration_collector)),
    m_pipeline_status(std::move(pipeline_status)),
    m_sinks(),
    m_sources(),
    m_pipeline_direction(pipeline_direction)
{}
 
 AccumulatorPtr PipelineElement::get_fps_accumulator()
@@ -572,6 +829,41 @@ std::string PipelineElement::description() const
     return element_description.str();
 }
 
+void PipelineElement::set_on_cant_pull_callback(std::function<void()> callback)
+{
+    m_cant_pull_callback = callback;
+}
+
+void PipelineElement::set_on_can_pull_callback(std::function<void()> callback)
+{
+    m_can_pull_callback = callback;
+}
+
+hailo_status PipelineElement::enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name)
+{
+    (void)mem_view;
+    (void)exec_done;
+    (void)source_name;
+    LOGGER__ERROR("enqueue_execution_buffer is not implemented for {}!", name());
+    return HAILO_NOT_IMPLEMENTED;
+};
+
+hailo_status PipelineElement::enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done)
+{
+    return enqueue_execution_buffer(mem_view, exec_done, "");
+};
+
+hailo_status PipelineElement::fill_buffer_pools(bool is_dma_able)
+{
+    (void)is_dma_able;
+    return HAILO_NOT_IMPLEMENTED;
+}
+
+Expected<bool> PipelineElement::are_buffer_pools_full()
+{
+    return make_unexpected(HAILO_NOT_IMPLEMENTED);
+}
+
 hailo_status PipelineElement::activate()
 {
     return execute_activate();
@@ -661,6 +953,14 @@ hailo_status PipelineElement::execute(std::function<hailo_status(PipelinePad*)>
     return HAILO_SUCCESS;
 }
 
+void PipelineElement::handle_non_recoverable_async_error(hailo_status error_status)
+{
+    if (HAILO_SUCCESS != m_pipeline_status->load()){
+        LOGGER__ERROR("Non-recoverable Async Infer Pipeline error. status error code: {}", error_status);
+        m_pipeline_status->store(error_status);
+    }
+}
+
 std::vector<PipelinePad*> SourceElement::execution_pads()
 {
     std::vector<PipelinePad*> result{&source()};
@@ -674,11 +974,15 @@ std::vector<PipelinePad*> SinkElement::execution_pads()
 }
 
// A one-in/one-out element that transforms buffers via action().
// buffer_pool supplies output buffers on the async push path; timeout bounds
// pool acquisitions. Either may be unused on the pull path.
FilterElement::FilterElement(const std::string &name, DurationCollector &&duration_collector,
                             std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
                             PipelineDirection pipeline_direction, BufferPoolPtr buffer_pool,
                             std::chrono::milliseconds timeout) :
    IntermediateElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction),
    m_pool(buffer_pool),
    m_timeout(timeout)
{}
 
-hailo_status FilterElement::run_push(PipelineBuffer &&buffer)
+hailo_status FilterElement::run_push(PipelineBuffer &&buffer, const PipelinePad &/*sink*/)
 {
     auto output = action(std::move(buffer), PipelineBuffer());
     if (HAILO_SHUTDOWN_EVENT_SIGNALED == output.status()) {
@@ -687,11 +991,11 @@ hailo_status FilterElement::run_push(PipelineBuffer &&buffer)
     CHECK_EXPECTED_AS_STATUS(output);
 
     hailo_status status = next_pad().run_push(output.release());
-    if (status == HAILO_SHUTDOWN_EVENT_SIGNALED) {
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
         LOGGER__INFO("run_push of {} was shutdown!", name());
         return status;
     }
-    if (status == HAILO_STREAM_ABORTED_BY_USER) {
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
         LOGGER__INFO("run_push of {} was aborted!", name());
         return status;
     }
@@ -700,6 +1004,28 @@ hailo_status FilterElement::run_push(PipelineBuffer &&buffer)
     return HAILO_SUCCESS;
 }
 
+void FilterElement::run_push_async(PipelineBuffer &&buffer, const PipelinePad &/*sink*/)
+{
+    assert(m_pipeline_direction == PipelineDirection::PUSH);
+    if (HAILO_SUCCESS != buffer.action_status()) {
+        auto buffer_from_pool = m_pool->get_available_buffer(PipelineBuffer(), m_timeout);
+        if (HAILO_SUCCESS != buffer_from_pool.status()) {
+            next_pad().run_push_async(PipelineBuffer(buffer_from_pool.status()));
+        } else {
+            next_pad().run_push_async(buffer_from_pool.release());
+        }
+        return;
+    }
+
+    auto output = action(std::move(buffer), PipelineBuffer());
+    if (HAILO_SUCCESS == output.status()) {
+        next_pad().run_push_async(output.release());
+    } else {
+        next_pad().run_push_async(PipelineBuffer(output.status()));
+    }
+    return;
+}
+
 Expected<PipelineBuffer> FilterElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*source*/)
 {
     auto buffer = next_pad().run_pull();
@@ -711,6 +1037,37 @@ Expected<PipelineBuffer> FilterElement::run_pull(PipelineBuffer &&optional, cons
     return action(buffer.release(), std::move(optional));
 }
 
+std::vector<AccumulatorPtr> FilterElement::get_queue_size_accumulators()
+{
+    if (nullptr == m_pool || nullptr == m_pool->get_queue_size_accumulator()) {
+        return std::vector<AccumulatorPtr>();
+    }
+    return {m_pool->get_queue_size_accumulator()};
+}
+
+hailo_status FilterElement::enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name)
+{
+    (void)source_name;
+
+    auto status = m_pool->enqueue_buffer(mem_view, exec_done);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+Expected<bool> FilterElement::are_buffer_pools_full()
+{
+    return m_pool->is_full();
+}
+
+hailo_status FilterElement::fill_buffer_pools(bool is_dma_able)
+{
+    auto status = m_pool->allocate_buffers(is_dma_able);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
 Expected<SpscQueue<PipelineBuffer>> BaseQueueElement::create_queue(size_t queue_size, EventPtr shutdown_event)
 {
     auto queue = SpscQueue<PipelineBuffer>::create(queue_size, shutdown_event);
@@ -722,8 +1079,8 @@ Expected<SpscQueue<PipelineBuffer>> BaseQueueElement::create_queue(size_t queue_
 BaseQueueElement::BaseQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
                                    std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
                                    AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
-                                   Event &&activation_event, Event &&deactivation_event) :
-    IntermediateElement(name, std::move(duration_collector), std::move(pipeline_status)),
+                                   Event &&activation_event, Event &&deactivation_event, PipelineDirection pipeline_direction) :
+    IntermediateElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction),
     m_queue(std::move(queue)),
     m_shutdown_event(shutdown_event),
     m_timeout(timeout),
@@ -867,6 +1224,22 @@ hailo_status BaseQueueElement::execute_wait_for_finish()
     return HAILO_SUCCESS;
 }
 
+hailo_status BaseQueueElement::enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name)
+{
+    (void)source_name;
+    return m_sinks[0].prev()->element().enqueue_execution_buffer(mem_view, exec_done, m_sinks[0].prev()->name());
+}
+
+Expected<bool> BaseQueueElement::are_buffer_pools_full()
+{
+    return m_sinks[0].prev()->element().are_buffer_pools_full();
+}
+
+hailo_status BaseQueueElement::fill_buffer_pools(bool is_dma_able)
+{
+    return m_sinks[0].prev()->element().fill_buffer_pools(is_dma_able);
+}
+
 hailo_status PushQueueElement::execute_abort()
 {
     auto status = m_shutdown_event->reset();
@@ -917,7 +1290,7 @@ hailo_status BaseQueueElement::pipeline_status()
 
 Expected<std::shared_ptr<PushQueueElement>> PushQueueElement::create(const std::string &name, std::chrono::milliseconds timeout,
         size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
-        std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction)
 {
     auto queue = BaseQueueElement::create_queue(queue_size, shutdown_event);
     CHECK_EXPECTED(queue);
@@ -940,7 +1313,7 @@ Expected<std::shared_ptr<PushQueueElement>> PushQueueElement::create(const std::
 
     auto queue_ptr = make_shared_nothrow<PushQueueElement>(queue.release(), shutdown_event, name, timeout,
         duration_collector.release(), std::move(queue_size_accumulator), std::move(pipeline_status),
-        activation_event.release(), deactivation_event.release());
+        activation_event.release(), deactivation_event.release(), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY, "Creating PushQueueElement {} failed!", name);
 
     LOGGER__INFO("Created {}", queue_ptr->name());
@@ -949,20 +1322,22 @@ Expected<std::shared_ptr<PushQueueElement>> PushQueueElement::create(const std::
 }
 
 Expected<std::shared_ptr<PushQueueElement>> PushQueueElement::create(const std::string &name, const hailo_vstream_params_t &vstream_params,
-        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction)
 {
     return PushQueueElement::create(name, std::chrono::milliseconds(vstream_params.timeout_ms),
-        vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status);
+        vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status, pipeline_direction);
 }
 
// should_start_thread lets subclasses (e.g. AsyncPushQueueElement) defer the
// worker-thread start until their own construction is complete - otherwise the
// thread could run against a partially constructed object.
PushQueueElement::PushQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
                                   std::chrono::milliseconds timeout, DurationCollector &&duration_collector, 
                                   AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
                                   Event &&activation_event, Event &&deactivation_event, PipelineDirection pipeline_direction, bool should_start_thread) :
    BaseQueueElement(std::move(queue), shutdown_event, name, timeout, std::move(duration_collector), std::move(queue_size_accumulator),
                     std::move(pipeline_status), std::move(activation_event), std::move(deactivation_event), pipeline_direction)
{
    if (should_start_thread) {
        start_thread();
    }
}
 
 PushQueueElement::~PushQueueElement()
@@ -970,14 +1345,14 @@ PushQueueElement::~PushQueueElement()
     stop_thread();
 }
 
-hailo_status PushQueueElement::run_push(PipelineBuffer &&buffer)
+hailo_status PushQueueElement::run_push(PipelineBuffer &&buffer, const PipelinePad &/*sink*/)
 {
     // TODO: Support fps/latency collection for queue elems (HRT-7711)
     if (nullptr != m_queue_size_accumulator) {
         m_queue_size_accumulator->add_data_point(static_cast<double>(m_queue.size_approx()));
     }
     auto status = m_pipeline_status->load();
-    if (status == HAILO_STREAM_ABORTED_BY_USER) {
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
         LOGGER__INFO("run_push of {} was aborted!", name());
         return status;
     }
@@ -995,6 +1370,11 @@ hailo_status PushQueueElement::run_push(PipelineBuffer &&buffer)
     return HAILO_SUCCESS;
 }
 
+void PushQueueElement::run_push_async(PipelineBuffer &&/*buffer*/, const PipelinePad &/*sink*/) {
+    LOGGER__ERROR("run_push_async is not supported for {}", name());
+    assert(false);
+}
+
 Expected<PipelineBuffer> PushQueueElement::run_pull(PipelineBuffer &&/*optional*/, const PipelinePad &/*source*/)
 {
     return make_unexpected(HAILO_INVALID_OPERATION);
@@ -1058,9 +1438,138 @@ hailo_status PushQueueElement::run_in_thread()
     return HAILO_SUCCESS;
 }
 
+Expected<std::shared_ptr<AsyncPushQueueElement>> AsyncPushQueueElement::create(const std::string &name, std::chrono::milliseconds timeout,
+        size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction)
+{
+    auto queue = BaseQueueElement::create_queue(queue_size, shutdown_event);
+    CHECK_EXPECTED(queue);
+
+    auto activation_event = Event::create(Event::State::not_signalled);
+    CHECK_EXPECTED(activation_event);
+
+    auto deactivation_event = Event::create(Event::State::not_signalled);
+    CHECK_EXPECTED(deactivation_event);
+
+    // TODO: Support fps/latency collection for queue elems (HRT-7711)
+    auto duration_collector = DurationCollector::create(HAILO_PIPELINE_ELEM_STATS_NONE);
+    CHECK_EXPECTED(duration_collector);
+
+    AccumulatorPtr queue_size_accumulator = nullptr;
+    if ((flags & HAILO_PIPELINE_ELEM_STATS_MEASURE_QUEUE_SIZE) != 0) {
+        queue_size_accumulator = make_shared_nothrow<FullAccumulator<double>>("queue_size");
+        CHECK_AS_EXPECTED(nullptr != queue_size_accumulator, HAILO_OUT_OF_HOST_MEMORY);
+    }
+
+    auto queue_ptr = make_shared_nothrow<AsyncPushQueueElement>(queue.release(), shutdown_event, name, timeout,
+        duration_collector.release(), std::move(queue_size_accumulator), std::move(pipeline_status),
+        activation_event.release(), deactivation_event.release(), pipeline_direction);
+    CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY, "Creating PushQueueElement {} failed!", name);
+
+    LOGGER__INFO("Created {}", queue_ptr->name());
+
+    return queue_ptr;
+}
+
+Expected<std::shared_ptr<AsyncPushQueueElement>> AsyncPushQueueElement::create(const std::string &name, const ElementBuildParams &build_params,
+    PipelineDirection pipeline_direction)
+{
+    return AsyncPushQueueElement::create(name, build_params.timeout, build_params.buffer_pool_size,
+            build_params.elem_stats_flags, build_params.shutdown_event, build_params.pipeline_status, pipeline_direction);
+}
+
// Passes should_start_thread=false to the base so the worker thread is only
// started here, after this object is fully constructed (the thread runs this
// class's overridden start_thread()/run_in_thread()).
AsyncPushQueueElement::AsyncPushQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
                                   std::chrono::milliseconds timeout, DurationCollector &&duration_collector, 
                                   AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
                                   Event &&activation_event, Event &&deactivation_event, PipelineDirection pipeline_direction) :
    PushQueueElement(std::move(queue), shutdown_event, name, timeout, std::move(duration_collector), std::move(queue_size_accumulator),
                     std::move(pipeline_status), std::move(activation_event), std::move(deactivation_event), pipeline_direction, false)
{
    start_thread();
}
+
+void AsyncPushQueueElement::run_push_async(PipelineBuffer &&buffer, const PipelinePad &/*sink*/)
+{
+    if (HAILO_SUCCESS != buffer.action_status()) {
+        auto status = m_queue.enqueue(std::move(buffer), m_timeout);
+        if (HAILO_SUCCESS != status) {
+            handle_non_recoverable_async_error(status);
+        }
+        return;
+    }
+    // TODO: Support fps/latency collection for queue elems (HRT-7711)
+    if (nullptr != m_queue_size_accumulator) {
+        m_queue_size_accumulator->add_data_point(static_cast<double>(m_queue.size_approx()));
+    }
+
+    auto status = m_queue.enqueue(std::move(buffer), m_timeout);
+    if (HAILO_SUCCESS != status && HAILO_SHUTDOWN_EVENT_SIGNALED != status) {
+        handle_non_recoverable_async_error(status);
+    }
+}
+
// Starts the worker thread. The loop's ordering is deliberate and
// order-sensitive (do not reorder):
//   1. wait for activation; a wait failure is latched as a pipeline error
//      but the loop still checks the running flag before exiting.
//   2. mark m_is_run_in_thread_running under m_mutex and notify waiters
//      (presumably stop/flush logic waits on m_cv - outside this view).
//   3. run one run_in_thread() iteration; its failure is latched too.
//   4. clear the flag under the lock and notify again.
void AsyncPushQueueElement::start_thread()
{
    m_thread = std::thread([this] () {
        OsUtils::set_current_thread_name(thread_name());
        while (m_is_thread_running.load()) {
            auto status = m_activation_event.wait(INIFINITE_TIMEOUT());
            if (HAILO_SUCCESS != status) {
                handle_non_recoverable_async_error(status);
            }

            // Re-check after the (possibly long) wait: stop_thread() may have
            // requested shutdown while we were blocked.
            if (!m_is_thread_running) {
                LOGGER__INFO("Thread in element {} is not running anymore, exiting..", this->name());
                break;
            }
            if (HAILO_SUCCESS == status) {
                {
                    std::unique_lock<std::mutex> lock(m_mutex);
                    m_is_run_in_thread_running = true;
                }
                m_cv.notify_all();

                status = run_in_thread();
                if (HAILO_SUCCESS != status) {
                    handle_non_recoverable_async_error(status);
                }

                {
                    std::unique_lock<std::mutex> lock(m_mutex);
                    m_is_run_in_thread_running = false;
                }
                m_cv.notify_all();
            }
        }
    });
}
+
+hailo_status AsyncPushQueueElement::run_push(PipelineBuffer &&/*buffer*/, const PipelinePad &/*sink*/)
+{
+    return HAILO_INVALID_OPERATION;
+}
+
+hailo_status AsyncPushQueueElement::run_in_thread()
+{
+    auto buffer = m_queue.dequeue(INIFINITE_TIMEOUT());
+    switch (buffer.status()) {
+    case HAILO_SHUTDOWN_EVENT_SIGNALED:
+        break;
+    
+    case HAILO_SUCCESS:
+        next_pad().run_push_async(buffer.release());
+        break;
+
+    default:
+        next_pad().run_push_async(PipelineBuffer(buffer.status()));
+    }
+    return buffer.status();
+}
+
+
 Expected<std::shared_ptr<PullQueueElement>> PullQueueElement::create(const std::string &name, std::chrono::milliseconds timeout,
         size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
-        std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction)
 {
     auto queue = BaseQueueElement::create_queue(queue_size, shutdown_event);
     CHECK_EXPECTED(queue);
@@ -1083,7 +1592,7 @@ Expected<std::shared_ptr<PullQueueElement>> PullQueueElement::create(const std::
 
     auto queue_ptr = make_shared_nothrow<PullQueueElement>(queue.release(), shutdown_event, name, timeout,
         duration_collector.release(), std::move(queue_size_accumulator), std::move(pipeline_status),
-        activation_event.release(), deactivation_event.release());
+        activation_event.release(), deactivation_event.release(), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY, "Creating PullQueueElement {} failed!", name);
 
     LOGGER__INFO("Created {}", queue_ptr->name());
@@ -1091,18 +1600,18 @@ Expected<std::shared_ptr<PullQueueElement>> PullQueueElement::create(const std::
     return queue_ptr;
 }
 Expected<std::shared_ptr<PullQueueElement>> PullQueueElement::create(const std::string &name, const hailo_vstream_params_t &vstream_params,
-        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction)
 {
     return PullQueueElement::create(name, std::chrono::milliseconds(vstream_params.timeout_ms),
-        vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status);
+        vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status, pipeline_direction);
 }
 
 PullQueueElement::PullQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
                                    std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
                                    AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
-                                   Event &&activation_event, Event &&deactivation_event) :
+                                   Event &&activation_event, Event &&deactivation_event, PipelineDirection pipeline_direction) :
     BaseQueueElement(std::move(queue), shutdown_event, name, timeout, std::move(duration_collector), std::move(queue_size_accumulator),
-                     std::move(pipeline_status), std::move(activation_event), std::move(deactivation_event))
+                     std::move(pipeline_status), std::move(activation_event), std::move(deactivation_event), pipeline_direction)
 {
     start_thread();
 }
@@ -1112,11 +1621,17 @@ PullQueueElement::~PullQueueElement()
     stop_thread();
 }
 
-hailo_status PullQueueElement::run_push(PipelineBuffer &&/*buffer*/)
+hailo_status PullQueueElement::run_push(PipelineBuffer &&/*buffer*/, const PipelinePad &/*sink*/)
 {
     return HAILO_INVALID_OPERATION;
 }
 
+void PullQueueElement::run_push_async(PipelineBuffer &&/*buffer*/, const PipelinePad &/*sink*/)
+{
+    LOGGER__ERROR("run_push_async is not supported for {}", name());
+    assert(false);
+}
+
 Expected<PipelineBuffer> PullQueueElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*sink*/)
 {
     // TODO: Support fps/latency collection for queue elems (HRT-7711)
@@ -1183,7 +1698,8 @@ hailo_status PullQueueElement::run_in_thread()
 }
 
 Expected<std::shared_ptr<UserBufferQueueElement>> UserBufferQueueElement::create(const std::string &name, std::chrono::milliseconds timeout,
-    hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+    hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+    PipelineDirection pipeline_direction)
 {
     auto pending_buffer_queue = BaseQueueElement::create_queue(1, shutdown_event);
     CHECK_EXPECTED(pending_buffer_queue);
@@ -1210,7 +1726,7 @@ Expected<std::shared_ptr<UserBufferQueueElement>> UserBufferQueueElement::create
     auto queue_ptr = make_shared_nothrow<UserBufferQueueElement>(pending_buffer_queue.release(),
         full_buffer_queue.release(), shutdown_event, name, timeout, duration_collector.release(),
         std::move(queue_size_accumulator), std::move(pipeline_status), activation_event.release(),
-        deactivation_event.release());
+        deactivation_event.release(), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != queue_ptr, HAILO_OUT_OF_HOST_MEMORY, "Creating UserBufferQueueElement {} failed!", name);
 
     LOGGER__INFO("Created {}", queue_ptr->name());
@@ -1219,20 +1735,22 @@ Expected<std::shared_ptr<UserBufferQueueElement>> UserBufferQueueElement::create
 }
 
 Expected<std::shared_ptr<UserBufferQueueElement>> UserBufferQueueElement::create(const std::string &name, const hailo_vstream_params_t &vstream_params,
-        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction)
 {
     return UserBufferQueueElement::create(name, std::chrono::milliseconds(vstream_params.timeout_ms),
-        vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status);
+        vstream_params.pipeline_elements_stats_flags, shutdown_event, pipeline_status, pipeline_direction);
 }
 
 UserBufferQueueElement::UserBufferQueueElement(SpscQueue<PipelineBuffer> &&queue, SpscQueue<PipelineBuffer> &&full_buffer_queue,
                                                EventPtr shutdown_event, const std::string &name, std::chrono::milliseconds timeout,
                                                DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
                                                std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
-                                               Event &&activation_event, Event &&deactivation_event) :
+                                               Event &&activation_event, Event &&deactivation_event,
+                                               PipelineDirection pipeline_direction) :
     PullQueueElement(std::move(queue), shutdown_event, name, timeout, std::move(duration_collector),
                      std::move(queue_size_accumulator), std::move(pipeline_status), std::move(activation_event),
-                     std::move(deactivation_event)),
+                     std::move(deactivation_event),
+                     pipeline_direction),
     m_full_buffer_queue(std::move(full_buffer_queue))
 {}
 
@@ -1317,34 +1835,110 @@ hailo_status UserBufferQueueElement::run_in_thread()
 }
 
 BaseMuxElement::BaseMuxElement(size_t sink_count, const std::string &name, std::chrono::milliseconds timeout,
-                               DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
-    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status)),
-    m_timeout(timeout)
+                               DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                               BufferPoolPtr buffer_pool, PipelineDirection pipeline_direction) :
+    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction),
+    m_timeout(timeout),
+    m_pool(buffer_pool)
 {
     m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
     m_sinks.reserve(sink_count);
+    m_sink_has_arrived.reserve(sink_count);
     for (uint32_t i = 0; i < sink_count; ++i) {
         m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
+        m_index_of_sink[m_sinks[i].name()] = i;
+        m_sink_has_arrived[m_sinks[i].name()] = false;
     }
 }
 
 std::vector<PipelinePad*> BaseMuxElement::execution_pads()
 {
-    std::vector<PipelinePad*> result;
-    result.reserve(m_sinks.size());
-    for (auto& pad : m_sinks) {
-        result.push_back(pad.prev());
+    if (m_next_pads.size() == 0) {
+        if (PipelineDirection::PUSH == m_pipeline_direction) {
+            m_next_pads.reserve(m_sources.size());
+            for (auto &source : m_sources ) {
+                m_next_pads.push_back(source.next());
+            }
+        } else {
+            m_next_pads.reserve(m_sinks.size());
+            for (auto &sink : m_sinks ) {
+                m_next_pads.push_back(sink.prev());
+            }
+        }
     }
-    return result;
+    return m_next_pads;
 }
 
-hailo_status BaseMuxElement::run_push(PipelineBuffer &&/*buffer*/)
+hailo_status BaseMuxElement::run_push(PipelineBuffer &&/*buffer*/, const PipelinePad &/*sink*/)
 {
-    return HAILO_NOT_IMPLEMENTED;
+    return HAILO_INVALID_OPERATION;
 }
 
+void BaseMuxElement::run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink)
+{
+    assert(PipelineDirection::PUSH == m_pipeline_direction);
+    assert(m_next_pads.size() == 1);
+
+    std::unique_lock<std::mutex> lock(m_mutex);
+
+    m_sink_has_arrived[sink.name()] = true;
+    m_input_buffers[sink.name()] = std::move(buffer);
+    if (has_all_sinks_arrived()) {
+        for (auto &input_buffer : m_input_buffers) {
+            if (HAILO_SUCCESS != input_buffer.second.action_status()) {
+                auto acquired_buffer = m_pool->get_available_buffer(PipelineBuffer(), m_timeout);
+                if (HAILO_SUCCESS == acquired_buffer.status()) {
+                    acquired_buffer->set_action_status(input_buffer.second.action_status());
+                    m_next_pads[0]->run_push_async(acquired_buffer.release());
+                } else {
+                    handle_non_recoverable_async_error(acquired_buffer.status());
+                }
+                return;
+            }
+        }
+        std::vector<PipelineBuffer> input_buffers;
+        input_buffers.resize(m_input_buffers.size());
+        for (auto &input_buffer : m_input_buffers) {
+            input_buffers[m_index_of_sink[input_buffer.first]] = std::move(input_buffer.second);
+        }
+
+        auto output = action(std::move(input_buffers), PipelineBuffer());
+        if (HAILO_SUCCESS == output.status()) {
+            m_next_pads[0]->run_push_async(output.release());
+        } else {
+            m_next_pads[0]->run_push_async(PipelineBuffer(output.status()));
+        }
+
+        for (const auto &curr_sink : m_sinks) {
+            m_sink_has_arrived[curr_sink.name()] = false;
+        }
+        m_input_buffers.clear();
+
+        // Manual unlocking is done before notifying, to avoid waking up the waiting thread only to block again
+        lock.unlock();
+        m_cv.notify_all();
+    } else {
+        auto cv_status = m_cv.wait_for(lock, m_timeout);
+        if (std::cv_status::timeout == cv_status) {
+            LOGGER__ERROR("Waiting for other threads in BaseMuxElement {} has reached a timeout (timeout={}ms)", name(), m_timeout.count());
+            handle_non_recoverable_async_error(HAILO_TIMEOUT);
+        }
+    }
+}
+
+bool BaseMuxElement::has_all_sinks_arrived()
+{
+    for (const auto &current_sink : m_sink_has_arrived) {
+        if (!current_sink.second) {
+            return false;
+        }
+    }
+    return true;
+}
 Expected<PipelineBuffer> BaseMuxElement::run_pull(PipelineBuffer &&optional, const PipelinePad &/*source*/)
 {
+    CHECK_AS_EXPECTED(m_pipeline_direction == PipelineDirection::PULL, HAILO_INVALID_OPERATION,
+        "BaseMuxElement {} does not support run_pull operation", name());
     std::vector<PipelineBuffer> inputs;
     inputs.reserve(m_sinks.size());
     for (auto &sink : m_sinks) {
@@ -1363,10 +1957,34 @@ Expected<PipelineBuffer> BaseMuxElement::run_pull(PipelineBuffer &&optional, con
     return output;
 }
 
+hailo_status BaseMuxElement::enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name)
+{
+    (void)source_name;
+    auto status = m_pool->enqueue_buffer(mem_view, exec_done);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+Expected<bool> BaseMuxElement::are_buffer_pools_full()
+{
+    return m_pool->is_full();
+}
+
+hailo_status BaseMuxElement::fill_buffer_pools(bool is_dma_able)
+{
+    auto status = m_pool->allocate_buffers(is_dma_able);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
 BaseDemuxElement::BaseDemuxElement(size_t source_count, const std::string &name, std::chrono::milliseconds timeout,
-                                   DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
-    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status)),
+                                   DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                                   std::vector<BufferPoolPtr> pools, PipelineDirection pipeline_direction) :
+    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction),
     m_timeout(timeout),
+    m_pools(pools),
     m_is_activated(false),
     m_was_stream_aborted(false),
     m_index_of_source(),
@@ -1377,28 +1995,92 @@ BaseDemuxElement::BaseDemuxElement(size_t source_count, const std::string &name,
     m_sources.reserve(source_count);
     for (uint32_t i = 0; i < source_count; i++) {
         m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
-        m_index_of_source[&m_sources[i]] = i;
+        m_index_of_source[m_sources[i].name()] = i;
     }
 }
 
-hailo_status BaseDemuxElement::run_push(PipelineBuffer &&/*buffer*/)
+hailo_status BaseDemuxElement::run_push(PipelineBuffer &&buffer, const PipelinePad &/*sink*/)
 {
-    return HAILO_NOT_IMPLEMENTED;
+    CHECK(PipelineDirection::PUSH == m_pipeline_direction, HAILO_INVALID_OPERATION,
+        "BaseDemuxElement {} does not support run_push operation", name());
+
+    auto outputs = action(std::move(buffer));
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == outputs.status()) {
+        return outputs.status();
+    }
+    CHECK_EXPECTED_AS_STATUS(outputs);
+
+    for (const auto pad : execution_pads()) {
+        assert(m_index_of_source.count(pad->prev()->name()) > 0);
+        auto source_index = m_index_of_source[pad->prev()->name()];
+
+        hailo_status status = pad->run_push(std::move(outputs.value()[source_index]));
+        if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+            LOGGER__INFO("run_push of {} was shutdown!", name());
+            return status;
+        }
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            LOGGER__INFO("run_push of {} was aborted!", name());
+            return status;
+        }
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+void BaseDemuxElement::run_push_async(PipelineBuffer &&buffer, const PipelinePad &/*sink*/)
+{
+    assert(PipelineDirection::PUSH == m_pipeline_direction);
+
+    if (HAILO_SUCCESS != buffer.action_status()) {
+        for (const auto pad : execution_pads()) {
+            auto source_index = m_index_of_source[pad->prev()->name()];
+            auto acquired_buffer = m_pools[source_index]->acquire_buffer(m_timeout);
+            if (HAILO_SUCCESS == acquired_buffer.status()) {
+                acquired_buffer->set_action_status(buffer.action_status());
+                pad->run_push_async(acquired_buffer.release());
+            } else {
+                handle_non_recoverable_async_error(acquired_buffer.status());
+            }
+        }
+        return;
+    }
+
+    auto outputs = action(std::move(buffer));
+
+    for (const auto pad : execution_pads()) {
+        assert(m_index_of_source.count(pad->prev()->name()) > 0);
+        auto source_index = m_index_of_source[pad->prev()->name()];
+        if (HAILO_SUCCESS == outputs.status()) {
+            pad->run_push_async(std::move(outputs.value()[source_index]));
+        } else {
+            pad->run_push_async(PipelineBuffer(outputs.status()));
+        }
+    }
 }
 
 Expected<PipelineBuffer> BaseDemuxElement::run_pull(PipelineBuffer &&optional, const PipelinePad &source)
 {
+    CHECK_AS_EXPECTED(m_pipeline_direction == PipelineDirection::PULL, HAILO_INVALID_OPERATION,
+        "BaseDemuxElement {} does not support run_pull operation", name());
+
     CHECK_AS_EXPECTED(!optional, HAILO_INVALID_ARGUMENT, "Optional buffer is not allowed in demux element!");
 
-    // TODO: should we lock here? or only right before wait_for?
     std::unique_lock<std::mutex> lock(m_mutex);
     if (!m_is_activated) {
         return make_unexpected(HAILO_SHUTDOWN_EVENT_SIGNALED);
     }
 
-    m_was_source_called[m_index_of_source[&source]] = true;
-    if (were_all_sinks_called()) {
-        auto input = next_pad().run_pull();
+    if (m_was_stream_aborted) {
+        return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
+    }
+
+    m_was_source_called[m_index_of_source[source.name()]] = true;
+
+    if (were_all_srcs_arrived()) {
+        // If all srcs arrived, execute the demux
+        auto input = execution_pads()[0]->run_pull();
         if (HAILO_STREAM_ABORTED_BY_USER == input.status()) {
             LOGGER__INFO("run_pull of demux element was aborted!");
             m_was_stream_aborted = true;
@@ -1427,8 +2109,11 @@ Expected<PipelineBuffer> BaseDemuxElement::run_pull(PipelineBuffer &&optional, c
         lock.unlock();
         m_cv.notify_all();
     } else {
-        auto cv_status = m_cv.wait_for(lock, m_timeout);
-        CHECK_AS_EXPECTED(std::cv_status::timeout != cv_status, HAILO_TIMEOUT, "Waiting for other threads in demux {} has reached a timeout (timeout={}ms)", name(), m_timeout.count());
+        // If not all srcs arrived, wait until m_was_source_called is false (set to false after the demux execution)
+        auto wait_successful = m_cv.wait_for(lock, m_timeout, [&](){
+            return !m_was_source_called[m_index_of_source[source.name()]] || m_was_stream_aborted || !m_is_activated;
+        });
+        CHECK_AS_EXPECTED(wait_successful, HAILO_TIMEOUT, "Waiting for other threads in demux {} has reached a timeout (timeout={}ms)", name(), m_timeout.count());
 
         if (m_was_stream_aborted) {
             lock.unlock();
@@ -1444,11 +2129,11 @@ Expected<PipelineBuffer> BaseDemuxElement::run_pull(PipelineBuffer &&optional, c
         }
     }
 
-    assert(m_index_of_source[&source] < m_buffers_for_action.size());
-    return std::move(m_buffers_for_action[m_index_of_source[&source]]);
+    assert(m_index_of_source[source.name()] < m_buffers_for_action.size());
+    return std::move(m_buffers_for_action[m_index_of_source[source.name()]]);
 }
 
-bool BaseDemuxElement::were_all_sinks_called()
+bool BaseDemuxElement::were_all_srcs_arrived()
 {
     return std::all_of(m_was_source_called.begin(), m_was_source_called.end(), [](bool v) { return v; });
 }
@@ -1498,13 +2183,17 @@ hailo_status BaseDemuxElement::execute_post_deactivate(bool should_clear_abort)
 
 hailo_status BaseDemuxElement::execute_abort()
 {
-    return PipelineElement::execute_abort();
-}
+    auto status = PipelineElement::execute_abort();
+    CHECK_SUCCESS(status);
+    {
+        // There is a case where the other thread is halted (via context switch) before the wait_for() function,
+        // then we call notify_all() here, and then the wait_for() is called - resulting in a timeout.
+        // notify_all() only works on threads which are already waiting, so that's why we acquire the lock here.
+        std::unique_lock<std::mutex> lock(m_mutex);
+    }
+    m_cv.notify_all();
 
-PipelinePad &BaseDemuxElement::next_pad()
-{
-    // Note: The next elem to be run is upstream from this elem (i.e. buffers are pulled)
-    return *m_sinks[0].prev();
+    return HAILO_SUCCESS;
 }
 
 hailo_status BaseDemuxElement::set_timeout(std::chrono::milliseconds timeout)
@@ -1513,11 +2202,57 @@ hailo_status BaseDemuxElement::set_timeout(std::chrono::milliseconds timeout)
     return HAILO_SUCCESS;
 }
 
-std::vector<PipelinePad*> BaseDemuxElement::execution_pads()
+hailo_status BaseDemuxElement::enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name)
 {
-    std::vector<PipelinePad*> result{&next_pad()};
-    return result;
+    auto pool_id = m_index_of_source.at(source_name);
+    auto status = m_pools[pool_id]->enqueue_buffer(mem_view, exec_done);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+Expected<bool> BaseDemuxElement::are_buffer_pools_full()
+{
+    for (const auto &pool : m_pools) {
+        if (pool->is_full()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+hailo_status BaseDemuxElement::fill_buffer_pool(bool is_dma_able, size_t pool_id) {
+    auto status = m_pools[pool_id]->allocate_buffers(is_dma_able);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
 }
 
+hailo_status BaseDemuxElement::fill_buffer_pools(bool is_dma_able) {
+    for (auto &pool : m_pools) {
+        auto status = pool->allocate_buffers(is_dma_able);
+        CHECK_SUCCESS(status);
+    }
+    return HAILO_SUCCESS;
+}
+
+std::vector<PipelinePad*> BaseDemuxElement::execution_pads()
+{
+    if (m_next_pads.size() == 0)
+    {
+        if (PipelineDirection::PUSH == m_pipeline_direction) {
+            m_next_pads.reserve(m_sources.size());
+            for (auto &source : m_sources ) {
+                m_next_pads.push_back(source.next());
+            }
+        } else {
+            m_next_pads.reserve(m_sinks.size());
+            for (auto &sink : m_sinks ) {
+                m_next_pads.push_back(sink.prev());
+            }
+        }
+    }
+    return m_next_pads;
+}
 
 } /* namespace hailort */
index 77d56dc08c6f860ff12b761208e038fe7dc697fc..6f0db703bad7d6b05658b494edf8bbf3e1d89aa9 100644 (file)
 #define _HAILO_PIPELINE_HPP_
 
 #include "hailo/buffer.hpp"
+#include "hailo/expected.hpp"
+#include "hailo/hailort.h"
 #include "hailo/runtime_statistics.hpp"
+#include "net_flow/ops/nms_post_process.hpp"
 
 #include "utils/thread_safe_queue.hpp"
 
 
 namespace hailort
 {
+struct ElementBuildParams
+{
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status;
+    std::chrono::milliseconds timeout;
+    EventPtr shutdown_event;
+    size_t buffer_pool_size;
+    hailo_pipeline_elem_stats_flags_t elem_stats_flags;
+    hailo_vstream_stats_flags_t vstream_stats_flags;
+};
+
+enum class PipelineDirection
+{
+    PULL,
+    PUSH,
+};
+
+// TODO: need to think about naming and the right place to declare the CompletionInfoAsyncInferInternal and TransferDoneCallbackAsyncInfer
+struct CompletionInfoAsyncInferInternal
+{
+    hailo_status status;
+};
+using TransferDoneCallbackAsyncInfer = std::function<void(const CompletionInfoAsyncInferInternal &completion_info)>;
 
 using PipelineTimePoint = std::chrono::steady_clock::time_point;
 #define BUFFER_POOL_DEFAULT_QUEUE_TIMEOUT (std::chrono::milliseconds(10000))
 #define DEFAULT_NUM_FRAMES_BEFORE_COLLECTION_START (100)
 
+struct AdditionalData {};
+
+struct IouPipelineData : AdditionalData
+{
+    IouPipelineData(std::vector<net_flow::DetectionBbox> &&detections, std::vector<uint32_t> &&detections_classes_count)
+        : m_detections(std::move(detections)),
+          m_detections_classes_count(std::move(detections_classes_count)) {}
+    std::vector<net_flow::DetectionBbox> m_detections;
+    std::vector<uint32_t> m_detections_classes_count;
+
+    bool operator==(const IouPipelineData &other) const {
+        return m_detections == other.m_detections && m_detections_classes_count == other.m_detections_classes_count;
+    }
+};
+
+struct PixBufferPipelineData : AdditionalData
+{
+    PixBufferPipelineData(const hailo_pix_buffer_t &buffer) : m_pix_buffer(buffer) {};
+    hailo_pix_buffer_t m_pix_buffer;
+};
+
 class BufferPool;
 using BufferPoolPtr = std::shared_ptr<BufferPool>;
 
@@ -40,7 +86,6 @@ public:
         explicit Metadata(PipelineTimePoint start_time);
         // Creates an empty metadata object
         Metadata();
-        
         ~Metadata() = default;
         Metadata(const Metadata &) = default;
         Metadata &operator=(const Metadata &) = delete;
@@ -48,9 +93,17 @@ public:
         Metadata &operator=(Metadata &&other) = default;
 
         PipelineTimePoint get_start_time() const;
+
         void set_start_time(PipelineTimePoint val);
 
+        void set_additional_data(std::shared_ptr<AdditionalData> data) { m_additional_data = data;}
+        template <typename T>
+        std::shared_ptr<T> get_additional_data() {
+            return std::static_pointer_cast<T>(m_additional_data);
+        }
+
     private:
+        std::shared_ptr<AdditionalData> m_additional_data;
         PipelineTimePoint m_start_time;
     };
 
@@ -59,14 +112,17 @@ public:
         FLUSH,
         DEACTIVATE
     };
-    
+
     // Creates an empty PipelineBuffer (with no buffer/memory view)
     PipelineBuffer();
     PipelineBuffer(Type type);
-    PipelineBuffer(MemoryView view, bool should_measure = false);
-    PipelineBuffer(Buffer &&buffer, BufferPoolPtr pool, bool should_measure = false);
+    PipelineBuffer(hailo_status status);
+    PipelineBuffer(MemoryView view, bool is_user_buffer = true, BufferPoolPtr pool = nullptr, bool should_measure = false, hailo_status status = HAILO_SUCCESS);
+    PipelineBuffer(MemoryView view, const TransferDoneCallbackAsyncInfer &exec_done,
+        bool is_user_buffer = true, BufferPoolPtr pool = nullptr, bool should_measure = false, hailo_status status = HAILO_SUCCESS);
+    PipelineBuffer(hailo_pix_buffer_t buffer);
     ~PipelineBuffer();
-    
+
     PipelineBuffer(const PipelineBuffer &) = delete;
     PipelineBuffer &operator=(const PipelineBuffer &) = delete;
     PipelineBuffer(PipelineBuffer &&other);
@@ -76,17 +132,23 @@ public:
     uint8_t* data();
     size_t size() const;
     MemoryView as_view();
+    Expected<hailo_pix_buffer_t> as_hailo_pix_buffer(hailo_format_order_t order = HAILO_FORMAT_ORDER_AUTO);
     Type get_type() const;
     Metadata get_metadata() const;
     void set_metadata(Metadata &&val);
+    void set_additional_data(std::shared_ptr<AdditionalData> data) { m_metadata.set_additional_data(data);}
+    TransferDoneCallbackAsyncInfer get_exec_done_cb() const;
+    hailo_status action_status();
+    void set_action_status(hailo_status status);
 
 private:
     Type m_type;
-    Buffer m_buffer;
-    bool m_should_release_buffer;
     BufferPoolPtr m_pool;
     MemoryView m_view;
+    TransferDoneCallbackAsyncInfer m_exec_done;
     Metadata m_metadata;
+    bool m_is_user_buffer;
+    hailo_status m_action_status;
 
     static PipelineTimePoint add_timestamp(bool should_measure);
 };
@@ -97,21 +159,40 @@ class BufferPool : public std::enable_shared_from_this<BufferPool>
 {
 public:
     static Expected<BufferPoolPtr> create(size_t buffer_size, size_t buffer_count, EventPtr shutdown_event,
-        hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags);
-    BufferPool(size_t buffer_size, bool measure_vstream_latency, SpscQueue<Buffer> &&free_buffers, AccumulatorPtr &&queue_size_accumulator);
+        hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, bool is_empty = false,
+        bool dma_able = false);
+
+    BufferPool(size_t buffer_size, bool is_holding_user_buffers, bool measure_vstream_latency, std::vector<Buffer> &&buffers, SpscQueue<MemoryView> &&free_mem_views,
+        SpscQueue<TransferDoneCallbackAsyncInfer> &&done_cbs, AccumulatorPtr &&queue_size_accumulator, size_t max_buffer_count);
     virtual ~BufferPool() = default;
 
     size_t buffer_size();
+    hailo_status enqueue_buffer(MemoryView mem_view);
+    hailo_status enqueue_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done);
+    hailo_status allocate_buffers(bool is_dma_able);
     Expected<PipelineBuffer> acquire_buffer(std::chrono::milliseconds timeout);
+    Expected<std::shared_ptr<PipelineBuffer>> acquire_buffer_ptr(std::chrono::milliseconds timeout);
     AccumulatorPtr get_queue_size_accumulator();
     Expected<PipelineBuffer> get_available_buffer(PipelineBuffer &&optional, std::chrono::milliseconds timeout);
+    bool is_full();
 
 private:
-    hailo_status release_buffer(Buffer &&buffer);
+    Expected<MemoryView> acquire_free_mem_view(std::chrono::milliseconds timeout);
+    Expected<TransferDoneCallbackAsyncInfer> acquire_on_done_cb(std::chrono::milliseconds timeout);
+    hailo_status release_buffer(MemoryView mem_view);
 
     const size_t m_buffer_size;
+    bool m_is_holding_user_buffers;
+    size_t m_max_buffer_count;
     const bool m_measure_vstream_latency;
-    SpscQueue<Buffer> m_free_buffers;
+
+    // BufferPool can hold allocated buffers (type of Buffer) and buffers that come from the user (type of MemoryView).
+    // To be able to support both types, the queue of the pool holds MemoryViews and to hold the allocated buffers we use a vector.
+    // So when the pool has allocated buffers, it will hold them in the vector and have pointers to them in the queue.
+    // And when the pool holds user buffers, the vector will be empty and only the queue will hold the user's buffers.
+    std::vector<Buffer> m_buffers;
+    SpscQueue<MemoryView> m_free_mem_views;
+    SpscQueue<TransferDoneCallbackAsyncInfer> m_done_cbs;
     AccumulatorPtr m_queue_size_accumulator;
     std::mutex m_release_buffer_mutex;
 
@@ -211,6 +292,7 @@ public:
     hailo_status wait_for_finish();
     hailo_status clear_abort();
     virtual hailo_status run_push(PipelineBuffer &&buffer);
+    void run_push_async(PipelineBuffer &&buffer);
     virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional = PipelineBuffer());
     void set_push_complete_callback(PushCompleteCallback push_complete_callback);
     void set_pull_complete_callback(PullCompleteCallback pull_complete_callback);
@@ -242,7 +324,8 @@ class PipelineElement : public PipelineObject
 {
 public:
     PipelineElement(const std::string &name, DurationCollector &&duration_collector,
-                    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+                    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                    PipelineDirection pipeline_direction);
     virtual ~PipelineElement() = default;
 
     PipelineElement(PipelineElement &&other) = delete;
@@ -258,8 +341,6 @@ public:
     hailo_status abort();
     hailo_status clear_abort();
     hailo_status wait_for_finish();
-    virtual hailo_status run_push(PipelineBuffer &&buffer) = 0;
-    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) = 0;
     AccumulatorPtr get_fps_accumulator();
     AccumulatorPtr get_latency_accumulator();
     virtual std::vector<AccumulatorPtr> get_queue_size_accumulators();
@@ -268,26 +349,27 @@ public:
     const std::vector<PipelinePad> &sinks() const;
     const std::vector<PipelinePad> &sources() const;
     virtual std::string description() const;
-
-    virtual void set_on_cant_pull_callback(std::function<void()> callback)
-    {
-        m_cant_pull_callback = callback;
-    }
-
-    virtual void set_on_can_pull_callback(std::function<void()> callback)
-    {
-        m_can_pull_callback = callback;
-    }
+    virtual void set_on_cant_pull_callback(std::function<void()> callback);
+    virtual void set_on_can_pull_callback(std::function<void()> callback);
+    virtual hailo_status enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name);
+    hailo_status enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done);
+    virtual Expected<bool> are_buffer_pools_full();
+    virtual hailo_status fill_buffer_pools(bool is_dma_able);
+    void handle_non_recoverable_async_error(hailo_status error_status);
 
 protected:
     DurationCollector m_duration_collector;
     std::shared_ptr<std::atomic<hailo_status>> m_pipeline_status;
     std::vector<PipelinePad> m_sinks;
     std::vector<PipelinePad> m_sources;
+    PipelineDirection m_pipeline_direction;
 
     std::function<void()> m_cant_pull_callback;
     std::function<void()> m_can_pull_callback;
 
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) = 0;
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) = 0;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) = 0;
     virtual std::vector<PipelinePad*> execution_pads() = 0;
     virtual hailo_status execute_activate();
     virtual hailo_status execute_deactivate();
@@ -299,6 +381,8 @@ protected:
     virtual hailo_status execute_wait_for_finish();
 
     virtual hailo_status execute(std::function<hailo_status(PipelinePad*)>);
+
+    friend class PipelinePad;
 };
 
 // An element with one source pad only (generates data)
@@ -306,7 +390,8 @@ class SourceElement : public PipelineElement
 {
 public:
     SourceElement(const std::string &name, DurationCollector &&duration_collector,
-                  std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+                  std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                  PipelineDirection pipeline_direction);
     PipelinePad &source();
 
 protected:
@@ -318,7 +403,8 @@ class SinkElement : public PipelineElement
 {
 public:
     SinkElement(const std::string &name, DurationCollector &&duration_collector,
-                std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+                std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                PipelineDirection pipeline_direction);
     PipelinePad &sink();
 
 protected:
@@ -330,7 +416,8 @@ class IntermediateElement : public PipelineElement
 {
 public:
     IntermediateElement(const std::string &name, DurationCollector &&duration_collector,
-                        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+                        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                        PipelineDirection pipeline_direction);
     virtual PipelinePad &next_pad() = 0;
 
 protected:
@@ -341,15 +428,23 @@ class FilterElement : public IntermediateElement
 {
 public:
     FilterElement(const std::string &name, DurationCollector &&duration_collector,
-                  std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+                  std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                  PipelineDirection pipeline_direction, BufferPoolPtr buffer_pool, std::chrono::milliseconds timeout);
     virtual ~FilterElement() = default;
 
-    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) override;
     virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+    virtual hailo_status enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name) override;
+    virtual Expected<bool> are_buffer_pools_full() override;
+    virtual hailo_status fill_buffer_pools(bool is_dma_able) override;
+    virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
 
 protected:
     // The optional buffer functions as an output buffer that the user can write to instead of acquiring a new buffer
     virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) = 0;
+    BufferPoolPtr m_pool;
+    std::chrono::milliseconds m_timeout;
 };
 
 class BaseQueueElement : public IntermediateElement
@@ -367,7 +462,8 @@ protected:
     BaseQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
         std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
         AccumulatorPtr &&queue_size_accumulator, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
-        Event &&activation_event, Event &&deactivation_event);
+        Event &&activation_event, Event &&deactivation_event,
+        PipelineDirection pipeline_direction);
 
     hailo_status pipeline_status();
 
@@ -377,12 +473,16 @@ protected:
     virtual hailo_status execute_clear_abort() override;
     virtual hailo_status execute_wait_for_finish() override;
 
+    virtual hailo_status enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name) override;
+    virtual Expected<bool> are_buffer_pools_full() override;
+    virtual hailo_status fill_buffer_pools(bool is_dma_able) override;
+
     /// Starts/stops the queue thread. This functions needs to be called on subclasses ctor and dtor
     /// accordingly because otherwise, if we will start/stop thread in this class we will face pure-call
     /// to `run_in_thread`.
     /// This functions don't return status because they are meant to be called on ctor and dtor 
-    void start_thread();
-    void stop_thread();
+    virtual void start_thread();
+    virtual void stop_thread();
 
     virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
 
@@ -407,15 +507,18 @@ class PushQueueElement : public BaseQueueElement
 public:
     static Expected<std::shared_ptr<PushQueueElement>> create(const std::string &name, std::chrono::milliseconds timeout,
         size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
-        std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction = PipelineDirection::PUSH);
     static Expected<std::shared_ptr<PushQueueElement>> create(const std::string &name, const hailo_vstream_params_t &vstream_params,
-        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PUSH);
     PushQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
         std::chrono::milliseconds timeout, DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
-        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event);
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event,
+        PipelineDirection pipeline_direction, bool should_start_thread = true);
     virtual ~PushQueueElement();
 
-    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) override;
     virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
     virtual PipelinePad &next_pad() override;
 
@@ -426,20 +529,45 @@ protected:
     virtual hailo_status execute_abort() override;
 };
 
+class AsyncPushQueueElement : public PushQueueElement
+{
+public:
+    static Expected<std::shared_ptr<AsyncPushQueueElement>> create(const std::string &name, std::chrono::milliseconds timeout,
+        size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction = PipelineDirection::PUSH);
+    static Expected<std::shared_ptr<AsyncPushQueueElement>> create(const std::string &name, const ElementBuildParams &build_params,
+        PipelineDirection pipeline_direction);
+    AsyncPushQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
+        std::chrono::milliseconds timeout, DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event,
+        PipelineDirection pipeline_direction);
+
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+
+protected:
+    virtual hailo_status run_in_thread() override;
+    virtual std::string thread_name() override { return "ASYNC_PUSH_Q"; };
+    virtual void start_thread() override;
+};
+
 class PullQueueElement : public BaseQueueElement
 {
 public:
     static Expected<std::shared_ptr<PullQueueElement>> create(const std::string &name, std::chrono::milliseconds timeout,
         size_t queue_size, hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event,
-        std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction = PipelineDirection::PULL);
     static Expected<std::shared_ptr<PullQueueElement>> create(const std::string &name, const hailo_vstream_params_t &vstream_params,
-        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL);
     PullQueueElement(SpscQueue<PipelineBuffer> &&queue, EventPtr shutdown_event, const std::string &name,
         std::chrono::milliseconds timeout, DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
-        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event);
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event,
+        PipelineDirection pipeline_direction);
     virtual ~PullQueueElement();
 
-    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) override;
     virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
     virtual PipelinePad &next_pad() override;
 
@@ -469,12 +597,15 @@ class UserBufferQueueElement : public PullQueueElement
 {
 public:
     static Expected<std::shared_ptr<UserBufferQueueElement>> create(const std::string &name, std::chrono::milliseconds timeout,
-        hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        hailo_pipeline_elem_stats_flags_t flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL);
     static Expected<std::shared_ptr<UserBufferQueueElement>> create(const std::string &name, const hailo_vstream_params_t &vstream_params,
-        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL);
     UserBufferQueueElement(SpscQueue<PipelineBuffer> &&queue, SpscQueue<PipelineBuffer> &&full_buffer_queue, EventPtr shutdown_event,
         const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector, AccumulatorPtr &&queue_size_accumulator,
-        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event);
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, Event &&activation_event, Event &&deactivation_event,
+        PipelineDirection pipeline_direction);
 
     virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
 
@@ -500,29 +631,50 @@ class BaseMuxElement : public PipelineElement
 {
 public:
     BaseMuxElement(size_t sink_count, const std::string &name, std::chrono::milliseconds timeout,
-        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+        BufferPoolPtr buffer_pool, PipelineDirection pipeline_direction = PipelineDirection::PULL);
     virtual ~BaseMuxElement() = default;
 
-    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) override;
     virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+    virtual hailo_status enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name) override;
+    virtual Expected<bool> are_buffer_pools_full() override;
+    virtual hailo_status fill_buffer_pools(bool is_dma_able) override;
 
 protected:
     virtual Expected<PipelineBuffer> action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional) = 0;
     virtual std::vector<PipelinePad*> execution_pads() override;
 
     std::chrono::milliseconds m_timeout;
+    BufferPoolPtr m_pool;
+
+private:
+    bool has_all_sinks_arrived();
+    std::unordered_map<std::string, bool> m_sink_has_arrived;
+    std::mutex m_mutex;
+    std::unordered_map<std::string, uint32_t> m_index_of_sink;
+    std::unordered_map<std::string, PipelineBuffer> m_input_buffers;
+    std::vector<PipelinePad*> m_next_pads;
+    std::condition_variable m_cv;
 };
 
 class BaseDemuxElement : public PipelineElement
 {
 public:
     BaseDemuxElement(size_t source_count, const std::string &name, std::chrono::milliseconds timeout,
-        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+        std::vector<BufferPoolPtr> pools, PipelineDirection pipeline_direction);
     virtual ~BaseDemuxElement() = default;
 
-    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) override;
     virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
     hailo_status set_timeout(std::chrono::milliseconds timeout);
+    virtual hailo_status enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name) override;
+    virtual Expected<bool> are_buffer_pools_full() override;
+    virtual hailo_status fill_buffer_pools(bool is_dma_able) override;
+    hailo_status fill_buffer_pool(bool is_dma_able, size_t pool_id);
 
 protected:
     virtual hailo_status execute_activate() override;
@@ -533,18 +685,19 @@ protected:
     virtual std::vector<PipelinePad*> execution_pads() override;
 
     std::chrono::milliseconds m_timeout;
+    std::vector<BufferPoolPtr> m_pools;
 
 private:
-    bool were_all_sinks_called();
-    PipelinePad &next_pad();
+    bool were_all_srcs_arrived();
 
     std::atomic_bool m_is_activated;
     std::atomic_bool m_was_stream_aborted;
-    std::unordered_map<const PipelinePad*, uint32_t> m_index_of_source;
+    std::unordered_map<std::string, uint32_t> m_index_of_source;
     std::vector<bool> m_was_source_called;
     std::vector<PipelineBuffer> m_buffers_for_action;
     std::mutex m_mutex;
     std::condition_variable m_cv;
+    std::vector<PipelinePad*> m_next_pads;
 };
 
 enum class AccumulatorType
index 426bd4b88597383a0c983006f4afac972988631c..dc2115eaf05e259c74ed7b0b23297953804a3b3f 100644 (file)
@@ -7,14 +7,32 @@
  * @brief Implementation of the virtual stream
  **/
 
+#include "common/logger_macros.hpp"
 #include "common/utils.hpp"
+#include "hailo/expected.hpp"
+#include "hailo/hailort.h"
+#include "hailo/stream.hpp"
 #include "hailo/vstream.hpp"
+#include "hailo/hef.hpp"
+#include "hailo/vdevice.hpp"
 #include "hailo/hailort_defaults.hpp"
 #include "hailo/hailort_common.hpp"
+#include "net_flow/pipeline/pipeline.hpp"
+#include "stream_common/stream_internal.hpp"
+#include "net_flow/ops/nms_post_process.hpp"
+#include "net_flow/ops/ssd_post_process.hpp"
+#include "net_flow/ops/yolox_post_process.hpp"
+#include "net_flow/ops/yolov5_post_process.hpp"
+#include "net_flow/ops/argmax_post_process.hpp"
+#include "net_flow/ops/softmax_post_process.hpp"
+#include "net_flow/ops/yolov5_seg_post_process.hpp"
 
 #include "common/runtime_statistics_internal.hpp"
 
 #include "net_flow/pipeline/vstream_internal.hpp"
+#include <cstdint>
+#include <math.h>
+#include <memory>
 
 #ifdef HAILO_SUPPORT_MULTI_PROCESS
 #include "rpc/rpc_definitions.hpp"
@@ -34,23 +52,25 @@ static std::map<std::string, std::vector<AccumulatorPtr>> get_pipeline_queue_siz
     const std::vector<std::shared_ptr<PipelineElement>> &pipeline);
 
 Expected<std::shared_ptr<PreInferElement>> PreInferElement::create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
-    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info,
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos,
     const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
-    hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+    hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+    PipelineDirection pipeline_direction, bool is_dma_able)
 {
     auto transform_context = InputTransformContext::create(src_image_shape, src_format, dst_image_shape, dst_format,
-        dst_quant_info);
+        dst_quant_infos);
     CHECK_EXPECTED(transform_context, "Failed Creating InputTransformContext");
 
+    bool is_empty = false;
     auto buffer_pool = BufferPool::create(transform_context.value()->get_dst_frame_size(), buffer_pool_size, shutdown_event, elem_flags,
-        vstream_flags);
+        vstream_flags, is_empty, is_dma_able);
     CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool for {}", name);
 
     auto duration_collector = DurationCollector::create(elem_flags);
     CHECK_EXPECTED(duration_collector);
 
     auto pre_infer_elem_ptr = make_shared_nothrow<PreInferElement>(transform_context.release(),
-        buffer_pool.release(), name, timeout, duration_collector.release(), std::move(pipeline_status));
+        buffer_pool.release(), name, timeout, duration_collector.release(), std::move(pipeline_status), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != pre_infer_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
 
     LOGGER__INFO("Created {}", pre_infer_elem_ptr->name());
@@ -59,21 +79,29 @@ Expected<std::shared_ptr<PreInferElement>> PreInferElement::create(const hailo_3
 }
 
 Expected<std::shared_ptr<PreInferElement>> PreInferElement::create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
-        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const std::string &name,
-        const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, const std::string &name,
+        const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction, bool is_dma_able)
 {
-    return PreInferElement::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_info, name,
+    return PreInferElement::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_infos, name,
         std::chrono::milliseconds(vstream_params.timeout_ms), vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags,
-        vstream_params.vstream_stats_flags, shutdown_event, pipeline_status);
+        vstream_params.vstream_stats_flags, shutdown_event, pipeline_status, pipeline_direction, is_dma_able);
+}
+
+Expected<std::shared_ptr<PreInferElement>> PreInferElement::create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos,
+    const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction, bool is_dma_able)
+{
+    return PreInferElement::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_infos, name,
+        build_params.timeout, build_params.buffer_pool_size, build_params.elem_stats_flags, build_params.vstream_stats_flags,
+        build_params.shutdown_event, build_params.pipeline_status, pipeline_direction, is_dma_able);
 }
 
 PreInferElement::PreInferElement(std::unique_ptr<InputTransformContext> &&transform_context, BufferPoolPtr buffer_pool,
                                 const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
-                                std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
-    FilterElement(name, std::move(duration_collector), std::move(pipeline_status)),
-    m_transform_context(std::move(transform_context)),
-    m_pool(buffer_pool),
-    m_timeout(timeout)
+                                std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, PipelineDirection pipeline_direction) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction, buffer_pool, timeout),
+    m_transform_context(std::move(transform_context))
 {}
 
 Expected<PipelineBuffer> PreInferElement::run_pull(PipelineBuffer &&/*optional*/, const PipelinePad &/*source*/)
@@ -82,14 +110,6 @@ Expected<PipelineBuffer> PreInferElement::run_pull(PipelineBuffer &&/*optional*/
     return make_unexpected(HAILO_INVALID_OPERATION);
 }
 
-std::vector<AccumulatorPtr> PreInferElement::get_queue_size_accumulators()
-{
-    if (nullptr == m_pool->get_queue_size_accumulator()) {
-        return std::vector<AccumulatorPtr>();
-    }
-    return {m_pool->get_queue_size_accumulator()};
-}
-
 PipelinePad &PreInferElement::next_pad()
 {
     // Note: The next elem to be run is downstream from this elem (i.e. buffers are pushed)
@@ -121,6 +141,9 @@ Expected<PipelineBuffer> PreInferElement::action(PipelineBuffer &&input, Pipelin
     m_duration_collector.start_measurement();
     const auto status = m_transform_context->transform(input.as_view(), dst);
     m_duration_collector.complete_measurement();
+    auto exec_done_cb = input.get_exec_done_cb();
+    CompletionInfoAsyncInferInternal completion_info {status};
+    exec_done_cb(completion_info);
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     // Note: The latency to be measured starts as the input buffer is sent to the InputVStream (via write())
@@ -129,26 +152,196 @@ Expected<PipelineBuffer> PreInferElement::action(PipelineBuffer &&input, Pipelin
     return transformed_buffer.release();
 }
 
+Expected<std::shared_ptr<ConvertNmsToDetectionsElement>> ConvertNmsToDetectionsElement::create(
+        const hailo_nms_info_t &nms_info, const std::string &name, hailo_pipeline_elem_stats_flags_t elem_flags,
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, std::chrono::milliseconds timeout,
+        hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, size_t buffer_pool_size,
+        PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    // The actual data will be in the metadata
+    auto frame_size = 0;
+    auto buffer_pool_expected = BufferPool::create(frame_size, buffer_pool_size, shutdown_event, elem_flags, vstream_flags, is_last_copy_element);
+    CHECK_EXPECTED(buffer_pool_expected, "Failed creating BufferPool for {}", name);
+    auto buffer_pool = buffer_pool_expected.release();
+
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    auto convert_nms_to_detections_elem_ptr = make_shared_nothrow<ConvertNmsToDetectionsElement>(std::move(nms_info),
+        name, duration_collector.release(), std::move(pipeline_status), buffer_pool, timeout, pipeline_direction);
+    CHECK_AS_EXPECTED(nullptr != convert_nms_to_detections_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", convert_nms_to_detections_elem_ptr->name());
+
+    return convert_nms_to_detections_elem_ptr;
+}
+
+Expected<std::shared_ptr<ConvertNmsToDetectionsElement>> ConvertNmsToDetectionsElement::create(
+        const hailo_nms_info_t &nms_info, const std::string &name, const ElementBuildParams &build_params,
+        PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    return ConvertNmsToDetectionsElement::create(nms_info, name, build_params.elem_stats_flags, build_params.pipeline_status,
+        build_params.timeout, build_params.vstream_stats_flags, build_params.shutdown_event, build_params.buffer_pool_size,
+        pipeline_direction, is_last_copy_element);
+}
+
+ConvertNmsToDetectionsElement::ConvertNmsToDetectionsElement(const hailo_nms_info_t &&nms_info, const std::string &name,
+                                   DurationCollector &&duration_collector,
+                                   std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                                   BufferPoolPtr buffer_pool, std::chrono::milliseconds timeout, PipelineDirection pipeline_direction) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction, buffer_pool, timeout),
+    m_nms_info(std::move(nms_info))
+{}
+
+hailo_status ConvertNmsToDetectionsElement::run_push(PipelineBuffer &&buffer, const PipelinePad &sink)
+{
+    CHECK(PipelineDirection::PUSH == m_pipeline_direction, HAILO_INVALID_OPERATION,
+        "ConvertNmsToDetectionsElement {} does not support run_push operation", name());
+    return FilterElement::run_push(std::move(buffer), sink);
+}
+
+PipelinePad &ConvertNmsToDetectionsElement::next_pad()
+{
+    if (PipelineDirection::PUSH == m_pipeline_direction){
+        return *m_sources[0].next();
+    }
+    return *m_sinks[0].prev();
+}
+
+std::string ConvertNmsToDetectionsElement::description() const
+{
+    std::stringstream element_description;
+    element_description << "(" << this->name() << ")";
+    return element_description.str();
+}
+
+Expected<PipelineBuffer> ConvertNmsToDetectionsElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
+{
+    auto buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
+        return make_unexpected(buffer.status());
+    }
+    CHECK_EXPECTED(buffer, "{} (D2H) failed with status={}", name(), buffer.status());
+
+    buffer->set_metadata(input.get_metadata());
+
+    m_duration_collector.start_measurement();
+
+    auto detections_pair = net_flow::NmsPostProcessOp::transform__d2h_NMS_DETECTIONS(input.data(), m_nms_info);
+    auto detections_pipeline_data = make_shared_nothrow<IouPipelineData>
+        (std::move(detections_pair.first),std::move(detections_pair.second));
+    buffer->set_additional_data(detections_pipeline_data);
+
+    m_duration_collector.complete_measurement();
+
+    return buffer.release();
+}
+
+Expected<std::shared_ptr<FillNmsFormatElement>> FillNmsFormatElement::create(const hailo_nms_info_t nms_info,
+        const hailo_format_t &dst_format, const net_flow::NmsPostProcessConfig nms_config, const std::string &name,
+        hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        std::chrono::milliseconds timeout, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
+        size_t buffer_pool_size, PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    auto frame_size = HailoRTCommon::get_nms_host_frame_size(nms_info, dst_format);
+    auto buffer_pool_expected = BufferPool::create(frame_size, buffer_pool_size, shutdown_event, elem_flags, vstream_flags, is_last_copy_element);
+    CHECK_EXPECTED(buffer_pool_expected, "Failed creating BufferPool for {}", name);
+    auto buffer_pool = buffer_pool_expected.release();
+
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    auto fill_nms_format_element = make_shared_nothrow<FillNmsFormatElement>(std::move(nms_config),
+        name, duration_collector.release(), std::move(pipeline_status), buffer_pool, timeout, pipeline_direction);
+    CHECK_AS_EXPECTED(nullptr != fill_nms_format_element, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", fill_nms_format_element->name());
+
+    return fill_nms_format_element;
+}
+
+Expected<std::shared_ptr<FillNmsFormatElement>> FillNmsFormatElement::create(const hailo_nms_info_t nms_info,
+        const hailo_format_t &dst_format, const net_flow::NmsPostProcessConfig nms_config, const std::string &name,
+        const ElementBuildParams &build_params, PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    return FillNmsFormatElement::create(nms_info, dst_format, nms_config, name, build_params.elem_stats_flags,
+        build_params.pipeline_status, build_params.timeout, build_params.vstream_stats_flags,
+        build_params.shutdown_event, build_params.buffer_pool_size, pipeline_direction, is_last_copy_element);
+}
+
+FillNmsFormatElement::FillNmsFormatElement(const net_flow::NmsPostProcessConfig &&nms_config, const std::string &name,
+                                   DurationCollector &&duration_collector,
+                                   std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                                   BufferPoolPtr buffer_pool, std::chrono::milliseconds timeout, PipelineDirection pipeline_direction) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction, buffer_pool, timeout),
+    m_nms_config(std::move(nms_config))
+{}
+
+hailo_status FillNmsFormatElement::run_push(PipelineBuffer &&buffer, const PipelinePad &sink)
+{
+    CHECK(PipelineDirection::PUSH == m_pipeline_direction, HAILO_INVALID_OPERATION,
+        "FillNmsFormatElement {} does not support run_push operation", name());
+    return FilterElement::run_push(std::move(buffer), sink);
+}
+
+PipelinePad &FillNmsFormatElement::next_pad()
+{
+    if (PipelineDirection::PUSH == m_pipeline_direction){
+        return *m_sources[0].next();
+    }
+    return *m_sinks[0].prev();
+}
+
+std::string FillNmsFormatElement::description() const
+{
+    std::stringstream element_description;
+    element_description << "(" << this->name() << ")";
+    return element_description.str();
+}
+
+Expected<PipelineBuffer> FillNmsFormatElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
+{
+    auto buffer_expected = m_pool->get_available_buffer(std::move(optional), m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer_expected.status()) {
+        return make_unexpected(buffer_expected.status());
+    }
+    CHECK_EXPECTED(buffer_expected, "{} (D2H) failed with status={}", name(), buffer_expected.status());
+    auto buffer = buffer_expected.release();
+
+    buffer.set_metadata(input.get_metadata());
+
+    m_duration_collector.start_measurement();
+
+    auto detections = input.get_metadata().get_additional_data<IouPipelineData>();
+    auto dst = buffer.as_view();
+    net_flow::NmsPostProcessOp::fill_nms_format_buffer(dst, detections->m_detections, detections->m_detections_classes_count,
+        m_nms_config);
+
+    m_duration_collector.complete_measurement();
+
+    return buffer;
+}
+
 Expected<std::shared_ptr<PostInferElement>> PostInferElement::create(const hailo_3d_image_shape_t &src_image_shape,
     const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
-    const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info, const std::string &name,
+    const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info, const std::string &name,
     hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
     std::chrono::milliseconds timeout, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
-    size_t buffer_pool_size)
+    size_t buffer_pool_size, PipelineDirection pipeline_direction, bool is_last_copy_element)
 {
     auto frame_size = (dst_format.order == HAILO_FORMAT_ORDER_HAILO_NMS) ? HailoRTCommon::get_nms_host_frame_size(nms_info, dst_format) : HailoRTCommon::get_frame_size(dst_image_shape, dst_format);
-    auto buffer_pool_expected = BufferPool::create(frame_size, buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
+    auto buffer_pool_expected = BufferPool::create(frame_size, buffer_pool_size, shutdown_event, elem_flags, vstream_flags, is_last_copy_element);
     CHECK_EXPECTED(buffer_pool_expected, "Failed creating BufferPool for {}", name);
 
     auto transform_context = OutputTransformContext::create(src_image_shape, src_format, dst_image_shape, dst_format,
-        dst_quant_info, nms_info);
+        dst_quant_infos, nms_info);
     CHECK_EXPECTED(transform_context, "Failed Creating OutputTransformContext");
 
     auto duration_collector = DurationCollector::create(elem_flags);
     CHECK_EXPECTED(duration_collector);
 
     auto post_infer_elem_ptr = make_shared_nothrow<PostInferElement>(transform_context.release(),
-        name, duration_collector.release(), std::move(pipeline_status), buffer_pool_expected.release(), timeout);
+        name, duration_collector.release(), std::move(pipeline_status), buffer_pool_expected.release(), timeout, pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != post_infer_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
 
     LOGGER__INFO("Created {}", post_infer_elem_ptr->name());
@@ -157,34 +350,55 @@ Expected<std::shared_ptr<PostInferElement>> PostInferElement::create(const hailo
 }
 
 Expected<std::shared_ptr<PostInferElement>> PostInferElement::create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
-        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info,
+        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info,
         const std::string &name, const hailo_vstream_params_t &vstream_params, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
-        EventPtr shutdown_event)
+        EventPtr shutdown_event, PipelineDirection pipeline_direction, bool is_last_copy_element)
 {
-    return PostInferElement::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_info, nms_info,
+    return PostInferElement::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_infos, nms_info,
         name, vstream_params.pipeline_elements_stats_flags, pipeline_status, std::chrono::milliseconds(vstream_params.timeout_ms),
-        vstream_params.vstream_stats_flags, shutdown_event, vstream_params.queue_size);
+        vstream_params.vstream_stats_flags, shutdown_event, vstream_params.queue_size, pipeline_direction, is_last_copy_element);
+}
+
+Expected<std::shared_ptr<PostInferElement>> PostInferElement::create(const hailo_3d_image_shape_t &src_image_shape,
+    const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
+    const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info, const std::string &name,
+    const ElementBuildParams &build_params, PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    return PostInferElement::create(src_image_shape, src_format, dst_image_shape, dst_format,
+        dst_quant_infos, nms_info, name, build_params.elem_stats_flags, build_params.pipeline_status,
+        build_params.timeout, build_params.vstream_stats_flags, build_params.shutdown_event, build_params.buffer_pool_size,
+        pipeline_direction, is_last_copy_element);
 }
 
 PostInferElement::PostInferElement(std::unique_ptr<OutputTransformContext> &&transform_context, const std::string &name,
                                    DurationCollector &&duration_collector,
                                    std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
-                                   BufferPoolPtr buffer_pool, std::chrono::milliseconds timeout) :
-    FilterElement(name, std::move(duration_collector), std::move(pipeline_status)),
-    m_transform_context(std::move(transform_context)),
-    m_pool(buffer_pool),
-    m_timeout(timeout)
+                                   BufferPoolPtr buffer_pool, std::chrono::milliseconds timeout,
+                                   PipelineDirection pipeline_direction) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction, buffer_pool, timeout),
+    m_transform_context(std::move(transform_context))
 {}
 
-hailo_status PostInferElement::run_push(PipelineBuffer &&/*buffer*/)
+Expected<PipelineBuffer> PostInferElement::run_pull(PipelineBuffer &&optional, const PipelinePad &source)
 {
-    LOGGER__ERROR("PostInferElement does not support run_push operation");
-    return HAILO_INVALID_OPERATION;
+    CHECK_AS_EXPECTED(m_pipeline_direction == PipelineDirection::PULL, HAILO_INVALID_OPERATION,
+        "PostInferElement {} does not support run_pull operation", name()
+    );
+    return FilterElement::run_pull(std::move(optional), source);
+}
+
+hailo_status PostInferElement::run_push(PipelineBuffer &&buffer, const PipelinePad &sink)
+{
+    CHECK(PipelineDirection::PUSH == m_pipeline_direction, HAILO_INVALID_OPERATION,
+        "PostInferElement {} does not support run_push operation", name());
+    return FilterElement::run_push(std::move(buffer), sink);
 }
 
 PipelinePad &PostInferElement::next_pad()
 {
-    // Note: The next elem to be run is upstream from this elem (i.e. buffers are pulled)
+    if (PipelineDirection::PUSH == m_pipeline_direction){
+        return *m_sources[0].next();
+    }
     return *m_sinks[0].prev();
 }
 
@@ -215,14 +429,6 @@ Expected<PipelineBuffer> PostInferElement::action(PipelineBuffer &&input, Pipeli
     return buffer.release();
 }
 
-std::vector<AccumulatorPtr> PostInferElement::get_queue_size_accumulators()
-{
-    if (nullptr == m_pool->get_queue_size_accumulator()) {
-        return std::vector<AccumulatorPtr>();
-    }
-    return {m_pool->get_queue_size_accumulator()};
-}
-
 static hailo_nms_info_t fuse_nms_info(const std::vector<hailo_nms_info_t> &nms_infos)
 {
     hailo_nms_info_t fused_info = nms_infos[0];
@@ -230,26 +436,117 @@ static hailo_nms_info_t fuse_nms_info(const std::vector<hailo_nms_info_t> &nms_i
     fused_info.number_of_classes = 0;
     for (const auto &nms_info : nms_infos) {
         fused_info.number_of_classes += nms_info.number_of_classes;
+        assert(nms_infos[0].max_bboxes_per_class == nms_info.max_bboxes_per_class);
+        assert(nms_infos[0].bbox_size == nms_info.bbox_size);
+        assert(nms_infos[0].chunks_per_frame == nms_info.chunks_per_frame);
+        assert(nms_infos[0].burst_size == nms_info.burst_size);
+        assert(nms_infos[0].burst_type == nms_info.burst_type);
     }
-
     return fused_info;
 }
 
+Expected<std::shared_ptr<RemoveOverlappingBboxesElement>> RemoveOverlappingBboxesElement::create(
+        const net_flow::NmsPostProcessConfig nms_config, const std::string &name, hailo_pipeline_elem_stats_flags_t elem_flags,
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, std::chrono::milliseconds timeout, hailo_vstream_stats_flags_t vstream_flags,
+        EventPtr shutdown_event, size_t buffer_pool_size, PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    // The actual data will be in the metadata
+    auto frame_size = 0;
+    auto buffer_pool_expected = BufferPool::create(frame_size, buffer_pool_size, shutdown_event, elem_flags, vstream_flags, is_last_copy_element);
+    CHECK_EXPECTED(buffer_pool_expected, "Failed creating BufferPool for {}", name);
+    auto buffer_pool = buffer_pool_expected.release();
+
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    auto convert_nms_removed_overlapping_elem_ptr = make_shared_nothrow<RemoveOverlappingBboxesElement>(std::move(nms_config),
+        name, duration_collector.release(), std::move(pipeline_status), buffer_pool, timeout, pipeline_direction);
+    CHECK_AS_EXPECTED(nullptr != convert_nms_removed_overlapping_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", convert_nms_removed_overlapping_elem_ptr->name());
+
+    return convert_nms_removed_overlapping_elem_ptr;
+}
+
+Expected<std::shared_ptr<RemoveOverlappingBboxesElement>> RemoveOverlappingBboxesElement::create(const net_flow::NmsPostProcessConfig nms_config,
+    const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    return RemoveOverlappingBboxesElement::create(nms_config, name,
+        build_params.elem_stats_flags, build_params.pipeline_status, build_params.timeout, build_params.vstream_stats_flags,
+        build_params.shutdown_event, build_params.buffer_pool_size, pipeline_direction, is_last_copy_element);
+}
+
+RemoveOverlappingBboxesElement::RemoveOverlappingBboxesElement(const net_flow::NmsPostProcessConfig &&nms_config, const std::string &name,
+                                   DurationCollector &&duration_collector,
+                                   std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                                   BufferPoolPtr buffer_pool, std::chrono::milliseconds timeout,
+                                   PipelineDirection pipeline_direction) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction, buffer_pool, timeout),
+    m_nms_config(std::move(nms_config))
+{}
+
+hailo_status RemoveOverlappingBboxesElement::run_push(PipelineBuffer &&buffer, const PipelinePad &sink)
+{
+    CHECK(PipelineDirection::PUSH == m_pipeline_direction, HAILO_INVALID_OPERATION,
+        "RemoveOverlappingBboxesElement {} does not support run_push operation", name());
+    return FilterElement::run_push(std::move(buffer), sink);
+}
+
+PipelinePad &RemoveOverlappingBboxesElement::next_pad()
+{
+    if (PipelineDirection::PUSH == m_pipeline_direction){
+        return *m_sources[0].next();
+    }
+    return *m_sinks[0].prev();
+}
+
+std::string RemoveOverlappingBboxesElement::description() const
+{
+    std::stringstream element_description;
+    element_description << "(" << this->name() << ")";
+    return element_description.str();
+}
+
+Expected<PipelineBuffer> RemoveOverlappingBboxesElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
+{
+    auto buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
+        return make_unexpected(buffer.status());
+    }
+    CHECK_EXPECTED(buffer, "{} (D2H) failed with status={}", name(), buffer.status());
+
+    buffer->set_metadata(input.get_metadata());
+
+    m_duration_collector.start_measurement();
+    auto detections_pipeline_data = input.get_metadata().get_additional_data<IouPipelineData>();
+
+    net_flow::NmsPostProcessOp::remove_overlapping_boxes(detections_pipeline_data->m_detections,
+        detections_pipeline_data->m_detections_classes_count, m_nms_config.nms_iou_th);
+    m_duration_collector.complete_measurement();
+
+    return buffer.release();
+}
+
 Expected<std::shared_ptr<NmsPostProcessMuxElement>> NmsPostProcessMuxElement::create(std::shared_ptr<net_flow::Op> nms_op,
-    hailo_nms_info_t nms_info, const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
+    const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
     hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
-    std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction, bool is_last_copy_element)
 {
     assert(nms_op->outputs_metadata().size() == 1);
-    auto buffer_pool = BufferPool::create(HailoRTCommon::get_nms_host_frame_size(nms_info, nms_op->outputs_metadata().begin()->second.format),
-        buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
+    auto vstream_info = nms_op->metadata()->get_output_vstream_info();
+    CHECK_EXPECTED(vstream_info);
+
+    auto buffer_size = HailoRTCommon::get_nms_host_frame_size(nms_op->metadata()->get_output_vstream_info()->nms_shape,
+        nms_op->outputs_metadata().begin()->second.format);
+
+    auto buffer_pool = BufferPool::create(buffer_size, buffer_pool_size, shutdown_event, elem_flags, vstream_flags, is_last_copy_element);
     CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool");
 
     auto duration_collector = DurationCollector::create(elem_flags);
     CHECK_EXPECTED(duration_collector);
 
     auto nms_elem_ptr = make_shared_nothrow<NmsPostProcessMuxElement>(nms_op, buffer_pool.release(),
-        name, timeout, duration_collector.release(), std::move(pipeline_status));
+        name, timeout, duration_collector.release(), std::move(pipeline_status), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != nms_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
 
     LOGGER__INFO("Created {}", nms_elem_ptr->name());
@@ -257,21 +554,30 @@ Expected<std::shared_ptr<NmsPostProcessMuxElement>> NmsPostProcessMuxElement::cr
 }
 
 Expected<std::shared_ptr<NmsPostProcessMuxElement>> NmsPostProcessMuxElement::create(std::shared_ptr<net_flow::Op> nms_op,
-        hailo_nms_info_t nms_info, const std::string &name, const hailo_vstream_params_t &vstream_params,
-        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+    const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    return NmsPostProcessMuxElement::create(nms_op, name, build_params.timeout,
+        build_params.buffer_pool_size, build_params.elem_stats_flags, build_params.vstream_stats_flags,
+        build_params.shutdown_event, build_params.pipeline_status, pipeline_direction, is_last_copy_element);
+}
+
+Expected<std::shared_ptr<NmsPostProcessMuxElement>> NmsPostProcessMuxElement::create(std::shared_ptr<net_flow::Op> nms_op,
+       const std::string &name, const hailo_vstream_params_t &vstream_params,
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction, bool is_last_copy_element)
 {
-    return NmsPostProcessMuxElement::create(nms_op, nms_info, name, std::chrono::milliseconds(vstream_params.timeout_ms),
+    return NmsPostProcessMuxElement::create(nms_op, name, std::chrono::milliseconds(vstream_params.timeout_ms),
         vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, vstream_params.vstream_stats_flags, shutdown_event,
-        pipeline_status);
+        pipeline_status, pipeline_direction, is_last_copy_element);
 }
 
 NmsPostProcessMuxElement::NmsPostProcessMuxElement(std::shared_ptr<net_flow::Op> nms_op, BufferPoolPtr &&pool,
                                                    const std::string &name, std::chrono::milliseconds timeout,
                                                    DurationCollector &&duration_collector,
-                                                   std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
-    BaseMuxElement(nms_op->inputs_metadata().size(), name, timeout, std::move(duration_collector), std::move(pipeline_status)),
-    m_nms_op(nms_op),
-    m_pool(std::move(pool))
+                                                   std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                                                   PipelineDirection pipeline_direction) :
+    BaseMuxElement(nms_op->inputs_metadata().size(), name, timeout, std::move(duration_collector), std::move(pipeline_status),
+        std::move(pool), pipeline_direction),
+    m_nms_op(nms_op)
 {}
 
 std::vector<AccumulatorPtr> NmsPostProcessMuxElement::get_queue_size_accumulators()
@@ -296,7 +602,7 @@ Expected<PipelineBuffer> NmsPostProcessMuxElement::action(std::vector<PipelineBu
     CHECK_EXPECTED(acquired_buffer);
     outputs.insert({"", acquired_buffer.value().as_view()}); // TODO: fill with correct name
     m_duration_collector.start_measurement();
-    
+
     auto post_process_result = m_nms_op->execute(inputs, outputs);
     m_duration_collector.complete_measurement();
     CHECK_SUCCESS_AS_EXPECTED(post_process_result);
@@ -306,18 +612,18 @@ Expected<PipelineBuffer> NmsPostProcessMuxElement::action(std::vector<PipelineBu
 Expected<std::shared_ptr<NmsMuxElement>> NmsMuxElement::create(const std::vector<hailo_nms_info_t> &nms_infos,
     const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
     hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
-    std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction, bool is_last_copy_element)
 {
     const auto &fused_info = fuse_nms_info(nms_infos);
     auto buffer_pool = BufferPool::create(HailoRTCommon::get_nms_hw_frame_size(fused_info),
-        buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
+        buffer_pool_size, shutdown_event, elem_flags, vstream_flags, is_last_copy_element);
     CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool");
 
     auto duration_collector = DurationCollector::create(elem_flags);
     CHECK_EXPECTED(duration_collector);
 
     auto nms_elem_ptr = make_shared_nothrow<NmsMuxElement>(nms_infos, fused_info, buffer_pool.release(),
-        name, timeout, duration_collector.release(), std::move(pipeline_status));
+        name, timeout, duration_collector.release(), std::move(pipeline_status), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != nms_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
 
     LOGGER__INFO("Created {}", nms_elem_ptr->name());
@@ -326,19 +632,27 @@ Expected<std::shared_ptr<NmsMuxElement>> NmsMuxElement::create(const std::vector
 }
 
 Expected<std::shared_ptr<NmsMuxElement>> NmsMuxElement::create(const std::vector<hailo_nms_info_t> &nms_infos, const std::string &name,
-        const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+        const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction, bool is_last_copy_element)
 {
     return NmsMuxElement::create(nms_infos, name, std::chrono::milliseconds(vstream_params.timeout_ms), vstream_params.queue_size,
-        vstream_params.pipeline_elements_stats_flags, vstream_params.vstream_stats_flags, shutdown_event, pipeline_status);
+        vstream_params.pipeline_elements_stats_flags, vstream_params.vstream_stats_flags, shutdown_event, pipeline_status, pipeline_direction,
+        is_last_copy_element);
+}
+
+Expected<std::shared_ptr<NmsMuxElement>> NmsMuxElement::create(const std::vector<hailo_nms_info_t> &nms_infos,
+    const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    return NmsMuxElement::create(nms_infos, name, build_params.timeout, build_params.buffer_pool_size, build_params.elem_stats_flags,
+        build_params.vstream_stats_flags, build_params.shutdown_event, build_params.pipeline_status, pipeline_direction, is_last_copy_element);
 }
 
 NmsMuxElement::NmsMuxElement(const std::vector<hailo_nms_info_t> &nms_infos, const hailo_nms_info_t &fused_nms_info, BufferPoolPtr &&pool,
                              const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
-                             std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
-    BaseMuxElement(nms_infos.size(), name, timeout, std::move(duration_collector), std::move(pipeline_status)),
+                             std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, PipelineDirection pipeline_direction) :
+    BaseMuxElement(nms_infos.size(), name, timeout, std::move(duration_collector), std::move(pipeline_status), std::move(pool), pipeline_direction),
     m_nms_infos(nms_infos),
-    m_fused_nms_info(fused_nms_info),
-    m_pool(std::move(pool))
+    m_fused_nms_info(fused_nms_info)
 {}
 
 const hailo_nms_info_t &NmsMuxElement::get_fused_nms_info() const
@@ -381,7 +695,8 @@ Expected<PipelineBuffer> NmsMuxElement::action(std::vector<PipelineBuffer> &&inp
 
 Expected<std::shared_ptr<TransformDemuxElement>> TransformDemuxElement::create(std::shared_ptr<OutputDemuxer> demuxer,
     const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
-    hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+    hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+    PipelineDirection pipeline_direction)
 {
     std::vector<BufferPoolPtr> pools;
     pools.reserve(demuxer->get_edges_stream_info().size());
@@ -396,22 +711,48 @@ Expected<std::shared_ptr<TransformDemuxElement>> TransformDemuxElement::create(s
     CHECK_EXPECTED(duration_collector);
 
     auto demux_elem_ptr = make_shared_nothrow<TransformDemuxElement>(demuxer, std::move(pools), name, timeout,
-        duration_collector.release(), std::move(pipeline_status));
+        duration_collector.release(), std::move(pipeline_status), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != demux_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
 
     return demux_elem_ptr;
 }
 
+Expected<std::shared_ptr<TransformDemuxElement>> TransformDemuxElement::create(std::shared_ptr<OutputDemuxer> demuxer,
+    const std::string &name, const ElementBuildParams &build_params,
+    PipelineDirection pipeline_direction)
+{
+    return TransformDemuxElement::create(demuxer, name, build_params.timeout, build_params.buffer_pool_size, build_params.elem_stats_flags,
+        build_params.vstream_stats_flags, build_params.shutdown_event, build_params.pipeline_status, pipeline_direction);
+}
+
 TransformDemuxElement::TransformDemuxElement(std::shared_ptr<OutputDemuxer> demuxer, std::vector<BufferPoolPtr> &&pools,
                                              const std::string &name, std::chrono::milliseconds timeout,
                                              DurationCollector &&duration_collector,
-                                             std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
+                                             std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                                             PipelineDirection pipeline_direction) :
     BaseDemuxElement(demuxer->get_edges_stream_info().size(), name, timeout, std::move(duration_collector),
-                     std::move(pipeline_status)),
-    m_demuxer(demuxer),
-    m_pools(std::move(pools))
+                     std::move(pipeline_status), std::move(pools), pipeline_direction),
+    m_demuxer(demuxer)
+{}
+
+PixBufferElement::PixBufferElement(const std::string &name, std::chrono::milliseconds timeout,
+    DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+    size_t sources_count, hailo_format_order_t order) :
+        BaseDemuxElement(sources_count, name, timeout, std::move(duration_collector), std::move(pipeline_status),
+            {}, PipelineDirection::PUSH),
+            m_order(order)
 {}
 
+Expected<std::shared_ptr<PixBufferElement>> PixBufferElement::create(const std::string &name,
+    std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status, size_t sources_count, hailo_format_order_t order)
+{
+    auto pix_buffer_splitter_elem_ptr = make_shared_nothrow<PixBufferElement>(name, timeout,
+        std::move(duration_collector), std::move(pipeline_status), sources_count, order);
+    CHECK_AS_EXPECTED(nullptr != pix_buffer_splitter_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
+    return pix_buffer_splitter_elem_ptr;
+}
+
 std::vector<AccumulatorPtr> TransformDemuxElement::get_queue_size_accumulators()
 {
     std::vector<AccumulatorPtr> result;
@@ -439,7 +780,7 @@ Expected<std::vector<PipelineBuffer>> TransformDemuxElement::action(PipelineBuff
         }
         CHECK_EXPECTED(acquired_buffer, "Failed to acquire buffer");
         outputs.emplace_back(acquired_buffer.release());
-        
+
         raw_buffers.push_back(outputs.back().as_view());
     }
 
@@ -451,122 +792,207 @@ Expected<std::vector<PipelineBuffer>> TransformDemuxElement::action(PipelineBuff
     return outputs;
 }
 
+Expected<std::vector<PipelineBuffer>> PixBufferElement::action(PipelineBuffer &&input)
+{
+    // splits the planes into buffers
+    m_duration_collector.start_measurement();
+    std::vector<PipelineBuffer> outputs;
+
+    auto input_pix_buffer_expected = input.as_hailo_pix_buffer(m_order);
+    CHECK_EXPECTED(input_pix_buffer_expected);
+    auto input_pix_buffer = input_pix_buffer_expected.release();
+
+    if (PipelineBuffer::Type::FLUSH == input.get_type()) {
+        for (uint32_t i = 0; i < input_pix_buffer.number_of_planes; i++) {
+            outputs.emplace_back(PipelineBuffer(PipelineBuffer::Type::FLUSH));
+        }
+    } else {
+        for (uint32_t i = 0; i < input_pix_buffer.number_of_planes; i++){
+            outputs.emplace_back(MemoryView(input_pix_buffer.planes[i].user_ptr, input_pix_buffer.planes[i].bytes_used));
+        }
+    }
+
+    m_duration_collector.complete_measurement();
+    return outputs;
+}
+
 Expected<std::shared_ptr<ArgmaxPostProcessElement>> ArgmaxPostProcessElement::create(std::shared_ptr<net_flow::Op> argmax_op,
     const std::string &name, hailo_pipeline_elem_stats_flags_t elem_flags,
-    std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+    size_t buffer_pool_size, std::chrono::milliseconds timeout, hailo_vstream_stats_flags_t vstream_flags,
+    EventPtr shutdown_event, PipelineDirection pipeline_direction, bool is_last_copy_element)
 {
+    auto out_metadata = argmax_op->outputs_metadata().begin()->second;
+    auto buffer_size = HailoRTCommon::get_frame_size(out_metadata.shape, out_metadata.format);
+    auto buffer_pool = BufferPool::create(buffer_size, buffer_pool_size, shutdown_event, elem_flags, vstream_flags, is_last_copy_element);
+    CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool for {}", name);
+
     auto duration_collector = DurationCollector::create(elem_flags);
     CHECK_EXPECTED(duration_collector);
     auto argmax_elem_ptr = make_shared_nothrow<ArgmaxPostProcessElement>(argmax_op,
-        name, duration_collector.release(), std::move(pipeline_status));
+        name, duration_collector.release(), std::move(pipeline_status), timeout, buffer_pool.release(), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != argmax_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
     LOGGER__INFO("Created {}", argmax_elem_ptr->name());
     return argmax_elem_ptr;
 }
 
+Expected<std::shared_ptr<ArgmaxPostProcessElement>> ArgmaxPostProcessElement::create(std::shared_ptr<net_flow::Op> argmax_op,
+    const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    return ArgmaxPostProcessElement::create(argmax_op, name,
+        build_params.elem_stats_flags, build_params.pipeline_status, build_params.buffer_pool_size, build_params.timeout,
+        build_params.vstream_stats_flags, build_params.shutdown_event, pipeline_direction, is_last_copy_element);
+}
+
 ArgmaxPostProcessElement::ArgmaxPostProcessElement(std::shared_ptr<net_flow::Op> argmax_op, const std::string &name,
-                                   DurationCollector &&duration_collector,
-                                   std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
-    FilterElement(name, std::move(duration_collector), std::move(pipeline_status)),
+    DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+    std::chrono::milliseconds timeout, BufferPoolPtr buffer_pool, PipelineDirection pipeline_direction) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction, buffer_pool, timeout),
     m_argmax_op(argmax_op)
 {}
 
-hailo_status ArgmaxPostProcessElement::run_push(PipelineBuffer &&/*buffer*/)
+Expected<PipelineBuffer> ArgmaxPostProcessElement::run_pull(PipelineBuffer &&optional, const PipelinePad &source)
 {
-    LOGGER__ERROR("ArgmaxPostProcessElement does not support run_push operation");
-    return HAILO_INVALID_OPERATION;
+    CHECK_AS_EXPECTED(m_pipeline_direction == PipelineDirection::PULL, HAILO_INVALID_OPERATION,
+        "ArgmaxPostProcessElement {} does not support run_pull operation", name());
+    return FilterElement::run_pull(std::move(optional), source);
+}
+
+hailo_status ArgmaxPostProcessElement::run_push(PipelineBuffer &&buffer, const PipelinePad &sink)
+{
+    CHECK(PipelineDirection::PUSH == m_pipeline_direction, HAILO_INVALID_OPERATION,
+        "ArgmaxPostProcessElement {} does not support run_push operation", name());
+    return FilterElement::run_push(std::move(buffer), sink);
 }
 
 PipelinePad &ArgmaxPostProcessElement::next_pad()
 {
-    // Note: The next elem to be run is upstream from this elem (i.e. buffers are pulled)
+    if (PipelineDirection::PUSH == m_pipeline_direction){
+        return *m_sources[0].next();
+    }
     return *m_sinks[0].prev();
 }
 
 std::string ArgmaxPostProcessElement::description() const
 {
     std::stringstream element_description;
-    element_description << "(" << this->name() << " | " << m_argmax_op->get_op_description() << ")";
+    element_description << "(" << this->name() << " | " << m_argmax_op->metadata()->get_op_description() << ")";
     return element_description.str();
 }
 
 Expected<PipelineBuffer> ArgmaxPostProcessElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
 {
+    auto buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
+        return make_unexpected(buffer.status());
+    }
+    CHECK_EXPECTED(buffer, "{} (D2H) failed with status={}", name(), buffer.status());
+
     std::map<std::string, MemoryView> inputs;
     std::map<std::string, MemoryView> outputs;
     auto &input_name = m_argmax_op->inputs_metadata().begin()->first;
     auto &output_name = m_argmax_op->outputs_metadata().begin()->first;
     inputs.insert({input_name, input.as_view()});
-    outputs.insert({output_name, optional.as_view()});
+    outputs.insert({output_name, buffer->as_view()});
     m_duration_collector.start_measurement();
     auto post_process_result = m_argmax_op->execute(inputs, outputs);
     CHECK_SUCCESS_AS_EXPECTED(post_process_result);
     m_duration_collector.complete_measurement();
 
-    return std::move(optional);
+    return buffer.release();
 }
 
 Expected<std::shared_ptr<SoftmaxPostProcessElement>> SoftmaxPostProcessElement::create(std::shared_ptr<net_flow::Op> softmax_op,
     const std::string &name, hailo_pipeline_elem_stats_flags_t elem_flags,
-    std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status, size_t buffer_pool_size, std::chrono::milliseconds timeout,
+    hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, PipelineDirection pipeline_direction, bool is_last_copy_element)
 {
+    auto out_metadata = softmax_op->outputs_metadata().begin()->second;
+    auto buffer_size = HailoRTCommon::get_frame_size(out_metadata.shape, out_metadata.format);
+    auto buffer_pool = BufferPool::create(buffer_size, buffer_pool_size, shutdown_event, elem_flags, vstream_flags, is_last_copy_element);
+    CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool for {}", name);
+
     auto duration_collector = DurationCollector::create(elem_flags);
     CHECK_EXPECTED(duration_collector);
     auto softmax_elem_ptr = make_shared_nothrow<SoftmaxPostProcessElement>(softmax_op,
-        name, duration_collector.release(), std::move(pipeline_status));
+        name, duration_collector.release(), std::move(pipeline_status), timeout, buffer_pool.release(), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != softmax_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
     LOGGER__INFO("Created {}", softmax_elem_ptr->name());
     return softmax_elem_ptr;
 }
 
+Expected<std::shared_ptr<SoftmaxPostProcessElement>> SoftmaxPostProcessElement::create(std::shared_ptr<net_flow::Op> softmax_op,
+    const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    return SoftmaxPostProcessElement::create(softmax_op, name, build_params.elem_stats_flags, build_params.pipeline_status, build_params.buffer_pool_size,
+        build_params.timeout, build_params.vstream_stats_flags, build_params.shutdown_event, pipeline_direction, is_last_copy_element);
+}
+
 SoftmaxPostProcessElement::SoftmaxPostProcessElement(std::shared_ptr<net_flow::Op> softmax_op, const std::string &name,
-                                   DurationCollector &&duration_collector,
-                                   std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status) :
-    FilterElement(name, std::move(duration_collector), std::move(pipeline_status)),
+    DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+    std::chrono::milliseconds timeout, BufferPoolPtr buffer_pool, PipelineDirection pipeline_direction) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction, buffer_pool, timeout),
     m_softmax_op(softmax_op)
 {}
 
-hailo_status SoftmaxPostProcessElement::run_push(PipelineBuffer &&/*buffer*/)
+Expected<PipelineBuffer> SoftmaxPostProcessElement::run_pull(PipelineBuffer &&optional, const PipelinePad &source)
 {
-    LOGGER__ERROR("SoftmaxPostProcessElement does not support run_push operation");
-    return HAILO_INVALID_OPERATION;
+    CHECK_AS_EXPECTED(m_pipeline_direction == PipelineDirection::PULL, HAILO_INVALID_OPERATION,
+        "SoftmaxPostProcessElement {} does not support run_pull operation", name());
+    return FilterElement::run_pull(std::move(optional), source);
+}
+
+hailo_status SoftmaxPostProcessElement::run_push(PipelineBuffer &&buffer, const PipelinePad &sink)
+{
+    CHECK(PipelineDirection::PUSH == m_pipeline_direction, HAILO_INVALID_OPERATION,
+        "SoftmaxPostProcessElement {} does not support run_push operation", name());
+    return FilterElement::run_push(std::move(buffer), sink);
 }
 
 PipelinePad &SoftmaxPostProcessElement::next_pad()
 {
-    // Note: The next elem to be run is upstream from this elem (i.e. buffers are pulled)
+    if (PipelineDirection::PUSH == m_pipeline_direction){
+        return *m_sources[0].next();
+    }
     return *m_sinks[0].prev();
 }
 
 std::string SoftmaxPostProcessElement::description() const
 {
     std::stringstream element_description;
-    element_description << "(" << this->name() << " | " << m_softmax_op->get_op_description() << ")";
+    element_description << "(" << this->name() << " | " << m_softmax_op->metadata()->get_op_description() << ")";
     return element_description.str();
 }
 
 Expected<PipelineBuffer> SoftmaxPostProcessElement::action(PipelineBuffer &&input, PipelineBuffer &&optional)
 {
+    auto buffer = m_pool->get_available_buffer(std::move(optional), m_timeout);
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == buffer.status()) {
+        return make_unexpected(buffer.status());
+    }
+    CHECK_EXPECTED(buffer, "{} (D2H) failed with status={}", name(), buffer.status());
+
     std::map<std::string, MemoryView> inputs;
     std::map<std::string, MemoryView> outputs;
     auto &input_name = m_softmax_op->inputs_metadata().begin()->first;
     auto &output_name = m_softmax_op->outputs_metadata().begin()->first;
     inputs.insert({input_name, input.as_view()});
-    outputs.insert({output_name, optional.as_view()});
+    outputs.insert({output_name, buffer->as_view()});
     m_duration_collector.start_measurement();
     auto post_process_result = m_softmax_op->execute(inputs, outputs);
     CHECK_SUCCESS_AS_EXPECTED(post_process_result);
     m_duration_collector.complete_measurement();
 
-    return std::move(optional);
+    return buffer.release();
 }
 
-BaseVStream::BaseVStream(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+BaseVStream::BaseVStream(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
                          std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
                          std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
                          EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator, EventPtr &&core_op_activated_event,
                          hailo_status &output_status) :
     m_vstream_info(vstream_info),
+    m_quant_infos(quant_infos),
     m_vstream_params(vstream_params),
     m_measure_pipeline_latency((vstream_params.vstream_stats_flags & HAILO_VSTREAM_STATS_MEASURE_LATENCY) != 0),
     m_entry_element(pipeline_entry),
@@ -607,6 +1033,7 @@ BaseVStream& BaseVStream::operator=(BaseVStream &&other) noexcept
         // operator= is used only for vstream creation BEFORE activation. otherwise we should deactivate vstream here
         assert(!m_is_activated);
         m_vstream_info = std::move(other.m_vstream_info);
+        m_quant_infos = std::move(other.m_quant_infos);
         m_vstream_params = std::move(other.m_vstream_params);
         m_measure_pipeline_latency = std::move(other.m_measure_pipeline_latency);
         m_entry_element = std::move(other.m_entry_element);
@@ -708,12 +1135,24 @@ hailo_status BaseVStream::stop_and_clear()
     return status;
 }
 
+hailo_status BaseVStream::before_fork()
+{
+    return HAILO_SUCCESS;
+}
+
+hailo_status BaseVStream::after_fork_in_parent()
+{
+    return HAILO_SUCCESS;
+}
+
+hailo_status BaseVStream::after_fork_in_child()
+{
+    return HAILO_SUCCESS;
+}
+
 size_t BaseVStream::get_frame_size() const
 {
-    if (HAILO_FORMAT_ORDER_HAILO_NMS == m_vstream_info.format.order) {
-        return HailoRTCommon::get_nms_host_frame_size(m_vstream_info.nms_shape, m_vstream_params.user_buffer_format);
-    }
-    return HailoRTCommon::get_frame_size(m_vstream_info.shape, m_vstream_params.user_buffer_format);
+    return HailoRTCommon::get_frame_size(m_vstream_info, m_vstream_params.user_buffer_format);
 }
 
 const hailo_vstream_info_t &BaseVStream::get_info() const
@@ -721,6 +1160,11 @@ const hailo_vstream_info_t &BaseVStream::get_info() const
     return m_vstream_info;
 }
 
+const std::vector<hailo_quant_info_t> &BaseVStream::get_quant_infos() const
+{
+    return m_quant_infos;
+}
+
 const hailo_format_t &BaseVStream::get_user_buffer_format() const
 {
     return m_vstream_params.user_buffer_format;
@@ -762,13 +1206,13 @@ const std::vector<std::shared_ptr<PipelineElement>> &BaseVStream::get_pipeline()
     return m_pipeline;
 }
 
-Expected<InputVStream> InputVStream::create(const hailo_vstream_info_t &vstream_info,
+Expected<InputVStream> InputVStream::create(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos,
         const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
         std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
         AccumulatorPtr pipeline_latency_accumulator)
 {
-    auto vstream_internal = InputVStreamInternal::create(vstream_info, vstream_params, pipeline_entry, pipeline_exit,
+    auto vstream_internal = InputVStreamInternal::create(vstream_info, quant_infos, vstream_params, pipeline_entry, pipeline_exit,
         std::move(pipeline), std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator);
     CHECK_EXPECTED(vstream_internal);
 
@@ -781,6 +1225,57 @@ hailo_status InputVStream::write(const MemoryView &buffer)
     return m_vstream->write(std::move(buffer));
 }
 
+hailo_status InputVStream::write(const hailo_pix_buffer_t &buffer)
+{
+    // If only one plane is passed, address it s memview
+    if (1 == buffer.number_of_planes) {
+        return write(MemoryView(buffer.planes[0].user_ptr, buffer.planes[0].bytes_used));
+    }
+
+    // If model is multi planar, pass the pix buffer
+    if (m_vstream->is_multi_planar()){
+        return m_vstream->write(buffer);
+    }
+
+    // Other cases - allocate a contiguous buffer to hold all plains
+    bool is_contiguous = true;
+    uint32_t planes_total_size = 0;
+    /* assuming contiguous memory. If not, this will be overriden by the coming loop */
+    void *data_ptr = buffer.planes[0].user_ptr;
+
+    /* calculate total data size by summing the planes' sizes and check if the planes are contiguous */
+    for (uint32_t plane_index = 0; plane_index < buffer.number_of_planes; plane_index++){
+        auto &plane = buffer.planes[plane_index];
+        planes_total_size += plane.bytes_used;
+
+        if (is_contiguous && (plane_index + 1 < buffer.number_of_planes)){
+            auto &next_plane = buffer.planes[plane_index+1];
+            if ((static_cast<uint8_t*>(plane.user_ptr) + plane.bytes_used) != next_plane.user_ptr){
+                is_contiguous = false;
+            }
+        }
+    }
+
+    BufferPtr contiguous_buffer = nullptr;
+    if (! is_contiguous) {
+        /* copy to a contiguous buffer, and then pass it */
+        auto expected_buffer = Buffer::create_shared(planes_total_size);
+        CHECK_EXPECTED_AS_STATUS(expected_buffer);
+        contiguous_buffer = expected_buffer.release();
+        uint32_t copied_bytes = 0;
+
+        for (uint32_t plane_index = 0; plane_index < buffer.number_of_planes; plane_index++){
+            auto &plane = buffer.planes[plane_index];
+            std::memcpy(contiguous_buffer->data() + copied_bytes, plane.user_ptr, plane.bytes_used);
+            copied_bytes += plane.bytes_used;
+        }
+
+        data_ptr = contiguous_buffer->data();
+    }
+
+    return m_vstream->write(std::move(MemoryView(data_ptr, planes_total_size)));
+}
+
 hailo_status InputVStream::flush()
 {
     return m_vstream->flush();
@@ -834,6 +1329,11 @@ const hailo_vstream_info_t &InputVStream::get_info() const
     return m_vstream->get_info();
 }
 
+const std::vector<hailo_quant_info_t> &InputVStream::get_quant_infos() const
+{
+    return m_vstream->get_quant_infos();
+}
+
 const hailo_format_t &InputVStream::get_user_buffer_format() const
 {
     return m_vstream->get_user_buffer_format();
@@ -894,6 +1394,17 @@ std::string InputVStream::get_pipeline_description() const
     return m_vstream->get_pipeline_description();
 }
 
+bool InputVStream::is_aborted()
+{
+    return m_vstream->is_aborted();
+}
+
+bool InputVStream::is_multi_planar()
+{
+    return m_vstream->is_multi_planar();
+}
+
+
 hailo_status InputVStream::before_fork()
 {
     return m_vstream->before_fork();
@@ -909,20 +1420,15 @@ hailo_status InputVStream::after_fork_in_child()
     return m_vstream->after_fork_in_child();
 }
 
-bool InputVStream::is_aborted()
-{
-    return m_vstream->is_aborted();
-}
-
 InputVStream::InputVStream(std::shared_ptr<InputVStreamInternal> vstream) : m_vstream(std::move(vstream)) {}
 
 Expected<OutputVStream> OutputVStream::create(
-        const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+        const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
         std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
         EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator)
 {
-    auto vstream_internal = OutputVStreamInternal::create(vstream_info, vstream_params, pipeline_entry,
+    auto vstream_internal = OutputVStreamInternal::create(vstream_info, quant_infos, vstream_params, pipeline_entry,
         std::move(pipeline), std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator);
     CHECK_EXPECTED(vstream_internal);
 
@@ -983,6 +1489,11 @@ const hailo_vstream_info_t &OutputVStream::get_info() const
     return m_vstream->get_info();
 }
 
+const std::vector<hailo_quant_info_t> &OutputVStream::get_quant_infos() const
+{
+    return m_vstream->get_quant_infos();
+}
+
 const hailo_format_t &OutputVStream::get_user_buffer_format() const
 {
     return m_vstream->get_user_buffer_format();
@@ -1043,6 +1554,11 @@ std::string OutputVStream::get_pipeline_description() const
     return m_vstream->get_pipeline_description();
 }
 
+bool OutputVStream::is_aborted()
+{
+    return m_vstream->is_aborted();
+}
+
 hailo_status OutputVStream::before_fork()
 {
     return m_vstream->before_fork();
@@ -1058,9 +1574,19 @@ hailo_status OutputVStream::after_fork_in_child()
     return m_vstream->after_fork_in_child();
 }
 
-bool OutputVStream::is_aborted()
+hailo_status OutputVStream::set_nms_score_threshold(float32_t threshold)
 {
-    return m_vstream->is_aborted();
+    return m_vstream->set_nms_score_threshold(threshold);
+}
+
+hailo_status OutputVStream::set_nms_iou_threshold(float32_t threshold)
+{
+    return m_vstream->set_nms_iou_threshold(threshold);
+}
+
+hailo_status OutputVStream::set_nms_max_proposals_per_class(uint32_t max_proposals_per_class)
+{
+    return m_vstream->set_nms_max_proposals_per_class(max_proposals_per_class);
 }
 
 OutputVStream::OutputVStream(std::shared_ptr<OutputVStreamInternal> vstream) : m_vstream(std::move(vstream)) {}
@@ -1110,59 +1636,65 @@ std::map<std::string, std::vector<AccumulatorPtr>> get_pipeline_queue_size_accum
 }
 
 Expected<std::shared_ptr<InputVStreamInternal>> InputVStreamInternal::create(const hailo_vstream_info_t &vstream_info,
-    const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
+    const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
     std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
     std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
     AccumulatorPtr pipeline_latency_accumulator)
 {
-    auto vstream = InputVStreamImpl::create(vstream_info, vstream_params, pipeline_entry, pipeline_exit,
+    auto vstream = InputVStreamImpl::create(vstream_info, quant_infos, vstream_params, pipeline_entry, pipeline_exit,
         std::move(pipeline), std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator);
     CHECK_EXPECTED(vstream);
     auto vstream_ptr = std::shared_ptr<InputVStreamInternal>(vstream.release());
     return vstream_ptr;
 }
 
-InputVStreamInternal::InputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+InputVStreamInternal::InputVStreamInternal(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
                          std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
                          std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
                          EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator, EventPtr &&core_op_activated_event,
                          hailo_status &output_status) :
-    BaseVStream(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
+    BaseVStream(vstream_info, quant_infos, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
                 shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), output_status){}
 
 Expected<std::shared_ptr<InputVStreamImpl>> InputVStreamImpl::create(const hailo_vstream_info_t &vstream_info,
-    const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
+    const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
     std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
     std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
     AccumulatorPtr pipeline_latency_accumulator)
 {
     hailo_status status = HAILO_UNINITIALIZED;
-
-    if (nullptr != pipeline_latency_accumulator) {
-        pipeline_exit->sink().set_push_complete_callback([pipeline_latency_accumulator](const PipelineBuffer::Metadata& metadata) {
-                const auto duration_sec = std::chrono::duration_cast<std::chrono::duration<double>>(
-                    std::chrono::steady_clock::now() - metadata.get_start_time()).count();
-                pipeline_latency_accumulator->add_data_point(duration_sec);
-            });
+
+    if (nullptr != pipeline_latency_accumulator) {
+        if (pipeline_exit) {
+            pipeline_exit->sink().set_push_complete_callback([pipeline_latency_accumulator](const PipelineBuffer::Metadata& metadata) {
+                    const auto duration_sec = std::chrono::duration_cast<std::chrono::duration<double>>(
+                        std::chrono::steady_clock::now() - metadata.get_start_time()).count();
+                    pipeline_latency_accumulator->add_data_point(duration_sec);
+                });
+        }
     }
 
-    auto vstream_ptr = std::shared_ptr<InputVStreamImpl>(new InputVStreamImpl(vstream_info, vstream_params, std::move(pipeline_entry), std::move(pipeline),
+    auto vstream_ptr = std::shared_ptr<InputVStreamImpl>(new InputVStreamImpl(vstream_info, quant_infos, vstream_params, std::move(pipeline_entry), std::move(pipeline),
         std::move(pipeline_status), shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), status));
     CHECK_SUCCESS_AS_EXPECTED(status, "Failed to create virtual stream");
 
     return vstream_ptr;
 }
 
-InputVStreamImpl::InputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+InputVStreamImpl::InputVStreamImpl(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
     std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
     std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
     EventPtr core_op_activated_event, hailo_status &output_status) :
-    InputVStreamInternal(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
+    InputVStreamInternal(vstream_info, quant_infos, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
         shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), output_status)
 {
+    // TODO: propagate a flag instead of using dynamic_pointer_cast (will be disabled when we'll disable RTTI)
+    m_is_multi_planar = (nullptr != std::dynamic_pointer_cast<PixBufferElement>(pipeline_entry));
+
     if (HAILO_SUCCESS != output_status) {
         return;
     }
+
     LOGGER__INFO("Creating {}...", name());
 }
 
@@ -1180,7 +1712,30 @@ hailo_status InputVStreamImpl::write(const MemoryView &buffer)
             "Trying to write to vstream {} before its network group is activated", name());
     }
 
-    auto status = m_entry_element->run_push(PipelineBuffer(buffer, m_measure_pipeline_latency));
+    assert(1 == m_entry_element->sinks().size());
+    auto status = m_entry_element->sinks()[0].run_push(PipelineBuffer(buffer, false, nullptr, m_measure_pipeline_latency));
+    if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+        LOGGER__INFO("Sending to VStream was shutdown!");
+        status = m_pipeline_status->load();
+    }
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("Sending to VStream was aborted!");
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+    return status;
+}
+
+hailo_status InputVStreamImpl::write(const hailo_pix_buffer_t &buffer)
+{
+    if (nullptr != m_core_op_activated_event) {
+        CHECK(m_is_activated, HAILO_VSTREAM_PIPELINE_NOT_ACTIVATED, "Failed to write buffer! Virtual stream {} is not activated!", name());
+        auto status = m_core_op_activated_event->wait(std::chrono::milliseconds(0));
+        CHECK(HAILO_TIMEOUT != status, HAILO_NETWORK_GROUP_NOT_ACTIVATED,
+            "Trying to write to vstream {} before its network group is activated", name());
+    }
+
+    assert(1 == m_entry_element->sinks().size());
+    auto status = m_entry_element->sinks()[0].run_push(PipelineBuffer(buffer));
     if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
         LOGGER__INFO("Sending to VStream was shutdown!");
         status = m_pipeline_status->load();
@@ -1194,7 +1749,8 @@ hailo_status InputVStreamImpl::write(const MemoryView &buffer)
 
 hailo_status InputVStreamImpl::flush()
 {
-    auto status = m_entry_element->run_push(PipelineBuffer(PipelineBuffer::Type::FLUSH));
+    assert(1 == m_entry_element->sinks().size());
+    auto status =  m_entry_element->sinks()[0].run_push(PipelineBuffer(PipelineBuffer::Type::FLUSH));
     CHECK_SUCCESS(status);
 
     status = m_entry_element->flush();
@@ -1203,34 +1759,39 @@ hailo_status InputVStreamImpl::flush()
     return HAILO_SUCCESS;
 }
 
+bool InputVStreamImpl::is_multi_planar() const
+{
+    return m_is_multi_planar;
+}
+
 #ifdef HAILO_SUPPORT_MULTI_PROCESS
-Expected<std::shared_ptr<InputVStreamClient>> InputVStreamClient::create(uint32_t input_vstream_handle)
+Expected<std::shared_ptr<InputVStreamClient>> InputVStreamClient::create(VStreamIdentifier &&identifier)
 {
     grpc::ChannelArguments ch_args;
     ch_args.SetMaxReceiveMessageSize(-1);
-    auto channel = grpc::CreateCustomChannel(HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials(), ch_args);
+    auto channel = grpc::CreateCustomChannel(hailort::HAILORT_SERVICE_ADDRESS, grpc::InsecureChannelCredentials(), ch_args);
     CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
 
     auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
     CHECK_AS_EXPECTED(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
 
-    auto user_buffer_format = client->InputVStream_get_user_buffer_format(input_vstream_handle);
+    auto user_buffer_format = client->InputVStream_get_user_buffer_format(identifier);
     CHECK_EXPECTED(user_buffer_format);
 
-    auto vstream_info = client->InputVStream_get_info(input_vstream_handle);
+    auto vstream_info = client->InputVStream_get_info(identifier);
     CHECK_EXPECTED(vstream_info);
 
-    return std::shared_ptr<InputVStreamClient>(new InputVStreamClient(std::move(client), std::move(input_vstream_handle),
+    return std::shared_ptr<InputVStreamClient>(new InputVStreamClient(std::move(client), std::move(identifier),
         user_buffer_format.release(), vstream_info.release()));
 }
 
-InputVStreamClient::InputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t input_vstream_handle, hailo_format_t &&user_buffer_format, 
-    hailo_vstream_info_t &&info)
-    : m_client(std::move(client)), m_handle(std::move(input_vstream_handle)), m_user_buffer_format(user_buffer_format), m_info(info) {}
+InputVStreamClient::InputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, VStreamIdentifier &&identifier, hailo_format_t &&user_buffer_format,
+    hailo_vstream_info_t &&info) :
+        m_client(std::move(client)), m_identifier(std::move(identifier)), m_user_buffer_format(user_buffer_format), m_info(info) {}
 
 InputVStreamClient::~InputVStreamClient()
 {
-    auto reply = m_client->InputVStream_release(m_handle, OsUtils::get_curr_pid());
+    auto reply = m_client->InputVStream_release(m_identifier, OsUtils::get_curr_pid());
     if (reply != HAILO_SUCCESS) {
         LOGGER__CRITICAL("InputVStream_release failed!");
     }
@@ -1238,12 +1799,27 @@ InputVStreamClient::~InputVStreamClient()
 
 hailo_status InputVStreamClient::write(const MemoryView &buffer)
 {
-    return m_client->InputVStream_write(m_handle, buffer);
+    return m_client->InputVStream_write(m_identifier, buffer);
+}
+
+hailo_status InputVStreamClient::write(const hailo_pix_buffer_t &buffer)
+{
+    return m_client->InputVStream_write(m_identifier, buffer);
 }
 
 hailo_status InputVStreamClient::flush()
 {
-    return m_client->InputVStream_flush(m_handle);
+    return m_client->InputVStream_flush(m_identifier);
+}
+
+bool InputVStreamClient::is_multi_planar() const
+{
+    auto is_multi_planar_exp = m_client->InputVStream_is_multi_planar(m_identifier);
+    if (!is_multi_planar_exp) {
+        LOGGER__CRITICAL("InputVStream_is_multi_planar failed with status={}", is_multi_planar_exp.status());
+        return true;
+    }
+    return is_multi_planar_exp.release();
 }
 
 hailo_status InputVStreamClient::abort()
@@ -1251,12 +1827,12 @@ hailo_status InputVStreamClient::abort()
     auto expected_client = HailoRtRpcClientUtils::create_client();
     CHECK_EXPECTED_AS_STATUS(expected_client);
     auto abort_client = expected_client.release();
-    return abort_client->InputVStream_abort(m_handle);
+    return abort_client->InputVStream_abort(m_identifier);
 }
 
 hailo_status InputVStreamClient::resume()
 {
-    return m_client->InputVStream_resume(m_handle);
+    return m_client->InputVStream_resume(m_identifier);
 }
 
 hailo_status InputVStreamClient::stop_and_clear()
@@ -1265,7 +1841,7 @@ hailo_status InputVStreamClient::stop_and_clear()
     CHECK_EXPECTED_AS_STATUS(expected_client);
     auto stop_and_clear_client = expected_client.release();
 
-    return stop_and_clear_client->InputVStream_stop_and_clear(m_handle);
+    return stop_and_clear_client->InputVStream_stop_and_clear(m_identifier);
 }
 
 hailo_status InputVStreamClient::start_vstream()
@@ -1274,12 +1850,12 @@ hailo_status InputVStreamClient::start_vstream()
     CHECK_EXPECTED_AS_STATUS(expected_client);
     auto start_vstream_client = expected_client.release();
 
-    return start_vstream_client->InputVStream_start_vstream(m_handle);
+    return start_vstream_client->InputVStream_start_vstream(m_identifier);
 }
 
 size_t InputVStreamClient::get_frame_size() const
 {
-    auto frame_size = m_client->InputVStream_get_frame_size(m_handle);
+    auto frame_size = m_client->InputVStream_get_frame_size(m_identifier);
     if (!frame_size) {
         LOGGER__CRITICAL("InputVStream_get_frame_size failed with status={}", frame_size.status());
         return 0;
@@ -1299,7 +1875,7 @@ const hailo_format_t &InputVStreamClient::get_user_buffer_format() const
 
 std::string InputVStreamClient::name() const
 {
-    auto expected_name = m_client->InputVStream_name(m_handle);
+    auto expected_name = m_client->InputVStream_name(m_identifier);
     if (!expected_name) {
         LOGGER__CRITICAL("InputVStream_name failed with status={}", expected_name.status());
         return "";
@@ -1309,7 +1885,7 @@ std::string InputVStreamClient::name() const
 
 std::string InputVStreamClient::network_name() const
 {
-    auto expected_name = m_client->InputVStream_network_name(m_handle);
+    auto expected_name = m_client->InputVStream_network_name(m_identifier);
     if (!expected_name) {
         LOGGER__CRITICAL("InputVStream_name failed with status={}", expected_name.status());
         return "";
@@ -1365,17 +1941,12 @@ hailo_status InputVStreamClient::after_fork_in_parent()
 
 hailo_status InputVStreamClient::after_fork_in_child()
 {
-    auto status = create_client();
-    CHECK_SUCCESS(status);
-    auto expected_dup_handle = m_client->InputVStream_dup_handle(OsUtils::get_curr_pid(), m_handle);
-    CHECK_EXPECTED_AS_STATUS(expected_dup_handle);
-    m_handle = expected_dup_handle.value();
-    return HAILO_SUCCESS;
+    return create_client();
 }
 
 bool InputVStreamClient::is_aborted()
 {
-    auto is_aborted_exp = m_client->InputVStream_is_aborted(m_handle);
+    auto is_aborted_exp = m_client->InputVStream_is_aborted(m_identifier);
     if (!is_aborted_exp) {
         LOGGER__CRITICAL("InputVStream_is_aborted failed with status={}", is_aborted_exp.status());
         return true;
@@ -1397,28 +1968,29 @@ std::string InputVStreamInternal::get_pipeline_description() const
 }
 
 Expected<std::shared_ptr<OutputVStreamInternal>> OutputVStreamInternal::create(
-        const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+        const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
         std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
         EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator)
 {
-    auto vstream = OutputVStreamImpl::create(vstream_info, vstream_params, pipeline_entry,
+    auto vstream = OutputVStreamImpl::create(vstream_info, quant_infos, vstream_params, pipeline_entry,
         std::move(pipeline), std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator);
     CHECK_EXPECTED(vstream);
     auto vstream_ptr = std::shared_ptr<OutputVStreamInternal>(vstream.release());
     return vstream_ptr;
 }
 
-OutputVStreamInternal::OutputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+OutputVStreamInternal::OutputVStreamInternal(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
                                              std::shared_ptr<PipelineElement> pipeline_entry,
                                              std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
                                              std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
                                              AccumulatorPtr pipeline_latency_accumulator,
                                              EventPtr core_op_activated_event, hailo_status &output_status) :
-    BaseVStream(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
+    BaseVStream(vstream_info, quant_infos, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
                 shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), output_status){}
 
-Expected<std::shared_ptr<OutputVStreamImpl>> OutputVStreamImpl::create(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+Expected<std::shared_ptr<OutputVStreamImpl>> OutputVStreamImpl::create(const hailo_vstream_info_t &vstream_info,
+    const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
     std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
     std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
     EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator)
@@ -1436,7 +2008,7 @@ Expected<std::shared_ptr<OutputVStreamImpl>> OutputVStreamImpl::create(const hai
             });
     }
 
-    auto vstream_ptr = std::shared_ptr<OutputVStreamImpl>(new OutputVStreamImpl(vstream_info, vstream_params, std::move(pipeline_entry), std::move(pipeline),
+    auto vstream_ptr = std::shared_ptr<OutputVStreamImpl>(new OutputVStreamImpl(vstream_info, quant_infos, vstream_params, std::move(pipeline_entry), std::move(pipeline),
         std::move(pipeline_status), shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), status));
     CHECK_SUCCESS_AS_EXPECTED(status, "Failed to create virtual stream");
 
@@ -1453,13 +2025,14 @@ std::string OutputVStreamInternal::get_pipeline_description() const
     return pipeline_str.str();
 }
 
-OutputVStreamImpl::OutputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+OutputVStreamImpl::OutputVStreamImpl(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos,
+                                     const hailo_vstream_params_t &vstream_params,
                                      std::shared_ptr<PipelineElement> pipeline_entry,
                                      std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
                                      std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
                                      AccumulatorPtr pipeline_latency_accumulator,
                                      EventPtr core_op_activated_event, hailo_status &output_status) :
-    OutputVStreamInternal(vstream_info, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
+    OutputVStreamInternal(vstream_info, quant_infos, vstream_params, pipeline_entry, std::move(pipeline), std::move(pipeline_status),
                 shutdown_event, pipeline_latency_accumulator, std::move(core_op_activated_event), output_status)
 {
     if (HAILO_SUCCESS != output_status) {
@@ -1500,7 +2073,7 @@ hailo_status OutputVStreamImpl::read(MemoryView buffer)
     }
 
     assert(1 == m_entry_element->sources().size());
-    auto recv_buffer = m_entry_element->sources()[0].run_pull(PipelineBuffer(buffer, m_measure_pipeline_latency));
+    auto recv_buffer = m_entry_element->sources()[0].run_pull(PipelineBuffer(buffer, false, nullptr, m_measure_pipeline_latency));
     auto status = recv_buffer.status();
     if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
         LOGGER__INFO("Receiving to VStream was shutdown!");
@@ -1514,34 +2087,85 @@ hailo_status OutputVStreamImpl::read(MemoryView buffer)
     return status;
 }
 
+Expected<std::shared_ptr<net_flow::NmsOpMetadata>> OutputVStreamImpl::get_nms_metadata_from_pipeline() const
+{
+    CHECK_AS_EXPECTED(HailoRTCommon::is_nms(m_vstream_info), HAILO_INVALID_OPERATION,
+        "Output vstream '{}' is not NMS, there is no NMS op", name());
+
+    for (auto &elem : m_pipeline) {
+        if (auto nms_pp_elem = std::dynamic_pointer_cast<NmsPostProcessMuxElement>(elem)) {
+            // Assuming we have only 1 nms PP on the pipeline
+            auto nms_metadata = std::dynamic_pointer_cast<net_flow::NmsOpMetadata>(nms_pp_elem->get_op()->metadata());
+            CHECK_NOT_NULL_AS_EXPECTED(nms_metadata, HAILO_INVALID_OPERATION);
+            return nms_metadata;
+        }
+    }
+    LOGGER__ERROR("There is no NmsPostProcess in the '{}' pipeline. Unable to get nms op", name());
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+hailo_status OutputVStreamImpl::set_nms_score_threshold(float32_t threshold)
+{
+    auto nms_metadata_expected = get_nms_metadata_from_pipeline();
+    CHECK_EXPECTED_AS_STATUS(nms_metadata_expected, "Unable to set nms score threshold in {}", name());
+    auto nms_metadata = nms_metadata_expected.release();
+
+    nms_metadata->nms_config().nms_score_th = threshold;
+    return HAILO_SUCCESS;
+}
+
+hailo_status OutputVStreamImpl::set_nms_iou_threshold(float32_t threshold)
+{
+    auto nms_metadata_expected = get_nms_metadata_from_pipeline();
+    CHECK_EXPECTED_AS_STATUS(nms_metadata_expected, "Unable to set nms IoU threshold in {}", name());
+    auto nms_metadata = nms_metadata_expected.release();
+
+    nms_metadata->nms_config().nms_iou_th = threshold;
+    return HAILO_SUCCESS;
+}
+
+hailo_status OutputVStreamImpl::set_nms_max_proposals_per_class(uint32_t max_proposals_per_class)
+{
+    auto nms_metadata_expected = get_nms_metadata_from_pipeline();
+    CHECK_EXPECTED_AS_STATUS(nms_metadata_expected, "Unable to set nms max proposals per class in {}", name());
+    auto nms_metadata = nms_metadata_expected.release();
+
+    nms_metadata->nms_config().max_proposals_per_class = max_proposals_per_class;
+    // Update vstream info
+    m_vstream_info.nms_shape.max_bboxes_per_class = max_proposals_per_class;
+
+    return HAILO_SUCCESS;
+}
+
+
 #ifdef HAILO_SUPPORT_MULTI_PROCESS
-Expected<std::shared_ptr<OutputVStreamClient>> OutputVStreamClient::create(uint32_t outputs_vstream_handle)
+Expected<std::shared_ptr<OutputVStreamClient>> OutputVStreamClient::create(const VStreamIdentifier &&identifier)
 {
     grpc::ChannelArguments ch_args;
     ch_args.SetMaxReceiveMessageSize(-1);
-    auto channel = grpc::CreateCustomChannel(HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials(), ch_args);
+    auto channel = grpc::CreateCustomChannel(hailort::HAILORT_SERVICE_ADDRESS, grpc::InsecureChannelCredentials(), ch_args);
     CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
 
     auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
     CHECK_AS_EXPECTED(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
 
-    auto user_buffer_format = client->OutputVStream_get_user_buffer_format(outputs_vstream_handle);
+    auto user_buffer_format = client->OutputVStream_get_user_buffer_format(identifier);
     CHECK_EXPECTED(user_buffer_format);
 
-    auto info = client->OutputVStream_get_info(outputs_vstream_handle);
+    auto info = client->OutputVStream_get_info(identifier);
     CHECK_EXPECTED(info);
 
-    return std::shared_ptr<OutputVStreamClient>(new OutputVStreamClient(std::move(client), std::move(outputs_vstream_handle),
+    return std::shared_ptr<OutputVStreamClient>(new OutputVStreamClient(std::move(client), std::move(identifier),
         user_buffer_format.release(), info.release()));
 }
 
-OutputVStreamClient::OutputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t outputs_vstream_handle, hailo_format_t &&user_buffer_format,
-    hailo_vstream_info_t &&info)
-    : m_client(std::move(client)), m_handle(std::move(outputs_vstream_handle)), m_user_buffer_format(user_buffer_format), m_info(info) {}
+OutputVStreamClient::OutputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, const VStreamIdentifier &&identifier, hailo_format_t &&user_buffer_format,
+    hailo_vstream_info_t &&info) :
+        m_client(std::move(client)), m_identifier(std::move(identifier)), m_user_buffer_format(user_buffer_format), m_info(info) {}
 
 OutputVStreamClient::~OutputVStreamClient()
 {
-    auto reply = m_client->OutputVStream_release(m_handle, OsUtils::get_curr_pid());
+    auto reply = m_client->OutputVStream_release(m_identifier, OsUtils::get_curr_pid());
     if (reply != HAILO_SUCCESS) {
         LOGGER__CRITICAL("OutputVStream_release failed!");
     }
@@ -1549,7 +2173,7 @@ OutputVStreamClient::~OutputVStreamClient()
 
 hailo_status OutputVStreamClient::read(MemoryView buffer)
 {
-    return m_client->OutputVStream_read(m_handle, buffer);
+    return m_client->OutputVStream_read(m_identifier, buffer);
 }
 
 hailo_status OutputVStreamClient::abort()
@@ -1557,12 +2181,12 @@ hailo_status OutputVStreamClient::abort()
     auto expected_client = HailoRtRpcClientUtils::create_client();
     CHECK_EXPECTED_AS_STATUS(expected_client);
     auto abort_client = expected_client.release();
-    return abort_client->OutputVStream_abort(m_handle);
+    return abort_client->OutputVStream_abort(m_identifier);
 }
 
 hailo_status OutputVStreamClient::resume()
 {
-    return m_client->OutputVStream_resume(m_handle);
+    return m_client->OutputVStream_resume(m_identifier);
 }
 
 hailo_status OutputVStreamClient::stop_and_clear()
@@ -1571,7 +2195,7 @@ hailo_status OutputVStreamClient::stop_and_clear()
     CHECK_EXPECTED_AS_STATUS(expected_client);
     auto stop_and_clear_client = expected_client.release();
 
-    return stop_and_clear_client->OutputVStream_stop_and_clear(m_handle);
+    return stop_and_clear_client->OutputVStream_stop_and_clear(m_identifier);
 }
 
 hailo_status OutputVStreamClient::start_vstream()
@@ -1580,12 +2204,12 @@ hailo_status OutputVStreamClient::start_vstream()
     CHECK_EXPECTED_AS_STATUS(expected_client);
     auto start_vstream_client = expected_client.release();
 
-    return start_vstream_client->OutputVStream_start_vstream(m_handle);
+    return start_vstream_client->OutputVStream_start_vstream(m_identifier);
 }
 
 size_t OutputVStreamClient::get_frame_size() const
 {
-    auto frame_size =  m_client->OutputVStream_get_frame_size(m_handle);
+    auto frame_size =  m_client->OutputVStream_get_frame_size(m_identifier);
     if (!frame_size) {
         LOGGER__CRITICAL("OutputVStream_get_frame_size failed with status={}", frame_size.status());
         return 0;
@@ -1605,7 +2229,7 @@ const hailo_format_t &OutputVStreamClient::get_user_buffer_format() const
 
 std::string OutputVStreamClient::name() const
 {
-    auto expected_name = m_client->OutputVStream_name(m_handle);
+    auto expected_name = m_client->OutputVStream_name(m_identifier);
     if (!expected_name) {
         LOGGER__CRITICAL("OutputVStream_name failed with status={}", expected_name.status());
         return "";
@@ -1615,7 +2239,7 @@ std::string OutputVStreamClient::name() const
 
 std::string OutputVStreamClient::network_name() const
 {
-    auto expected_name = m_client->OutputVStream_network_name(m_handle);
+    auto expected_name = m_client->OutputVStream_network_name(m_identifier);
     if (!expected_name) {
         LOGGER__CRITICAL("OutputVStream_name failed with status={}", expected_name.status());
         return "";
@@ -1671,44 +2295,71 @@ hailo_status OutputVStreamClient::after_fork_in_parent()
 
 hailo_status OutputVStreamClient::after_fork_in_child()
 {
-    auto status = create_client();
-    CHECK_SUCCESS(status);
-    auto expected_dup_handle = m_client->OutputVStream_dup_handle(OsUtils::get_curr_pid(), m_handle);
-    CHECK_EXPECTED_AS_STATUS(expected_dup_handle);
-    m_handle = expected_dup_handle.value();
-    return HAILO_SUCCESS;
+    return create_client();
 }
 
 bool OutputVStreamClient::is_aborted()
 {
-    auto is_aborted_exp = m_client->OutputVStream_is_aborted(m_handle);
+    auto is_aborted_exp = m_client->OutputVStream_is_aborted(m_identifier);
     if (!is_aborted_exp) {
         LOGGER__CRITICAL("OutputVStream_is_aborted failed with status={}", is_aborted_exp.status());
         return true;
     }
     return is_aborted_exp.release();
 }
+
+hailo_status OutputVStreamClient::set_nms_score_threshold(float32_t threshold)
+{
+    auto expected_client = HailoRtRpcClientUtils::create_client();
+    CHECK_EXPECTED_AS_STATUS(expected_client);
+    auto vstream_client = expected_client.release();
+
+    CHECK_SUCCESS(vstream_client->OutputVStream_set_nms_score_threshold(m_identifier, threshold));
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status OutputVStreamClient::set_nms_iou_threshold(float32_t threshold)
+{
+    auto expected_client = HailoRtRpcClientUtils::create_client();
+    CHECK_EXPECTED_AS_STATUS(expected_client);
+    auto vstream_client = expected_client.release();
+
+    CHECK_SUCCESS(vstream_client->OutputVStream_set_nms_iou_threshold(m_identifier, threshold));
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status OutputVStreamClient::set_nms_max_proposals_per_class(uint32_t max_proposals_per_class)
+{
+    auto expected_client = HailoRtRpcClientUtils::create_client();
+    CHECK_EXPECTED_AS_STATUS(expected_client);
+    auto vstream_client = expected_client.release();
+
+    CHECK_SUCCESS(vstream_client->OutputVStream_set_nms_max_proposals_per_class(m_identifier, max_proposals_per_class));
+    m_info.nms_shape.max_bboxes_per_class = max_proposals_per_class;
+
+    return HAILO_SUCCESS;
+}
+
 #endif // HAILO_SUPPORT_MULTI_PROCESS
 
 Expected<std::shared_ptr<HwReadElement>> HwReadElement::create(std::shared_ptr<OutputStream> stream, const std::string &name, std::chrono::milliseconds timeout,
     size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
-    std::shared_ptr<std::atomic<hailo_status>> pipeline_status, std::unique_ptr<OutputTransformContext> transform_context)
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction)
 {
     auto buffer_pool = BufferPool::create(stream->get_frame_size(), buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
     CHECK_EXPECTED(buffer_pool, "Failed creating BufferPool for {}", name);
 
-    BufferPoolPtr transform_pool = nullptr;
-    if (transform_context) {
-        auto expected_transform_pool = BufferPool::create(transform_context->get_dst_frame_size(), buffer_pool_size, shutdown_event, elem_flags, vstream_flags);
-        CHECK_EXPECTED(expected_transform_pool, "Failed creating BufferPool for {}", name);        
-        transform_pool = expected_transform_pool.release();
-    }
+    // On HwReadElement the stream always owns the buffer, hence, we set the mode explicitly.
+    auto status = dynamic_cast<OutputStreamBase&>(*stream).set_buffer_mode(StreamBufferMode::OWNING);
+    CHECK_SUCCESS_AS_EXPECTED(status);
 
     auto duration_collector = DurationCollector::create(elem_flags);
     CHECK_EXPECTED(duration_collector);
 
     auto hw_read_elem_ptr = make_shared_nothrow<HwReadElement>(stream, buffer_pool.release(), name, timeout,
-        duration_collector.release(), shutdown_event, std::move(pipeline_status), transform_pool, std::move(transform_context));
+        duration_collector.release(), shutdown_event, std::move(pipeline_status), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != hw_read_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
 
     LOGGER__INFO("Created {}", hw_read_elem_ptr->name());
@@ -1719,15 +2370,13 @@ Expected<std::shared_ptr<HwReadElement>> HwReadElement::create(std::shared_ptr<O
 HwReadElement::HwReadElement(std::shared_ptr<OutputStream> stream, BufferPoolPtr buffer_pool, const std::string &name,
                              std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
                              EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
-                             BufferPoolPtr transform_pool, std::unique_ptr<OutputTransformContext> transform_context) :
-    SourceElement(name, std::move(duration_collector), std::move(pipeline_status)),
+                             PipelineDirection pipeline_direction) :
+    SourceElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction),
     m_stream(stream),
     m_pool(buffer_pool),
-    m_transform_pool(transform_pool),
     m_timeout(timeout),
     m_shutdown_event(shutdown_event),
-    m_activation_wait_or_shutdown(stream->get_core_op_activated_event(), shutdown_event),
-    m_transform_context(std::move(transform_context))
+    m_activation_wait_or_shutdown(stream->get_core_op_activated_event(), shutdown_event)
 {}
 
 uint32_t HwReadElement::get_invalid_frames_count()
@@ -1792,7 +2441,13 @@ std::vector<AccumulatorPtr> HwReadElement::get_queue_size_accumulators()
     return {m_pool->get_queue_size_accumulator()};
 }
 
-hailo_status HwReadElement::run_push(PipelineBuffer &&/*buffer*/)
+void HwReadElement::run_push_async(PipelineBuffer &&/*buffer*/, const PipelinePad &/*sink*/)
+{
+    LOGGER__ERROR("run_push_async is not supported for {}", name());
+    assert(false);
+}
+
+hailo_status HwReadElement::run_push(PipelineBuffer &&/*buffer*/, const PipelinePad &/*sink*/)
 {
     return HAILO_INVALID_OPERATION;
 }
@@ -1840,16 +2495,6 @@ Expected<PipelineBuffer> HwReadElement::run_pull(PipelineBuffer &&optional, cons
         CHECK_SUCCESS_AS_EXPECTED(status, "{} (D2H) failed with status={}", name(), status);
         m_duration_collector.complete_measurement();
 
-        // TODO: This is for rare cases where a transormation is needed before another pipeline element
-        // Should be handled by the computational graph, and not here.
-        if (m_transform_context) {
-            auto transform_buffer = m_transform_pool->get_available_buffer(PipelineBuffer(), m_timeout);
-            CHECK_EXPECTED(buffer);
-            status = m_transform_context->transform(buffer_view, transform_buffer.value().as_view());
-            CHECK_SUCCESS_AS_EXPECTED(status);
-            return transform_buffer.release();
-        }
-
         return buffer.release();
     }
 }
@@ -1876,17 +2521,21 @@ hailo_status HwReadElement::execute_deactivate()
 }
 
 Expected<std::shared_ptr<HwWriteElement>> HwWriteElement::create(std::shared_ptr<InputStream> stream, const std::string &name,
-    hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+    hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+    PipelineDirection pipeline_direction)
 {
-
     auto duration_collector = DurationCollector::create(elem_flags);
     CHECK_EXPECTED(duration_collector);
 
     auto got_flush_event = Event::create_shared(Event::State::not_signalled);
-    CHECK_AS_EXPECTED(nullptr != got_flush_event, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_EXPECTED(got_flush_event);
+
+    // On HwWriteElement the stream always owns the buffer, hence, we set the mode explicitly.
+    auto status = dynamic_cast<InputStreamBase&>(*stream).set_buffer_mode(StreamBufferMode::OWNING);
+    CHECK_SUCCESS_AS_EXPECTED(status);
 
     auto hw_write_elem_ptr = make_shared_nothrow<HwWriteElement>(stream, name,
-        duration_collector.release(), std::move(pipeline_status), got_flush_event);
+        duration_collector.release(), std::move(pipeline_status), got_flush_event.release(), pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != hw_write_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
 
     LOGGER__INFO("Created {}", hw_write_elem_ptr->name());
@@ -1895,8 +2544,8 @@ Expected<std::shared_ptr<HwWriteElement>> HwWriteElement::create(std::shared_ptr
 }
 
 HwWriteElement::HwWriteElement(std::shared_ptr<InputStream> stream, const std::string &name, DurationCollector &&duration_collector,
-                               std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr got_flush_event) :
-    SinkElement(name, std::move(duration_collector), std::move(pipeline_status)),
+                               std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr got_flush_event, PipelineDirection pipeline_direction) :
+    SinkElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction),
     m_stream(stream), m_got_flush_event(got_flush_event)
 {}
 
@@ -1905,7 +2554,7 @@ Expected<PipelineBuffer> HwWriteElement::run_pull(PipelineBuffer &&/*optional*/,
     return make_unexpected(HAILO_INVALID_OPERATION);
 }
 
-hailo_status HwWriteElement::run_push(PipelineBuffer &&buffer)
+hailo_status HwWriteElement::run_push(PipelineBuffer &&buffer, const PipelinePad &/*sink*/)
 {
     if (PipelineBuffer::Type::FLUSH == buffer.get_type()) {
         hailo_status flush_status = m_stream->flush();
@@ -1927,98 +2576,418 @@ hailo_status HwWriteElement::run_push(PipelineBuffer &&buffer)
         LOGGER__INFO("Failed to send on input stream {} because stream was aborted", m_stream->to_string());
         return HAILO_STREAM_ABORTED_BY_USER;
     }
-    CHECK_SUCCESS(status, "{} (H2D) failed with status={}", name(), status);
+    CHECK_SUCCESS(status, "{} (H2D) failed with status={}", name(), status);
+
+    return HAILO_SUCCESS;
+}
+
+void HwWriteElement::run_push_async(PipelineBuffer &&/*buffer*/, const PipelinePad &/*sink*/)
+{
+    LOGGER__ERROR("run_push_async is not supported for {}", name());
+    assert(false);
+}
+
+hailo_status HwWriteElement::execute_activate()
+{
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_deactivate()
+{
+    // The flush operation will block until all buffers currently in the pipeline will be processed.
+    // We assume that no buffers are sent after the call for deactivate.
+    hailo_status flush_status = m_stream->flush();
+    if (HAILO_STREAM_ABORTED_BY_USER == flush_status) {
+        LOGGER__INFO("Failed flushing input stream {} because stream was aborted", m_stream->to_string());
+        return HAILO_SUCCESS;
+    } else if (HAILO_STREAM_NOT_ACTIVATED == flush_status) {
+        LOGGER__INFO("Failed flushing input stream {} because stream is not activated", m_stream->to_string());
+        return HAILO_SUCCESS;
+    } else if (HAILO_SUCCESS != flush_status) {
+        LOGGER__ERROR("flush has failed in {} with status {}", name(), flush_status);
+    }
+
+    auto abort_status = m_stream->abort();
+    CHECK(((abort_status == HAILO_SUCCESS) || (abort_status == HAILO_STREAM_NOT_ACTIVATED)), abort_status,
+        "Failed to abort stream in {}", name());
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_post_deactivate(bool should_clear_abort)
+{
+    if (should_clear_abort) {
+        auto status = m_stream->clear_abort();
+        CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
+            "Failed to clear abort stream in {}", name());
+    }
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_clear()
+{
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_flush()
+{
+    hailo_status status = m_got_flush_event->wait(m_stream->get_timeout());
+    CHECK_SUCCESS(status);
+
+    status = m_got_flush_event->reset();
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_abort()
+{
+    auto status = m_stream->abort();
+    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
+        "Failed to execute abort stream in {}", name());
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_clear_abort()
+{
+    auto status = m_stream->clear_abort();
+    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
+        "Failed to execute clear_abort stream in {}", name());
+    return HAILO_SUCCESS;
+}
+
+hailo_status HwWriteElement::execute_wait_for_finish()
+{
+    return HAILO_SUCCESS;
+}
+
+std::string HwWriteElement::description() const
+{
+    std::stringstream element_description;
+    element_description << "(" << this->name() << " | hw_frame_size: " << m_stream->get_info().hw_frame_size << ")";   
+
+    return element_description.str();
+}
+
+Expected<std::shared_ptr<LastAsyncElement>> LastAsyncElement::create(const std::string &name,
+    hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+    PipelineDirection pipeline_direction)
+{
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    auto last_async_elem_ptr = make_shared_nothrow<LastAsyncElement>(name,
+        duration_collector.release(), std::move(pipeline_status), pipeline_direction);
+    CHECK_NOT_NULL_AS_EXPECTED(last_async_elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", last_async_elem_ptr->name());
+
+    return last_async_elem_ptr;
+}
+
+Expected<std::shared_ptr<LastAsyncElement>> LastAsyncElement::create(const std::string &name,
+    const ElementBuildParams &build_params, PipelineDirection pipeline_direction)
+{
+    return LastAsyncElement::create(name, build_params.elem_stats_flags,
+        build_params.pipeline_status, pipeline_direction);
+}
+
+LastAsyncElement::LastAsyncElement(const std::string &name, DurationCollector &&duration_collector,
+                               std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                               PipelineDirection pipeline_direction):
+    SinkElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction)
+{}
+
+Expected<PipelineBuffer> LastAsyncElement::run_pull(PipelineBuffer &&/*optional*/, const PipelinePad &/*source*/)
+{
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+hailo_status LastAsyncElement::run_push(PipelineBuffer &&/*optional*/, const PipelinePad &/*sink*/)
+{
+    return HAILO_INVALID_OPERATION;
+}
+
+void LastAsyncElement::run_push_async(PipelineBuffer &&buffer, const PipelinePad &/*sink*/)
+{
+    auto exec_done_cb = buffer.get_exec_done_cb();
+    CompletionInfoAsyncInferInternal completion_info{buffer.action_status()};
+    exec_done_cb(completion_info);
+}
+
+std::string LastAsyncElement::description() const
+{
+    std::stringstream element_description;
+    element_description << "(" << this->name() << ")";
+
+    return element_description.str();
+}
+
+hailo_status LastAsyncElement::execute_activate()
+{
+    return HAILO_SUCCESS;
+}
+
+hailo_status LastAsyncElement::execute_wait_for_finish()
+{
+    return HAILO_SUCCESS;
+}
+
+hailo_status LastAsyncElement::enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name)
+{
+    (void)source_name;
+    return m_sinks[0].prev()->element().enqueue_execution_buffer(mem_view, exec_done, m_sinks[0].prev()->name());
+}
+
+Expected<bool> LastAsyncElement::are_buffer_pools_full()
+{
+    return m_sinks[0].prev()->element().are_buffer_pools_full();
+}
+
+hailo_status LastAsyncElement::fill_buffer_pools(bool is_dma_able) {
+    return m_sinks[0].prev()->element().fill_buffer_pools(is_dma_able);
+}
+
+Expected<std::shared_ptr<AsyncHwElement>> AsyncHwElement::create(const std::vector<std::shared_ptr<InputStream>> &input_streams,
+    const std::vector<std::shared_ptr<OutputStream>> &output_streams, std::chrono::milliseconds timeout, size_t buffer_pool_size,
+    hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, const std::string &name,
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction, bool is_last_copy_element)
+{
+    std::unordered_map<std::string, BufferPoolPtr> output_streams_pools;
+    for (const auto &output_stream : output_streams) {
+        auto buffer_pool = BufferPool::create(output_stream->get_frame_size(), buffer_pool_size, shutdown_event, elem_flags, vstream_flags, is_last_copy_element);
+        CHECK_EXPECTED(buffer_pool);
+        output_streams_pools[output_stream->name()] = buffer_pool.release();
+    }
+
+    auto duration_collector = DurationCollector::create(elem_flags);
+    CHECK_EXPECTED(duration_collector);
+
+    auto elem_ptr = make_shared_nothrow<AsyncHwElement>(input_streams, output_streams, timeout, std::move(output_streams_pools), name,
+        duration_collector.release(), std::move(pipeline_status), pipeline_direction);
+    CHECK_AS_EXPECTED(nullptr != elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
+
+    LOGGER__INFO("Created {}", elem_ptr->name());
+
+    return elem_ptr;
+}
+
+AsyncHwElement::AsyncHwElement(const std::vector<std::shared_ptr<InputStream>> &input_streams, const std::vector<std::shared_ptr<OutputStream>> &output_streams,
+                               std::chrono::milliseconds timeout, std::unordered_map<std::string, BufferPoolPtr> &&output_streams_pools, const std::string &name,
+                               DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+                               PipelineDirection pipeline_direction) :
+    PipelineElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction),
+    m_timeout(timeout),
+    m_output_streams_pools(std::move(output_streams_pools))
+{
+    m_sinks.reserve(input_streams.size());
+    m_sink_has_arrived.reserve(input_streams.size());
+    uint32_t i = 0;
+    for (auto &input : input_streams) {
+        m_sinks.emplace_back(*this, name, PipelinePad::Type::SINK);
+        const auto &sink_name = m_sinks[i++].name();
+        m_sink_name_to_input[sink_name] = input;
+        m_sink_name_to_index[sink_name] = static_cast<uint32_t>(m_sinks.size() - 1);
+        m_sink_has_arrived[sink_name] = false;
+    }
 
-    return HAILO_SUCCESS;
+    m_sources.reserve(output_streams.size());
+    i = 0;
+    for (auto &output : output_streams) {
+        m_sources.emplace_back(*this, name, PipelinePad::Type::SOURCE);
+        const auto &source_name = m_sources[i++].name();
+        m_source_name_to_output[source_name] = output;
+        m_source_name_to_index[source_name] = static_cast<uint32_t>(m_sources.size() - 1);
+    }
 }
 
-hailo_status HwWriteElement::execute_activate()
+bool AsyncHwElement::has_all_sinks_arrived()
 {
-    return HAILO_SUCCESS;
+    for (const auto &current_sink : m_sink_has_arrived) {
+        if (!current_sink.second) {
+            return false;
+        }
+    }
+    return true;
 }
 
-hailo_status HwWriteElement::execute_deactivate()
+// This func overides the regular dataflow of this element and calls all next elements run_push_async directly
+// (normally, the run_push_async of the next elements will be called by the LL async read_done)
+void AsyncHwElement::handle_error_in_hw_async_elem(hailo_status error_status)
 {
-    // The flush operation will block until all buffers currently in the pipeline will be processed.
-    // We assume that no buffers are sent after the call for deactivate.
-    hailo_status flush_status = m_stream->flush();
-    if (HAILO_STREAM_ABORTED_BY_USER == flush_status) {
-        LOGGER__INFO("Failed flushing input stream {} because stream was aborted", m_stream->to_string());
-        return HAILO_SUCCESS;
-    } else if (HAILO_STREAM_NOT_ACTIVATED == flush_status) {
-        LOGGER__INFO("Failed flushing input stream {} because stream is not activated", m_stream->to_string());
-        return HAILO_SUCCESS;
-    } else if (HAILO_SUCCESS != flush_status) {
-        LOGGER__ERROR("flush has failed in {} with status {}", name(), flush_status);
+    for (auto &name_output_stream_pair : m_source_name_to_output) {
+        auto source_id = get_source_index_from_output_stream_name(name_output_stream_pair.second->name());
+        auto expected_buffer = m_output_streams_pools[name_output_stream_pair.second->name()]->acquire_buffer_ptr(m_timeout);
+
+        if (HAILO_SUCCESS == expected_buffer.status()) {
+            expected_buffer->get()->set_action_status(error_status);
+            m_sources[m_source_name_to_index[name_output_stream_pair.first]].next()->run_push_async(std::move(*expected_buffer.value()));
+        } else {
+            m_sources[m_source_name_to_index[name_output_stream_pair.first]].next()->run_push_async(PipelineBuffer(error_status));
+        }
     }
 
-    auto abort_status = m_stream->abort();
-    CHECK(((abort_status == HAILO_SUCCESS) || (abort_status == HAILO_STREAM_NOT_ACTIVATED)), abort_status,
-        "Failed to abort stream in {}", name());
-    return HAILO_SUCCESS;
+    for (const auto &sink : m_sinks) {
+        m_sink_has_arrived[sink.name()] = false;
+    }
+    m_input_buffers.clear();
+
+    return;
 }
 
-hailo_status HwWriteElement::execute_post_deactivate(bool should_clear_abort)
+void AsyncHwElement::run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink)
 {
-    if (should_clear_abort) {
-        auto status = m_stream->clear_abort();
-        CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
-            "Failed to clear abort stream in {}", name());
+    assert(contains(m_sink_name_to_input, sink.name()));
+
+    std::unique_lock<std::mutex> lock(m_mutex);
+    m_sink_has_arrived[sink.name()] = true;
+    m_input_buffers[sink.name()] = std::move(buffer);
+
+    if (has_all_sinks_arrived()) {
+        for (auto &input_buffer : m_input_buffers) {
+            if (HAILO_SUCCESS != input_buffer.second.action_status()) {
+                handle_error_in_hw_async_elem(input_buffer.second.action_status());
+
+                // Manual unlocking is done before notifying, to avoid waking up the waiting thread only to block again
+                lock.unlock();
+                m_cv.notify_all();
+            }
+            auto input_stream = m_sink_name_to_input[input_buffer.first];
+
+            InputStream::TransferDoneCallback write_done = [exec_done_cb = input_buffer.second.get_exec_done_cb()] (const InputStream::CompletionInfo &completion_info) {
+                if (HAILO_SUCCESS != completion_info.status) {
+                    LOGGER__ERROR("Got an unexpected status on callback. status={}", completion_info.status);
+                }
+                CompletionInfoAsyncInferInternal completion_info_async_infer{completion_info.status};
+                exec_done_cb(completion_info_async_infer);
+            };
+
+            auto status = input_stream->write_async(input_buffer.second.data(), input_buffer.second.size(), write_done);
+            if (HAILO_SUCCESS != status) {
+                handle_non_recoverable_async_error(status);
+            }
+        }
+
+        read_async_on_all_streams();
+
+        for (const auto &curr_sink : m_sinks) {
+            m_sink_has_arrived[curr_sink.name()] = false;
+        }
+        m_input_buffers.clear();
+
+        // Manual unlocking is done before notifying, to avoid waking up the waiting thread only to block again
+        lock.unlock();
+        m_cv.notify_all();
+    } else {
+        auto cv_status = m_cv.wait_for(lock, m_timeout);
+        if (std::cv_status::timeout == cv_status) {
+            LOGGER__ERROR("Waiting for other threads in AsyncHwElement {} has reached a timeout (timeout={}ms)", name(), m_timeout.count());
+            handle_non_recoverable_async_error(HAILO_TIMEOUT);
+        }
     }
-    return HAILO_SUCCESS;
 }
 
-hailo_status HwWriteElement::execute_clear()
+hailo_status AsyncHwElement::run_push(PipelineBuffer &&/*optional*/, const PipelinePad &/*sink*/)
 {
-    return HAILO_SUCCESS;
+    return HAILO_INVALID_OPERATION;
 }
 
-hailo_status HwWriteElement::execute_flush()
+void AsyncHwElement::read_async_on_all_streams()
 {
-    hailo_status status = m_got_flush_event->wait(m_stream->get_timeout());
-    CHECK_SUCCESS(status);
+    std::unordered_map<std::string, std::shared_ptr<PipelineBuffer>> name_to_buffer_map;
+    for (auto &name_output_stream_pair : m_source_name_to_output) {
+        auto expected_buffer = m_output_streams_pools[name_output_stream_pair.second->name()]->acquire_buffer_ptr(m_timeout);
+        if (HAILO_SUCCESS != expected_buffer.status()) {
+            handle_non_recoverable_async_error(expected_buffer.status());
+            return;
+        }
+        name_to_buffer_map[name_output_stream_pair.first] = expected_buffer.release();
+    }
 
-    status = m_got_flush_event->reset();
+    for (auto &name_output_stream_pair : m_source_name_to_output) {
+        auto mem_view = name_to_buffer_map[name_output_stream_pair.first]->as_view();
+        OutputStream::TransferDoneCallback read_done = [this, source_name = name_output_stream_pair.first, buffer = name_to_buffer_map[name_output_stream_pair.first]] (const OutputStream::CompletionInfo &completion_info) {
+            buffer->set_action_status(completion_info.status);
+            m_sources[m_source_name_to_index[source_name]].next()->run_push_async(std::move(*buffer));
+        };
+        auto status = name_output_stream_pair.second->read_async(mem_view, read_done);
+        if (HAILO_SUCCESS != status) {
+            handle_non_recoverable_async_error(status);
+            return;
+        }
+    }
+}
+
+hailo_status AsyncHwElement::enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name)
+{
+    CHECK(contains(m_source_name_to_output, source_name), HAILO_INTERNAL_FAILURE);
+
+    auto status = m_output_streams_pools[m_source_name_to_output[source_name]->name()]->enqueue_buffer(mem_view, exec_done);
     CHECK_SUCCESS(status);
 
     return HAILO_SUCCESS;
 }
 
-hailo_status HwWriteElement::execute_abort()
+Expected<bool> AsyncHwElement::are_buffer_pools_full()
 {
-    auto status = m_stream->abort();
-    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
-        "Failed to execute abort stream in {}", name());
+    for (const auto &output_streams_pool : m_output_streams_pools) {
+        if (output_streams_pool.second->is_full()) {
+            return true;
+        }
+    }
+    return false;
+}
+
+hailo_status AsyncHwElement::fill_buffer_pools(bool is_dma_able) {
+    for (auto &pool : m_output_streams_pools) {
+        auto status = pool.second->allocate_buffers(is_dma_able);
+        CHECK_SUCCESS(status);
+    }
     return HAILO_SUCCESS;
 }
 
-hailo_status HwWriteElement::execute_clear_abort()
+Expected<uint32_t> AsyncHwElement::get_source_index_from_output_stream_name(const std::string &output_stream_name)
 {
-    auto status = m_stream->clear_abort();
-    CHECK(((status == HAILO_SUCCESS) || (status == HAILO_STREAM_NOT_ACTIVATED)), status,
-        "Failed to execute clear_abort stream in {}", name());
-    return HAILO_SUCCESS;
+    for (auto &name_output_stream_pair : m_source_name_to_output) {
+        if (name_output_stream_pair.second->name() == output_stream_name) {
+            uint32_t ret_val = m_source_name_to_index.at(name_output_stream_pair.first);
+            return ret_val;
+        }
+    }
+    return make_unexpected(HAILO_NOT_FOUND);
 }
 
-hailo_status HwWriteElement::execute_wait_for_finish()
+Expected<uint32_t> AsyncHwElement::get_sink_index_from_input_stream_name(const std::string &input_stream_name)
 {
-    return HAILO_SUCCESS;
+    for (auto &name_input_stream_pair : m_sink_name_to_input) {
+        if (name_input_stream_pair.second->name() == input_stream_name) {
+            return Expected<uint32_t>(m_sink_name_to_index.at(name_input_stream_pair.first));
+        }
+    }
+    return make_unexpected(HAILO_INVALID_ARGUMENT);
 }
 
-std::string HwWriteElement::description() const
+Expected<PipelineBuffer> AsyncHwElement::run_pull(PipelineBuffer &&/*optional*/, const PipelinePad &/*source*/)
 {
-    std::stringstream element_description;
-    element_description << "(" << this->name() << " | hw_frame_size: " << m_stream->get_info().hw_frame_size << ")";   
+    return make_unexpected(HAILO_NOT_IMPLEMENTED);
+}
 
-    return element_description.str();
+std::vector<PipelinePad*> AsyncHwElement::execution_pads()
+{
+    std::vector<PipelinePad*> result;
+    result.reserve(m_sources.size());
+    for (auto& pad : m_sources) {
+        result.push_back(pad.next());
+    }
+    return result;
 }
 
 Expected<std::shared_ptr<CopyBufferElement>> CopyBufferElement::create(const std::string &name,
-    std::shared_ptr<std::atomic<hailo_status>> pipeline_status)
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status, std::chrono::milliseconds timeout, PipelineDirection pipeline_direction)
 {
     auto duration_collector = DurationCollector::create(HAILO_PIPELINE_ELEM_STATS_NONE);
     CHECK_EXPECTED(duration_collector);
-    auto elem_ptr = make_shared_nothrow<CopyBufferElement>(name, duration_collector.release(), std::move(pipeline_status));
+    auto elem_ptr = make_shared_nothrow<CopyBufferElement>(name, duration_collector.release(), std::move(pipeline_status),
+        timeout, pipeline_direction);
     CHECK_AS_EXPECTED(nullptr != elem_ptr, HAILO_OUT_OF_HOST_MEMORY);
 
     LOGGER__INFO("Created {}", elem_ptr->name());
@@ -2027,13 +2996,16 @@ Expected<std::shared_ptr<CopyBufferElement>> CopyBufferElement::create(const std
 }
 
 CopyBufferElement::CopyBufferElement(const std::string &name, DurationCollector &&duration_collector, 
-                                     std::shared_ptr<std::atomic<hailo_status>> pipeline_status) :
-    FilterElement(name, std::move(duration_collector), std::move(pipeline_status))
+                                     std::shared_ptr<std::atomic<hailo_status>> pipeline_status, std::chrono::milliseconds timeout,
+                                     PipelineDirection pipeline_direction) :
+    FilterElement(name, std::move(duration_collector), std::move(pipeline_status), pipeline_direction, nullptr, timeout)
 {}
 
 PipelinePad &CopyBufferElement::next_pad()
 {
-    // Note: The next elem to be run is downstream from this elem (i.e. buffers are pushed)
+    if (PipelineDirection::PUSH == m_pipeline_direction){
+        return *m_sources[0].next();
+    }
     return *m_sinks[0].prev();
 }
 
@@ -2090,51 +3062,12 @@ Expected<std::pair<std::vector<InputVStream>, std::vector<OutputVStream>>> VStre
 static hailo_vstream_params_t expand_vstream_params_autos(const hailo_stream_info_t &stream_info,
     const hailo_vstream_params_t &vstream_params)
 {
-    if (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) {
-        // TODO (HRT-11082): On NMS, return error if UINT16
-        if (HAILO_FORMAT_TYPE_UINT16 == vstream_params.user_buffer_format.type) {
-            LOGGER__WARNING("Passing 'HAILO_FORMAT_TYPE_UINT16' for NMS output is deprecated and will soon be unsupported. "\
-                "One should use HAILO_FORMAT_TYPE_FLOAT32");
-        }
-    }
     auto local_vstream_params = vstream_params;
     local_vstream_params.user_buffer_format = HailoRTDefaults::expand_auto_format(vstream_params.user_buffer_format,
         stream_info.format);
     return local_vstream_params;
 }
 
-static hailo_vstream_params_t expand_vstream_params_autos_argmax(const hailo_vstream_params_t &vstream_params,
-    hailo_format_t &op_input_format)
-{
-    auto local_vstream_params = vstream_params;
-    if (local_vstream_params.user_buffer_format.type == HAILO_FORMAT_TYPE_AUTO) {
-        local_vstream_params.user_buffer_format.type = op_input_format.type;
-    }
-    if (local_vstream_params.user_buffer_format.order == HAILO_FORMAT_ORDER_AUTO) {
-        if (op_input_format.order == HAILO_FORMAT_ORDER_NHCW || op_input_format.order == HAILO_FORMAT_ORDER_NHWC) {
-            local_vstream_params.user_buffer_format.order = HAILO_FORMAT_ORDER_NHW;
-        }
-        if (op_input_format.order == HAILO_FORMAT_ORDER_NC) {
-            local_vstream_params.user_buffer_format.order = HAILO_FORMAT_ORDER_NC;
-        }
-    }
-    return local_vstream_params;
-}
-
-static hailo_vstream_params_t expand_vstream_params_autos_softmax(const hailo_vstream_params_t &vstream_params,
-    hailo_format_t &op_input_format)
-{
-    auto local_vstream_params = vstream_params;
-    // Type should be float32, after de-quantization, and order NHWC or NC in softmax
-    if (local_vstream_params.user_buffer_format.type == HAILO_FORMAT_TYPE_AUTO) {
-        local_vstream_params.user_buffer_format.type = HAILO_FORMAT_TYPE_FLOAT32;
-    }
-    if (local_vstream_params.user_buffer_format.order == HAILO_FORMAT_ORDER_AUTO) {
-        local_vstream_params.user_buffer_format.order = op_input_format.order;
-    }
-    return local_vstream_params;
-}
-
 Expected<std::vector<InputVStream>> VStreamsBuilder::create_input_vstreams(ConfiguredNetworkGroup &net_group,
     const std::map<std::string, hailo_vstream_params_t> &inputs_params)
 {
@@ -2147,9 +3080,15 @@ Expected<std::vector<OutputVStream>> VStreamsBuilder::create_output_vstreams(Con
     return net_group.create_output_vstreams(outputs_params);
 }
 
-Expected<std::vector<InputVStream>> VStreamsBuilderUtils::create_inputs(std::shared_ptr<InputStream> input_stream, const hailo_vstream_info_t &vstream_info,
+Expected<std::vector<InputVStream>> VStreamsBuilderUtils::create_inputs(
+    std::vector<std::shared_ptr<InputStream>> input_streams, const hailo_vstream_info_t &vstream_info,
     const hailo_vstream_params_t &vstream_params)
 {
+    CHECK_AS_EXPECTED(!input_streams.empty(), HAILO_INVALID_ARGUMENT, "input streams can't be empty");
+    // if input streams has more than 1 value, it will be handled by handle_pix_buffer_splitter_flow. For all other purposes,
+    // assuming there is only 1 stream is valid
+    std::shared_ptr<InputStream> input_stream = input_streams.front();
+
     // TODO (HRT-4522): Support this measurement
     CHECK_AS_EXPECTED(!(vstream_params.vstream_stats_flags & HAILO_VSTREAM_STATS_MEASURE_FPS), HAILO_NOT_IMPLEMENTED,
         "Pipeline FPS statistics measurement is not implemented");
@@ -2162,8 +3101,9 @@ Expected<std::vector<InputVStream>> VStreamsBuilderUtils::create_inputs(std::sha
         core_op_activated_event = input_stream->get_core_op_activated_event();
     }
 
-    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
-    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+    auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED(shutdown_event_exp);
+    auto shutdown_event = shutdown_event_exp.release();
 
     auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
     CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
@@ -2173,44 +3113,52 @@ Expected<std::vector<InputVStream>> VStreamsBuilderUtils::create_inputs(std::sha
 
     auto user_timeout = std::chrono::milliseconds(vstream_params.timeout_ms);
 
-    auto hw_write_elem = HwWriteElement::create(input_stream,
-        PipelineObject::create_element_name("HwWriteElement", input_stream->name(), input_stream->get_info().index),
-        vstream_params.pipeline_elements_stats_flags, pipeline_status);
-    CHECK_EXPECTED(hw_write_elem);
-    elements.insert(elements.begin(), hw_write_elem.value());
+    if(input_streams.size() > 1) {
+        CHECK_SUCCESS_AS_EXPECTED(handle_pix_buffer_splitter_flow(input_streams, vstream_info,
+            std::move(elements), vstreams, vstream_params, shutdown_event, pipeline_status, core_op_activated_event,
+            pipeline_latency_accumulator.value()));
+    } else {
+        auto hw_write_elem = HwWriteElement::create(input_stream,
+            PipelineObject::create_element_name("HwWriteElement", input_stream->name(), input_stream->get_info().index),
+            vstream_params.pipeline_elements_stats_flags, pipeline_status);
+        CHECK_EXPECTED(hw_write_elem);
+        elements.insert(elements.begin(), hw_write_elem.value());
+
+        auto input_stream_base = std::static_pointer_cast<InputStreamBase>(input_stream);
+        auto should_transform = InputTransformContext::is_transformation_required(input_stream->get_info().shape,
+            vstream_params.user_buffer_format, input_stream->get_info().hw_shape, input_stream->get_info().format,
+            input_stream_base->get_quant_infos());
+        CHECK_EXPECTED(should_transform);
+
+        if (should_transform.value()) {
+            std::shared_ptr<SinkElement> elem_after_post_infer = hw_write_elem.value();
+            auto queue_elem = PushQueueElement::create(
+                PipelineObject::create_element_name("PushQueueElement", input_stream->get_info().name, input_stream->get_info().index),
+                vstream_params, shutdown_event, pipeline_status);
+            CHECK_EXPECTED(queue_elem);
+            elements.insert(elements.begin(), queue_elem.value());
+            CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(queue_elem.value(), hw_write_elem.value()));
 
-    auto should_transform = InputTransformContext::is_transformation_required(input_stream->get_info().shape,
-        vstream_params.user_buffer_format, input_stream->get_info().hw_shape, input_stream->get_info().format, 
-        input_stream->get_info().quant_info);
+            auto pre_infer_elem = PreInferElement::create(input_stream->get_info().shape, vstream_params.user_buffer_format,
+                input_stream->get_info().hw_shape, input_stream->get_info().format, input_stream_base->get_quant_infos(),
+                PipelineObject::create_element_name("PreInferElement", input_stream->get_info().name, input_stream->get_info().index),
+                vstream_params, shutdown_event, pipeline_status);
+            CHECK_EXPECTED(pre_infer_elem);
+            elements.insert(elements.begin(), pre_infer_elem.value());
+            CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(pre_infer_elem.value(), queue_elem.value()));
 
-    if (should_transform) {
-        std::shared_ptr<SinkElement> elem_after_post_infer = hw_write_elem.value();
-        auto queue_elem = PushQueueElement::create(
-            PipelineObject::create_element_name("PushQueueElement", input_stream->get_info().name, input_stream->get_info().index),
-            vstream_params, shutdown_event, pipeline_status);
-        CHECK_EXPECTED(queue_elem);
-        elements.insert(elements.begin(), queue_elem.value());
-        CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(queue_elem.value(), hw_write_elem.value()));
-
-        auto pre_infer_elem = PreInferElement::create(input_stream->get_info().shape, vstream_params.user_buffer_format,
-             input_stream->get_info().hw_shape, input_stream->get_info().format, input_stream->get_info().quant_info, 
-             PipelineObject::create_element_name("PreInferElement", input_stream->get_info().name, input_stream->get_info().index),
-             vstream_params, shutdown_event, pipeline_status);
-        CHECK_EXPECTED(pre_infer_elem);
-        elements.insert(elements.begin(), pre_infer_elem.value());
-        CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(pre_infer_elem.value(), queue_elem.value()));
-
-        input_stream->set_timeout(user_timeout);
-        auto vstream = InputVStream::create(vstream_info, vstream_params, pre_infer_elem.release(), hw_write_elem.release(), std::move(elements),
-            std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
-        CHECK_EXPECTED(vstream);
-        vstreams.emplace_back(vstream.release());
-    } else {
-        input_stream->set_timeout(user_timeout);
-        auto vstream = InputVStream::create(vstream_info, vstream_params, hw_write_elem.value(), hw_write_elem.value(), std::move(elements),
-            std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
-        CHECK_EXPECTED(vstream);
-        vstreams.emplace_back(vstream.release());
+            input_stream->set_timeout(user_timeout);
+            auto vstream = InputVStream::create(vstream_info, input_stream_base->get_quant_infos(), vstream_params, pre_infer_elem.release(), hw_write_elem.release(), std::move(elements),
+                std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+            CHECK_EXPECTED(vstream);
+            vstreams.emplace_back(vstream.release());
+        } else {
+            input_stream->set_timeout(user_timeout);
+            auto vstream = InputVStream::create(vstream_info, input_stream_base->get_quant_infos(), vstream_params, hw_write_elem.value(), hw_write_elem.value(), std::move(elements),
+                std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+            CHECK_EXPECTED(vstream);
+            vstreams.emplace_back(vstream.release());
+        }
     }
 
     for (const auto &vstream : vstreams) {
@@ -2226,13 +3174,19 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_outputs(std::s
     std::vector<std::shared_ptr<PipelineElement>> elements;
     std::vector<OutputVStream> vstreams;
 
+    if (0 != (HAILO_FORMAT_FLAGS_HOST_ARGMAX & output_stream->get_info().format.flags))
+    {
+        LOGGER__WARNING("Using legacy implementation of Argmax in host. Please re-compile your model with latest DFC version");
+    }
+
     EventPtr core_op_activated_event = nullptr;
     if (!output_stream->is_scheduled()) {
         core_op_activated_event = output_stream->get_core_op_activated_event();
     }
 
-    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
-    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+    auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED(shutdown_event_exp);
+    auto shutdown_event = shutdown_event_exp.release();
 
     auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
     CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
@@ -2272,11 +3226,13 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_outputs(std::s
         auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstream_params);
         CHECK_EXPECTED(pipeline_latency_accumulator);
 
+        auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_stream);
         auto should_transform = OutputTransformContext::is_transformation_required(output_stream->get_info().hw_shape, 
             output_stream->get_info().format, output_stream->get_info().shape, 
-            vstream_params.user_buffer_format, output_stream->get_info().quant_info);
+            vstream_params.user_buffer_format, output_stream_base->get_quant_infos());
+        CHECK_EXPECTED(should_transform);
 
-        if (should_transform) {
+        if (should_transform.value()) {
             auto hw_read_queue_element = add_pull_queue_element(output_stream, pipeline_status, elements, "PullQueueElement_hw_read",
                 shutdown_event, vstream_params);
             CHECK_EXPECTED(hw_read_queue_element);
@@ -2291,13 +3247,13 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_outputs(std::s
             CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(post_infer_element.value(), user_buffer_queue_element.value()));
             output_stream->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
             hw_read_queue_element->get()->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
-            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, user_buffer_queue_element.release(), std::move(elements),
+            auto vstream = OutputVStream::create(vstream_info->second, output_stream_base->get_quant_infos(), vstream_params, user_buffer_queue_element.release(), std::move(elements),
                 std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
             CHECK_EXPECTED(vstream);
             vstreams.emplace_back(vstream.release());
         } else {
             output_stream->set_timeout(std::chrono::milliseconds(vstream_params.timeout_ms));
-            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, hw_read_element.release(), std::move(elements),
+            auto vstream = OutputVStream::create(vstream_info->second, output_stream_base->get_quant_infos(), vstream_params, hw_read_element.release(), std::move(elements),
                 std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
             CHECK_EXPECTED(vstream);
             vstreams.emplace_back(vstream.release());
@@ -2311,8 +3267,96 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_outputs(std::s
     return vstreams;
 }
 
+Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_process_iou(std::shared_ptr<OutputStream> output_stream,
+    hailo_vstream_params_t vstream_params, const net_flow::PostProcessOpMetadataPtr &iou_op_metadata)
+{
+    std::vector<std::shared_ptr<PipelineElement>> elements;
+    std::vector<OutputVStream> vstreams;
+
+    EventPtr core_op_activated_event = nullptr;
+    if (!output_stream->is_scheduled()) {
+        core_op_activated_event = output_stream->get_core_op_activated_event();
+    }
+
+    auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
+    CHECK_AS_EXPECTED(shutdown_event_exp, HAILO_OUT_OF_HOST_MEMORY);
+    auto shutdown_event = shutdown_event_exp.release();
+
+    auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
+    CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
+
+    vstream_params.user_buffer_format = net_flow::NmsOpMetadata::expand_output_format_autos_by_op_type(vstream_params.user_buffer_format,
+        iou_op_metadata->type());
+
+    auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstream_params);
+    CHECK_EXPECTED(pipeline_latency_accumulator);
+
+    auto hw_read_element = add_hw_read_element(output_stream, pipeline_status, elements, "HwReadElement", shutdown_event,
+        vstream_params.queue_size, vstream_params.pipeline_elements_stats_flags, vstream_params.vstream_stats_flags);
+    CHECK_EXPECTED(hw_read_element);
+
+    auto hw_read_queue_element = add_pull_queue_element(output_stream, pipeline_status, elements, "PullQueueElement_hw_read",
+        shutdown_event, vstream_params);
+    CHECK_EXPECTED(hw_read_queue_element);
+    hw_read_queue_element->get()->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(hw_read_element.value(), hw_read_queue_element.value()));
+
+    auto post_infer_element = add_post_infer_element(output_stream, pipeline_status, elements,
+        "PostInferElement", vstream_params, shutdown_event);
+    CHECK_EXPECTED(post_infer_element);
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(hw_read_queue_element.value(), post_infer_element.value()));
+
+    auto pre_nms_convert_queue_element = add_pull_queue_element(output_stream, pipeline_status, elements, "PullQueueElement_pre_nms_convert",
+        shutdown_event, vstream_params);
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(post_infer_element.value(), pre_nms_convert_queue_element.value()));
+
+    auto nms_to_detections_element = add_nms_to_detections_convert_element(output_stream, pipeline_status, elements, "NmsFormatToDetectionsElement",
+        vstream_params, iou_op_metadata, vstream_params.queue_size, std::chrono::milliseconds(HAILO_INFINITE), vstream_params.vstream_stats_flags, shutdown_event);
+    CHECK_EXPECTED(nms_to_detections_element);
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(pre_nms_convert_queue_element.value(), nms_to_detections_element.value()));
+
+    auto pre_remove_overlapping_bboxes_element_queue_element = add_pull_queue_element(output_stream, pipeline_status, elements, "PullQueueElement_pre_bboxes_removing",
+        shutdown_event, vstream_params);
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(nms_to_detections_element.value(), pre_remove_overlapping_bboxes_element_queue_element.value()));
+
+    auto remove_overlapping_bboxes_element = add_remove_overlapping_bboxes_element(output_stream, pipeline_status, elements, "RemoveOverlappingBboxesElement",
+        vstream_params, iou_op_metadata, vstream_params.queue_size, std::chrono::milliseconds(HAILO_INFINITE), vstream_params.vstream_stats_flags, shutdown_event);
+    CHECK_EXPECTED(remove_overlapping_bboxes_element);
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(pre_remove_overlapping_bboxes_element_queue_element.value(), remove_overlapping_bboxes_element.value()));
+
+    auto pre_fill_nms_format_element_queue_element = add_pull_queue_element(output_stream, pipeline_status, elements, "PullQueueElement_pre_fill_nms_format",
+        shutdown_event, vstream_params);
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(remove_overlapping_bboxes_element.value(), pre_fill_nms_format_element_queue_element.value()));
+
+    auto fill_nms_format_element = add_fill_nms_format_element(output_stream, pipeline_status, elements, "FillNmsFormatElement",
+        vstream_params, iou_op_metadata, vstream_params.queue_size, std::chrono::milliseconds(HAILO_INFINITE), vstream_params.vstream_stats_flags, shutdown_event);
+    CHECK_EXPECTED(fill_nms_format_element);
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(pre_fill_nms_format_element_queue_element.value(), fill_nms_format_element.value()));
+
+    auto user_buffer_queue_element = add_user_buffer_queue_element(output_stream, pipeline_status, elements,
+        "UserBufferQueueElement", shutdown_event, vstream_params);
+    CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(fill_nms_format_element.value(), user_buffer_queue_element.value()));
+    output_stream->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
+
+    auto output_vstream_info = iou_op_metadata->get_output_vstream_info();
+    CHECK_EXPECTED(output_vstream_info);
+
+    auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_stream);
+    auto vstream = OutputVStream::create(output_vstream_info.value(), output_stream_base->get_quant_infos(), vstream_params, user_buffer_queue_element.release(), std::move(elements),
+        std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
+    CHECK_EXPECTED(vstream);
+    vstreams.emplace_back(vstream.release());
+
+    for (const auto &curr_vstream : vstreams) {
+        LOGGER__INFO("{}", curr_vstream.get_pipeline_description());
+    }
+
+    return vstreams;
+}
+
 Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_process_softmax(std::shared_ptr<OutputStream> output_stream,
-    const NameToVStreamParamsMap &vstreams_params_map, const hailo_vstream_info_t &output_vstream_info, const NetFlowElement &softmax_op)
+    const NameToVStreamParamsMap &vstreams_params_map, const hailo_vstream_info_t &output_vstream_info,
+    const net_flow::PostProcessOpMetadataPtr &softmax_op_metadata)
 {
     std::vector<std::shared_ptr<PipelineElement>> elements;
     std::vector<OutputVStream> vstreams;
@@ -2322,8 +3366,9 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_pr
         core_op_activated_event = output_stream->get_core_op_activated_event();
     }
 
-    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
-    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+    auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED(shutdown_event_exp);
+    auto shutdown_event = shutdown_event_exp.release();
 
     auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
     CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
@@ -2346,12 +3391,14 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_pr
         "Pipeline FPS statistics measurement is not implemented");
 
     assert(1 == vstreams_params_map.size());
-    auto op_input_format = softmax_op.op->inputs_metadata().begin()->second.format;
-    auto vstream_params = expand_vstream_params_autos_softmax(vstreams_params_map.begin()->second, op_input_format);
+    auto op_input_format = softmax_op_metadata->inputs_metadata().begin()->second.format;
+    auto vstream_params = vstreams_params_map.begin()->second;
+    vstream_params.user_buffer_format = net_flow::SoftmaxOpMetadata::expand_output_format_autos(vstream_params.user_buffer_format, op_input_format);
     if (HAILO_FORMAT_FLAGS_QUANTIZED & vstream_params.user_buffer_format.flags) {
         vstream_params.user_buffer_format.flags &= ~HAILO_FORMAT_FLAGS_QUANTIZED;
-        LOGGER__WARNING("Note: The output_vstream {} format flag is marked as quantized, which is not supported with {}. "
-            "flag has been automatically set to False.", softmax_op.output_vstream_info.name, softmax_op.op->get_name());
+        // TODO: Delete override when changing CLI default flags
+        LOGGER__WARNING("The output_vstream {} format flag is marked as quantized, which is not supported with {}. "
+            "flag has been automatically set to False.", vstreams_params_map.begin()->first, softmax_op_metadata->get_name());
     }
 
     auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstream_params);
@@ -2376,7 +3423,7 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_pr
     CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(post_infer_element.value(), pre_softmax_queue_element.value()));
 
     auto softmax_element = add_softmax_element(output_stream, pipeline_status, elements, "SoftmaxPostProcessElement",
-        vstream_params, softmax_op);
+        vstream_params, softmax_op_metadata, buffer_pool_size, std::chrono::milliseconds(HAILO_INFINITE), hw_read_stream_stats_flags, shutdown_event);
     CHECK_EXPECTED(softmax_element);
     CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(pre_softmax_queue_element.value(), softmax_element.value()));
     auto user_buffer_queue_element = add_user_buffer_queue_element(output_stream, pipeline_status, elements,
@@ -2384,7 +3431,9 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_pr
     CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(softmax_element.value(), user_buffer_queue_element.value()));
     output_stream->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
     hw_read_queue_element->get()->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
-    auto vstream = OutputVStream::create(output_vstream_info, vstream_params, user_buffer_queue_element.release(), std::move(elements),
+
+    auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_stream);
+    auto vstream = OutputVStream::create(output_vstream_info, output_stream_base->get_quant_infos(), vstream_params, user_buffer_queue_element.release(), std::move(elements),
         std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
     CHECK_EXPECTED(vstream);
     vstreams.emplace_back(vstream.release());
@@ -2412,74 +3461,121 @@ static bool are_formats_equal(const hailo_format_t &format1, const hailo_format_
 
 Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_vstreams_from_streams(const OutputStreamWithParamsVector &all_output_streams,
     OutputStreamPtrVector &output_streams, const hailo_vstream_params_t &vstream_params,
-    const std::unordered_map<std::string, std::shared_ptr<NetFlowElement>> &post_process_ops,
+    const std::unordered_map<std::string, net_flow::PostProcessOpMetadataPtr> &post_process_ops_metadata,
     const std::unordered_map<stream_name_t, op_name_t> &op_inputs_to_op_name, const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos_map)
 {
     auto first_stream_info = output_streams[0]->get_info();
-    if ((HAILO_FORMAT_ORDER_HAILO_NMS == first_stream_info.format.order) &&
-        (first_stream_info.nms_info.is_defused)) {
+    if ((HailoRTCommon::is_nms(first_stream_info)) && (first_stream_info.nms_info.is_defused)) {
         // Case defuse NMS
         return create_output_nms(output_streams, vstream_params, output_vstream_infos_map);
     } else if (contains(op_inputs_to_op_name, static_cast<stream_name_t>(first_stream_info.name))) {
         // Case post-process on host
         auto &op_name = op_inputs_to_op_name.at(first_stream_info.name);
-        auto &op = post_process_ops.at(op_name);
-        switch (op.get()->op_type) {
-            case HAILO_NET_FLOW_OP_TYPE_NMS:
-            {
-                assert(1 <= op->op->outputs_metadata().size());
-                auto updated_outputs_metadata = op->op->outputs_metadata();
-                updated_outputs_metadata.begin()->second.format = vstream_params.user_buffer_format;
-                if (HAILO_FORMAT_ORDER_AUTO == updated_outputs_metadata.begin()->second.format.order) {
-                    updated_outputs_metadata.begin()->second.format.order = HAILO_FORMAT_ORDER_HAILO_NMS;
-                }
-                if (HAILO_FORMAT_TYPE_AUTO == updated_outputs_metadata.begin()->second.format.type) {
-                    updated_outputs_metadata.begin()->second.format.type = HAILO_FORMAT_TYPE_FLOAT32;
-                }
-                if (HAILO_FORMAT_FLAGS_QUANTIZED & updated_outputs_metadata.begin()->second.format.flags) {
-                    updated_outputs_metadata.begin()->second.format.flags &= ~HAILO_FORMAT_FLAGS_QUANTIZED;
-                    LOGGER__WARNING("Note: The output_vstream {} format flag is marked as quantized, which is not supported with {}. "
-                        "flag has been automatically set to False.", op->output_vstream_info.name, op->op->get_name());
-                }
-
-                op->op->set_outputs_metadata(updated_outputs_metadata);
-                CHECK_SUCCESS_AS_EXPECTED(op->op->validate_metadata());
-                return create_output_post_process_nms(output_streams, vstream_params, output_vstream_infos_map, *op);
+        auto &op_metadata = post_process_ops_metadata.at(op_name);
+        switch (op_metadata->type()) {
+        case net_flow::OperationType::YOLOX:
+        case net_flow::OperationType::SSD:
+        case net_flow::OperationType::YOLOV5:
+        case net_flow::OperationType::YOLOV5SEG:
+        case net_flow::OperationType::IOU:
+        {
+            assert(1 <= op_metadata->outputs_metadata().size());
+            auto updated_outputs_metadata = op_metadata->outputs_metadata();
+            updated_outputs_metadata.begin()->second.format =
+                net_flow::NmsOpMetadata::expand_output_format_autos_by_op_type(vstream_params.user_buffer_format, op_metadata->type());
+            if (HAILO_FORMAT_FLAGS_QUANTIZED & updated_outputs_metadata.begin()->second.format.flags) {
+                updated_outputs_metadata.begin()->second.format.flags &= ~HAILO_FORMAT_FLAGS_QUANTIZED;
+                // TODO: Delete override when changing CLI default flags
+                LOGGER__WARNING("The output_vstream {} format flag is marked as quantized, which is not supported with {}. "
+                    "flag has been automatically set to False.", updated_outputs_metadata.begin()->first, op_metadata->get_name());
             }
+            op_metadata->set_outputs_metadata(updated_outputs_metadata);
+            CHECK_SUCCESS_AS_EXPECTED(op_metadata->validate_format_info());
 
-            case HAILO_NET_FLOW_OP_TYPE_ARGMAX:
+            std::shared_ptr<hailort::net_flow::Op> op;
+            switch (op_metadata->type()) {
+            case (net_flow::OperationType::YOLOX):
+            {
+                auto metadata = std::dynamic_pointer_cast<net_flow::YoloxOpMetadata>(op_metadata);
+                assert(nullptr != metadata);
+                auto op_expected = net_flow::YOLOXPostProcessOp::create(metadata);
+                CHECK_EXPECTED(op_expected);
+                op = op_expected.release();
+                break;
+            }
+            case (net_flow::OperationType::YOLOV5):
+            {
+                auto metadata = std::dynamic_pointer_cast<net_flow::Yolov5OpMetadata>(op_metadata);
+                assert(nullptr != metadata);
+                auto op_expected = net_flow::YOLOv5PostProcessOp::create(metadata);
+                CHECK_EXPECTED(op_expected);
+                op = op_expected.release();
+                break;
+            }
+            case (net_flow::OperationType::YOLOV5SEG):
+            {
+                auto metadata = std::dynamic_pointer_cast<net_flow::Yolov5SegOpMetadata>(op_metadata);
+                assert(nullptr != metadata);
+                auto op_expected = net_flow::Yolov5SegPostProcess::create(metadata);
+                CHECK_EXPECTED(op_expected);
+                op = op_expected.release();
+                break;
+            }
+            case (net_flow::OperationType::SSD):
+            {
+                auto metadata = std::dynamic_pointer_cast<net_flow::SSDOpMetadata>(op_metadata);
+                assert(nullptr != metadata);
+                auto op_expected = net_flow::SSDPostProcessOp::create(metadata);
+                CHECK_EXPECTED(op_expected);
+                op = op_expected.release();
+                break;
+            }
+            case (net_flow::OperationType::IOU):
             {
-                assert(output_streams.size() == 1);
-                NameToVStreamParamsMap name_to_vstream_params_map;
-                for (auto &output_stream : all_output_streams) {
-                    if (output_stream.first->get_info().name == output_streams[0]->get_info().name) {
-                        for (auto &vstream : output_stream.second) {
-                            name_to_vstream_params_map.insert(vstream);
-                        }
+                return create_output_post_process_iou(output_streams[0], vstream_params, op_metadata);
+            }
+            default:
+                break;
+            }
+
+            return create_output_post_process_nms(output_streams, vstream_params, output_vstream_infos_map, op);
+        }
+
+        case net_flow::OperationType::ARGMAX:
+        {
+            assert(output_streams.size() == 1);
+            NameToVStreamParamsMap name_to_vstream_params_map;
+            for (auto &output_stream : all_output_streams) {
+                if (output_stream.first->get_info().name == output_streams[0]->get_info().name) {
+                    for (auto &vstream : output_stream.second) {
+                        name_to_vstream_params_map.insert(vstream);
                     }
                 }
-                auto output_vstream_info = output_vstream_infos_map.at(op.get()->name);
-                return create_output_post_process_argmax(output_streams[0], name_to_vstream_params_map, output_vstream_info, *op);
             }
+            auto output_vstream_info = op_metadata->get_output_vstream_info();
+            CHECK_EXPECTED(output_vstream_info);
+            return create_output_post_process_argmax(output_streams[0], name_to_vstream_params_map, output_vstream_info.release(), op_metadata);
+        }
 
-             case HAILO_NET_FLOW_OP_TYPE_SOFTMAX:
-            {
-                assert(output_streams.size() == 1);
-                NameToVStreamParamsMap name_to_vstream_params_map;
-                for (auto &output_stream : all_output_streams) {
-                    if (output_stream.first->get_info().name == output_streams[0]->get_info().name) {
-                        for (auto &vstream : output_stream.second) {
-                            name_to_vstream_params_map.insert(vstream);
-                        }
+        case net_flow::OperationType::SOFTMAX:
+        {
+            assert(output_streams.size() == 1);
+            NameToVStreamParamsMap name_to_vstream_params_map;
+            for (auto &output_stream : all_output_streams) {
+                if (output_stream.first->get_info().name == output_streams[0]->get_info().name) {
+                    for (auto &vstream : output_stream.second) {
+                        name_to_vstream_params_map.insert(vstream);
                     }
                 }
-                auto output_vstream_info = output_vstream_infos_map.at(op.get()->name);
-                return create_output_post_process_softmax(output_streams[0], name_to_vstream_params_map, output_vstream_info, *op);
-             }
+            }
+            auto output_vstream_info = op_metadata->get_output_vstream_info();
+            CHECK_EXPECTED(output_vstream_info);
+            return create_output_post_process_softmax(output_streams[0], name_to_vstream_params_map, output_vstream_info.release(), op_metadata);
+            }
 
-            default:
-                LOGGER__ERROR("op type {} of op {} is not in any of the supported post process OP types", op.get()->op_type, op_name);
-                return make_unexpected(HAILO_INVALID_OPERATION);
+        default:
+            LOGGER__ERROR("op type {} of op {} is not in any of the supported post process OP types", net_flow::OpMetadata::get_operation_type_str(op_metadata->type()), op_name);
+            return make_unexpected(HAILO_INVALID_OPERATION);
         }
     } else {
         // All other cases
@@ -2505,8 +3601,9 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_nms(Out
             HAILO_INVALID_ARGUMENT, "All nms streams of the same virtual output must have the same format");
     }
 
-    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
-    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+    auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED(shutdown_event_exp);
+    auto shutdown_event = shutdown_event_exp.release();
 
     auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
     CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
@@ -2528,10 +3625,11 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_nms(Out
 Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_process_nms(OutputStreamPtrVector &output_streams,
     hailo_vstream_params_t vstreams_params,
     const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
-    const NetFlowElement &nms_op)
+    const std::shared_ptr<hailort::net_flow::Op> &nms_op)
 {
-    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
-    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+    auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED(shutdown_event_exp);
+    auto shutdown_event = shutdown_event_exp.release();
 
     auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
     CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
@@ -2577,17 +3675,25 @@ Expected<std::shared_ptr<PullQueueElement>> VStreamsBuilderUtils::add_pull_queue
 
 Expected<std::shared_ptr<ArgmaxPostProcessElement>> VStreamsBuilderUtils::add_argmax_element(std::shared_ptr<OutputStream> &output_stream,
     std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
-    const std::string &element_name, hailo_vstream_params_t &vstream_params, const NetFlowElement &argmax_op)
+    const std::string &element_name, hailo_vstream_params_t &vstream_params, const net_flow::PostProcessOpMetadataPtr &argmax_op_metadata,
+    size_t buffer_pool_size, std::chrono::milliseconds timeout, const hailo_vstream_stats_flags_t &vstream_flags, EventPtr &shutdown_event)
 {
     // Updating metadata according to user request. TODO: HRT-9737
-    auto updated_outputs_metadata = argmax_op.op.get()->outputs_metadata();
+    auto updated_outputs_metadata = argmax_op_metadata.get()->outputs_metadata();
     updated_outputs_metadata.begin()->second.format = vstream_params.user_buffer_format;
-    argmax_op.op.get()->set_outputs_metadata(updated_outputs_metadata);
-    CHECK_SUCCESS_AS_EXPECTED(argmax_op.op.get()->validate_metadata());
+    auto metadata = std::dynamic_pointer_cast<net_flow::ArgmaxOpMetadata>(argmax_op_metadata);
+    assert(nullptr != metadata);
+    metadata->set_outputs_metadata(updated_outputs_metadata);
+    CHECK_SUCCESS_AS_EXPECTED(metadata->validate_format_info());
     // Updating metadata according to use request. TODO: HRT-9737 - End
-    auto argmax_element = ArgmaxPostProcessElement::create(argmax_op.op,
+
+    auto op_expected = net_flow::ArgmaxPostProcessOp::create(metadata);
+    CHECK_EXPECTED(op_expected);
+    auto argmax_op = op_expected.release();
+
+    auto argmax_element = ArgmaxPostProcessElement::create(argmax_op,
         PipelineObject::create_element_name(element_name, output_stream->name(), output_stream->get_info().index),
-        vstream_params.pipeline_elements_stats_flags, pipeline_status);
+        vstream_params.pipeline_elements_stats_flags, pipeline_status, buffer_pool_size, timeout, vstream_flags, shutdown_event);
     CHECK_EXPECTED(argmax_element);
     elements.push_back(argmax_element.value());
     return argmax_element;
@@ -2595,27 +3701,81 @@ Expected<std::shared_ptr<ArgmaxPostProcessElement>> VStreamsBuilderUtils::add_ar
 
 Expected<std::shared_ptr<SoftmaxPostProcessElement>> VStreamsBuilderUtils::add_softmax_element(std::shared_ptr<OutputStream> &output_stream,
     std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
-    const std::string &element_name, hailo_vstream_params_t &vstream_params, const NetFlowElement &softmax_op)
+    const std::string &element_name, hailo_vstream_params_t &vstream_params, const net_flow::PostProcessOpMetadataPtr &softmax_op_metadata,
+    size_t buffer_pool_size, std::chrono::milliseconds timeout, const hailo_vstream_stats_flags_t &vstream_flags, EventPtr &shutdown_event)
 {
     // Updating metadata according to user request. TODO: HRT-9737
     // Currently softmax only supports inputs to be float32 and order NHWC or NC
-    auto updated_inputs_metadata = softmax_op.op.get()->inputs_metadata();
+    auto updated_inputs_metadata = softmax_op_metadata.get()->inputs_metadata();
     updated_inputs_metadata.begin()->second.format = vstream_params.user_buffer_format;
-    softmax_op.op.get()->set_inputs_metadata(updated_inputs_metadata);
-
-    auto updated_outputs_metadata = softmax_op.op.get()->outputs_metadata();
+    auto updated_outputs_metadata = softmax_op_metadata.get()->outputs_metadata();
     updated_outputs_metadata.begin()->second.format = vstream_params.user_buffer_format;
-    softmax_op.op.get()->set_outputs_metadata(updated_outputs_metadata);
-    CHECK_SUCCESS_AS_EXPECTED(softmax_op.op.get()->validate_metadata());
+    auto metadata = std::dynamic_pointer_cast<net_flow::SoftmaxOpMetadata>(softmax_op_metadata);
+    assert(nullptr != metadata);
+    metadata->set_outputs_metadata(updated_outputs_metadata);
+    metadata->set_inputs_metadata(updated_inputs_metadata);
+    CHECK_SUCCESS_AS_EXPECTED(metadata->validate_format_info());
     // Updating metadata according to use request. TODO: HRT-9737 - End
-    auto softmax_element = SoftmaxPostProcessElement::create(softmax_op.op,
+
+    auto op_expected = net_flow::SoftmaxPostProcessOp::create(metadata);
+    CHECK_EXPECTED(op_expected);
+    auto softmax_op = op_expected.release();
+    auto softmax_element = SoftmaxPostProcessElement::create(softmax_op,
         PipelineObject::create_element_name(element_name, output_stream->name(), output_stream->get_info().index),
-        vstream_params.pipeline_elements_stats_flags, pipeline_status);
+        vstream_params.pipeline_elements_stats_flags, pipeline_status, buffer_pool_size, timeout, vstream_flags, shutdown_event);
     CHECK_EXPECTED(softmax_element);
     elements.push_back(softmax_element.value());
     return softmax_element;
 }
 
+Expected<std::shared_ptr<ConvertNmsToDetectionsElement>> VStreamsBuilderUtils::add_nms_to_detections_convert_element(std::shared_ptr<OutputStream> &output_stream,
+    std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
+    const std::string &element_name, hailo_vstream_params_t &vstream_params, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+    size_t buffer_pool_size, std::chrono::milliseconds timeout, const hailo_vstream_stats_flags_t &vstream_flags, EventPtr &shutdown_event)
+{
+    auto metadata = std::dynamic_pointer_cast<net_flow::NmsOpMetadata>(op_metadata);
+    assert(nullptr != metadata);
+
+    auto nms_to_detections_element = ConvertNmsToDetectionsElement::create(metadata->nms_info(),
+        PipelineObject::create_element_name(element_name, output_stream->name(), output_stream->get_info().index),
+        vstream_params.pipeline_elements_stats_flags, pipeline_status, timeout, vstream_flags, shutdown_event, buffer_pool_size);
+    CHECK_EXPECTED(nms_to_detections_element);
+    elements.push_back(nms_to_detections_element.value());
+    return nms_to_detections_element;
+}
+
+Expected<std::shared_ptr<RemoveOverlappingBboxesElement>> VStreamsBuilderUtils::add_remove_overlapping_bboxes_element(std::shared_ptr<OutputStream> &output_stream,
+    std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
+    const std::string &element_name, hailo_vstream_params_t &vstream_params, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+    size_t buffer_pool_size, std::chrono::milliseconds timeout, const hailo_vstream_stats_flags_t &vstream_flags, EventPtr &shutdown_event)
+{
+    auto metadata = std::dynamic_pointer_cast<net_flow::NmsOpMetadata>(op_metadata);
+    assert(nullptr != metadata);
+
+    auto remove_overlapping_bboxes_element = RemoveOverlappingBboxesElement::create(metadata->nms_config(),
+        PipelineObject::create_element_name(element_name, output_stream->name(), output_stream->get_info().index),
+        vstream_params.pipeline_elements_stats_flags, pipeline_status, timeout, vstream_flags, shutdown_event, buffer_pool_size);
+    CHECK_EXPECTED(remove_overlapping_bboxes_element);
+    elements.push_back(remove_overlapping_bboxes_element.value());
+    return remove_overlapping_bboxes_element;
+}
+
+Expected<std::shared_ptr<FillNmsFormatElement>> VStreamsBuilderUtils::add_fill_nms_format_element(std::shared_ptr<OutputStream> &output_stream,
+    std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
+    const std::string &element_name, hailo_vstream_params_t &vstream_params, const net_flow::PostProcessOpMetadataPtr &op_metadata,
+    size_t buffer_pool_size, std::chrono::milliseconds timeout, const hailo_vstream_stats_flags_t &vstream_flags, EventPtr &shutdown_event)
+{
+    auto metadata = std::dynamic_pointer_cast<net_flow::NmsOpMetadata>(op_metadata);
+    assert(nullptr != metadata);
+
+    auto fill_nms_format_element = FillNmsFormatElement::create(metadata->nms_info(), vstream_params.user_buffer_format, metadata->nms_config(),
+        PipelineObject::create_element_name(element_name, output_stream->name(), output_stream->get_info().index),
+        vstream_params.pipeline_elements_stats_flags, pipeline_status, timeout, vstream_flags, shutdown_event, buffer_pool_size);
+    CHECK_EXPECTED(fill_nms_format_element);
+    elements.push_back(fill_nms_format_element.value());
+    return fill_nms_format_element;
+}
+
 Expected<std::shared_ptr<UserBufferQueueElement>> VStreamsBuilderUtils::add_user_buffer_queue_element(std::shared_ptr<OutputStream> &output_stream,
     std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
     const std::string &element_name, EventPtr &shutdown_event, const hailo_vstream_params_t &vstream_params)
@@ -2632,8 +3792,9 @@ Expected<std::shared_ptr<PostInferElement>> VStreamsBuilderUtils::add_post_infer
     std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
     const std::string &element_name, const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event)
 {
+    auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_stream);
     auto post_infer_element = PostInferElement::create(output_stream->get_info().hw_shape, output_stream->get_info().format,
-        output_stream->get_info().shape, vstream_params.user_buffer_format, output_stream->get_info().quant_info, output_stream->get_info().nms_info,
+        output_stream->get_info().shape, vstream_params.user_buffer_format, output_stream_base->get_quant_infos(), output_stream->get_info().nms_info,
         PipelineObject::create_element_name(element_name, output_stream->name(), output_stream->get_info().index),
         vstream_params, pipeline_status, shutdown_event);
     CHECK_EXPECTED(post_infer_element);
@@ -2642,7 +3803,8 @@ Expected<std::shared_ptr<PostInferElement>> VStreamsBuilderUtils::add_post_infer
 }
 
 Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_process_argmax(std::shared_ptr<OutputStream> output_stream,
-    const NameToVStreamParamsMap &vstreams_params_map, const hailo_vstream_info_t &output_vstream_info, const NetFlowElement &argmax_op)
+    const NameToVStreamParamsMap &vstreams_params_map, const hailo_vstream_info_t &output_vstream_info,
+    const net_flow::PostProcessOpMetadataPtr &argmax_op_metadata)
 {
     std::vector<std::shared_ptr<PipelineElement>> elements;
     std::vector<OutputVStream> vstreams;
@@ -2652,8 +3814,9 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_pr
         core_op_activated_event = output_stream->get_core_op_activated_event();
     }
 
-    auto shutdown_event = Event::create_shared(Event::State::not_signalled);
-    CHECK_AS_EXPECTED(nullptr != shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+    auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED(shutdown_event_exp);
+    auto shutdown_event = shutdown_event_exp.release();
 
     auto pipeline_status = make_shared_nothrow<std::atomic<hailo_status>>(HAILO_SUCCESS);
     CHECK_AS_EXPECTED(nullptr != pipeline_status, HAILO_OUT_OF_HOST_MEMORY);
@@ -2680,8 +3843,9 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_pr
     CHECK_EXPECTED(hw_read_element);
 
     assert(1 == vstreams_params_map.size());
-    auto op_input_format = argmax_op.op->inputs_metadata().begin()->second.format;
-    auto vstream_params = expand_vstream_params_autos_argmax(vstreams_params_map.begin()->second, op_input_format);
+    auto op_input_format = argmax_op_metadata->inputs_metadata().begin()->second.format;
+    auto vstream_params = vstreams_params_map.begin()->second;
+    vstream_params.user_buffer_format = net_flow::ArgmaxOpMetadata::expand_output_format_autos(vstream_params.user_buffer_format, op_input_format);
 
     auto hw_read_queue_element = add_pull_queue_element(output_stream, pipeline_status, elements, "PullQueueElement_hw_read",
         shutdown_event, vstream_params);
@@ -2690,7 +3854,7 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_pr
     CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(hw_read_element.value(), hw_read_queue_element.value()));
 
     auto argmax_element = add_argmax_element(output_stream, pipeline_status, elements, "ArgmaxPostProcessElement",
-        vstream_params, argmax_op);
+        vstream_params, argmax_op_metadata, buffer_pool_size, std::chrono::milliseconds(HAILO_INFINITE), hw_read_stream_stats_flags, shutdown_event);
     CHECK_EXPECTED(argmax_element);
 
     CHECK_SUCCESS_AS_EXPECTED(PipelinePad::link_pads(hw_read_queue_element.value(), argmax_element.value()));
@@ -2706,7 +3870,8 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_pr
 
     output_stream->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
     hw_read_queue_element->get()->set_timeout(std::chrono::milliseconds(HAILO_INFINITE));
-    auto vstream = OutputVStream::create(output_vstream_info, vstream_params, post_argmax_queue_element.release(), std::move(elements),
+    auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_stream);
+    auto vstream = OutputVStream::create(output_vstream_info, output_stream_base->get_quant_infos(), vstream_params, post_argmax_queue_element.release(), std::move(elements),
         std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
     CHECK_EXPECTED(vstream);
     vstreams.emplace_back(vstream.release());
@@ -2718,6 +3883,84 @@ Expected<std::vector<OutputVStream>> VStreamsBuilderUtils::create_output_post_pr
     return vstreams;
 }
 
+hailo_status VStreamsBuilderUtils::handle_pix_buffer_splitter_flow(std::vector<std::shared_ptr<InputStream>> streams,
+    const hailo_vstream_info_t &vstream_info, std::vector<std::shared_ptr<PipelineElement>> &&base_elements,
+    std::vector<InputVStream> &vstreams, const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event,
+    std::shared_ptr<std::atomic<hailo_status>> pipeline_status, EventPtr &core_op_activated_event,
+    AccumulatorPtr accumalator)
+{
+    // sorting the streams based on their plane index
+    auto compartor = [](std::shared_ptr<InputStream> a, std::shared_ptr<InputStream> b) {
+        return static_cast<InputStreamBase&>(*a).get_layer_info().plane_index <
+        static_cast<InputStreamBase&>(*b).get_layer_info().plane_index; };
+    std::sort(streams.begin(), streams.end(), compartor);
+
+    auto duration_collector_expected = DurationCollector::create(vstream_params.pipeline_elements_stats_flags);
+    CHECK_EXPECTED_AS_STATUS(duration_collector_expected);
+
+    auto planes_splitter = PixBufferElement::create(PipelineObject::create_element_name("PixBufferElement",
+        vstream_info.name, 0), std::chrono::milliseconds(HAILO_INFINITE), duration_collector_expected.release(),
+        pipeline_status, streams.size(), vstream_info.format.order);
+    CHECK_EXPECTED_AS_STATUS(planes_splitter);
+    base_elements.push_back(planes_splitter.value());
+
+    uint32_t stream_number = 0;
+
+    for (const auto &stream : streams){
+         auto hw_write_elem = HwWriteElement::create(stream,
+            PipelineObject::create_element_name("HwWriteElement", stream->name(), stream->get_info().index),
+            vstream_params.pipeline_elements_stats_flags, pipeline_status);
+        CHECK_EXPECTED_AS_STATUS(hw_write_elem);
+        base_elements.insert(base_elements.begin(), hw_write_elem.value());
+
+        auto &stream_info = stream->get_info();
+        auto &src_image_shape = stream_info.shape;
+        auto &dst_image_shape = stream_info.hw_shape;
+        auto &dst_format = stream_info.format;
+        auto src_format = vstream_params.user_buffer_format;
+        /* the format order of each plane (stream) is determined by the stream's order.
+            type and flags are determined by the vstream params */
+        src_format.order = dst_format.order;
+        auto quant_infos = std::vector<hailo_quant_info_t>{stream_info.quant_info};
+
+        auto should_transform_expected = InputTransformContext::is_transformation_required(src_image_shape, src_format,
+            dst_image_shape, dst_format, quant_infos);
+        CHECK_EXPECTED_AS_STATUS(should_transform_expected);
+
+        if(should_transform_expected.value()){
+            auto pre_infer_elem = PreInferElement::create(src_image_shape, src_format,
+                dst_image_shape, dst_format, quant_infos, PipelineObject::create_element_name( "PreInferElement",
+                stream->get_info().name, stream->get_info().index), vstream_params, shutdown_event, pipeline_status);
+
+            CHECK_EXPECTED_AS_STATUS(pre_infer_elem);
+            base_elements.push_back(pre_infer_elem.value());
+
+            auto queue_elem = PushQueueElement::create(
+                PipelineObject::create_element_name("PushQueueElement", stream_info.name, stream_info.index),
+                vstream_params, shutdown_event, pipeline_status);
+
+            CHECK_EXPECTED_AS_STATUS(queue_elem);
+            base_elements.push_back((queue_elem.value()));
+
+            CHECK_SUCCESS(PipelinePad::link_pads(planes_splitter.value(), pre_infer_elem.value(), stream_number, 0));
+            CHECK_SUCCESS(PipelinePad::link_pads(pre_infer_elem.value(), queue_elem.value()));
+            CHECK_SUCCESS(PipelinePad::link_pads(queue_elem.value(), *hw_write_elem));
+        } else {
+            CHECK_SUCCESS(PipelinePad::link_pads(planes_splitter.value(), *hw_write_elem, stream_number, 0));
+
+        }
+        stream_number++;
+    }
+
+    auto vstream = InputVStream::create(vstream_info, { vstream_info.quant_info }, vstream_params, planes_splitter.value(),
+        nullptr, std::move(base_elements), std::move(pipeline_status), shutdown_event,
+        core_op_activated_event, accumalator);
+    CHECK_EXPECTED_AS_STATUS(vstream);
+    vstreams.emplace_back(vstream.release());
+
+    return HAILO_SUCCESS;
+}
+
 hailo_status VStreamsBuilderUtils::add_demux(std::shared_ptr<OutputStream> output_stream, NameToVStreamParamsMap &vstreams_params_map,
     std::vector<std::shared_ptr<PipelineElement>> &&base_elements, std::vector<OutputVStream> &vstreams,
     std::shared_ptr<HwReadElement> hw_read_elem, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
@@ -2779,17 +4022,17 @@ hailo_status VStreamsBuilderUtils::add_demux(std::shared_ptr<OutputStream> outpu
         current_vstream_elements.push_back(demux_queue_elem.value());
         CHECK_SUCCESS(PipelinePad::link_pads(demux_elem.value(), demux_queue_elem.value(), i, 0));
 
-        demux_queue_elem.value()->set_timeout(HAILO_INFINITE_TIMEOUT);
+        CHECK_SUCCESS(demux_queue_elem.value()->set_timeout(HAILO_INFINITE_TIMEOUT));
 
         auto pipeline_latency_accumulator = create_pipeline_latency_accumulator(vstream_params);
         CHECK_EXPECTED_AS_STATUS(pipeline_latency_accumulator);
-
         auto should_transform = OutputTransformContext::is_transformation_required(edge_info.hw_shape, 
-            edge_info.format, edge_info.shape, vstream_params.user_buffer_format, edge_info.quant_info);
+            edge_info.format, edge_info.shape, vstream_params.user_buffer_format, std::vector<hailo_quant_info_t>{edge_info.quant_info}); // TODO: Get quant vector (HRT-11077)
+        CHECK_EXPECTED_AS_STATUS(should_transform);
 
-        if (should_transform) {
+        if (should_transform.value()) {
             auto post_infer_elem = PostInferElement::create(edge_info.hw_shape, edge_info.format, 
-                edge_info.shape, vstream_params.user_buffer_format, edge_info.quant_info, edge_info.nms_info,
+                edge_info.shape, vstream_params.user_buffer_format, { edge_info.quant_info }, edge_info.nms_info, // TODO: Get quant vector (HRT-11077)
                 PipelineObject::create_element_name("PostInferElement", edge_info.name, edge_info.index),
                 vstream_params, pipeline_status, shutdown_event);
             CHECK_EXPECTED_AS_STATUS(post_infer_elem);
@@ -2803,7 +4046,8 @@ hailo_status VStreamsBuilderUtils::add_demux(std::shared_ptr<OutputStream> outpu
             current_vstream_elements.push_back(post_infer_queue_elem.value());
             CHECK_SUCCESS(PipelinePad::link_pads(post_infer_elem.value(), post_infer_queue_elem.value()));
 
-            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, post_infer_queue_elem.release(), std::move(current_vstream_elements),
+            // TODO: Replace output_stream->get_quant_infos() with mux quant info
+            auto vstream = OutputVStream::create(vstream_info->second, output_stream->get_quant_infos(), vstream_params, post_infer_queue_elem.release(), std::move(current_vstream_elements), // TODO: Get quant vector (HRT-11077)
                 std::move(pipeline_status_copy), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
             CHECK_EXPECTED_AS_STATUS(vstream);
             vstreams.emplace_back(vstream.release());
@@ -2811,12 +4055,13 @@ hailo_status VStreamsBuilderUtils::add_demux(std::shared_ptr<OutputStream> outpu
             // TODO: HRT-4179
             auto user_copy_elem = CopyBufferElement::create(
                 PipelineObject::create_element_name("CopyBufferElement", edge_info.name, edge_info.index),
-                pipeline_status);
+                pipeline_status, std::chrono::milliseconds(vstream_params.timeout_ms));
             CHECK_EXPECTED_AS_STATUS(user_copy_elem);
             current_vstream_elements.push_back(user_copy_elem.value());
             CHECK_SUCCESS(PipelinePad::link_pads(demux_queue_elem.value(), user_copy_elem.value()));
 
-            auto vstream = OutputVStream::create(vstream_info->second, vstream_params, user_copy_elem.release(), std::move(current_vstream_elements),
+            // TODO: Replace output_stream->get_quant_infos() with mux quant info
+            auto vstream = OutputVStream::create(vstream_info->second, { edge_info.quant_info }, vstream_params, user_copy_elem.release(), std::move(current_vstream_elements), // TODO: Get quant vector (HRT-11077)
                 std::move(pipeline_status_copy), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
             CHECK_EXPECTED_AS_STATUS(vstream);
             vstreams.emplace_back(vstream.release());
@@ -2881,14 +4126,15 @@ hailo_status VStreamsBuilderUtils::add_nms_fuse(OutputStreamPtrVector &output_st
     CHECK_EXPECTED_AS_STATUS(pipeline_latency_accumulator);
 
     auto should_transform = OutputTransformContext::is_transformation_required({}, src_stream_format, {},
-        vstreams_params.user_buffer_format, vstream_info->second.quant_info);
-    
+        vstreams_params.user_buffer_format, std::vector<hailo_quant_info_t>{vstream_info->second.quant_info}); // TODO: Get quant vector (HRT-11078)
+    CHECK_EXPECTED_AS_STATUS(should_transform);
+
     EventPtr core_op_activated_event = nullptr;
     if (!output_streams[0]->is_scheduled()) {
         core_op_activated_event = output_streams[0]->get_core_op_activated_event();
     }
 
-    if (should_transform) {
+    if (should_transform.value()) {
         auto nms_queue_elem = PullQueueElement::create(
             PipelineObject::create_element_name("PullQueueElement_nms", fused_layer_name, 0),
             vstreams_params, shutdown_event, pipeline_status);
@@ -2898,7 +4144,7 @@ hailo_status VStreamsBuilderUtils::add_nms_fuse(OutputStreamPtrVector &output_st
         CHECK_SUCCESS(PipelinePad::link_pads(nms_elem.value(), nms_queue_elem.value()));
 
         auto post_infer_elem = PostInferElement::create({}, src_stream_format,
-            {}, vstreams_params.user_buffer_format, vstream_info->second.quant_info, fused_layer_nms_info,
+            {}, vstreams_params.user_buffer_format, { vstream_info->second.quant_info }, fused_layer_nms_info, // TODO: Get quant vector (HRT-11078)
             PipelineObject::create_element_name("PostInferElement", fused_layer_name, 0), vstreams_params, pipeline_status,
             shutdown_event);
         CHECK_EXPECTED_AS_STATUS(post_infer_elem);
@@ -2913,12 +4159,14 @@ hailo_status VStreamsBuilderUtils::add_nms_fuse(OutputStreamPtrVector &output_st
         elements.push_back(post_infer_queue_elem.value());
         CHECK_SUCCESS(PipelinePad::link_pads(post_infer_elem.value(), post_infer_queue_elem.value()));
 
-        auto vstream = OutputVStream::create(vstream_info->second, vstreams_params, post_infer_queue_elem.release(), std::move(elements),
+        // TODO: Check with SDK where should we take the quant infos from (output_streams[0]->get_quant_infos() might be good) (HRT-11078)
+        auto vstream = OutputVStream::create(vstream_info->second, output_streams[0]->get_quant_infos(), vstreams_params, post_infer_queue_elem.release(), std::move(elements), // TODO: Get quant vector (HRT-11078)
             std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
         CHECK_EXPECTED_AS_STATUS(vstream);
         vstreams.emplace_back(vstream.release());
     } else {
-        auto vstream = OutputVStream::create(vstream_info->second, vstreams_params, nms_elem.release(), std::move(elements),
+        // TODO: Check with SDK where should we take the quant infos from (output_streams[0]->get_quant_infos() might be good) (HRT-11078)
+        auto vstream = OutputVStream::create(vstream_info->second, output_streams[0]->get_quant_infos(), vstreams_params, nms_elem.release(), std::move(elements), // TODO: Get quant vector (HRT-11078)
             std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
         CHECK_EXPECTED_AS_STATUS(vstream);
         vstreams.emplace_back(vstream.release());
@@ -2931,23 +4179,18 @@ hailo_status VStreamsBuilderUtils::add_nms_post_process(OutputStreamPtrVector &o
     std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
     EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
     const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
-    const NetFlowElement &nms_op)
+    const std::shared_ptr<hailort::net_flow::Op> &nms_op)
 {
     auto first_stream_info = output_streams[0]->get_info();
-    if (vstreams_params.user_buffer_format.type == HAILO_FORMAT_TYPE_AUTO) {
-        vstreams_params.user_buffer_format.type = HAILO_FORMAT_TYPE_FLOAT32;
-    }
-    if (vstreams_params.user_buffer_format.order == HAILO_FORMAT_ORDER_AUTO) {
-        vstreams_params.user_buffer_format.order = HAILO_FORMAT_ORDER_HAILO_NMS;
-    }
-    vstreams_params = expand_vstream_params_autos(first_stream_info, vstreams_params);
+    vstreams_params.user_buffer_format = net_flow::NmsOpMetadata::expand_output_format_autos_by_op_type(
+        vstreams_params.user_buffer_format, nms_op->metadata()->type());
     CHECK(vstreams_params.user_buffer_format.type == HAILO_FORMAT_TYPE_FLOAT32, HAILO_INVALID_ARGUMENT,
         "NMS output format type must be HAILO_FORMAT_TYPE_FLOAT32");
-    CHECK(vstreams_params.user_buffer_format.order == HAILO_FORMAT_ORDER_HAILO_NMS, HAILO_INVALID_ARGUMENT,
-        "NMS output format order must be HAILO_FORMAT_ORDER_HAILO_NMS");
+    CHECK(HailoRTCommon::is_nms(vstreams_params.user_buffer_format.order), HAILO_INVALID_ARGUMENT,
+        "NMS output format order must be HAILO_FORMAT_ORDER_HAILO_NMS or HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK");
 
-    std::map<std::string, net_flow::BufferMetaData> inputs_metadata;
-    std::map<std::string, net_flow::BufferMetaData> outputs_metadata;
+    std::unordered_map<std::string, net_flow::BufferMetaData> inputs_metadata;
+    std::unordered_map<std::string, net_flow::BufferMetaData> outputs_metadata;
     for (uint32_t i = 0; i < output_streams.size(); ++i) {
         const auto &curr_stream_info = output_streams[i]->get_info();
         net_flow::BufferMetaData input_metadata = {
@@ -2959,11 +4202,11 @@ hailo_status VStreamsBuilderUtils::add_nms_post_process(OutputStreamPtrVector &o
         inputs_metadata.insert({curr_stream_info.name, input_metadata});
     }
 
-    const auto &output_pads = nms_op.op->outputs_metadata();
+    const auto &output_pads = nms_op->outputs_metadata();
     assert(output_pads.size() == 1);
     auto vstream_info = output_vstream_infos.find(output_pads.begin()->first);
     CHECK(vstream_info != output_vstream_infos.end(), HAILO_NOT_FOUND,
-        "Failed to find vstream info of {}", nms_op.name);
+        "Failed to find vstream info of {}", nms_op->metadata()->get_name());
     net_flow::BufferMetaData output_metadata = {
         vstream_info->second.shape,
         vstream_info->second.shape,
@@ -2972,8 +4215,10 @@ hailo_status VStreamsBuilderUtils::add_nms_post_process(OutputStreamPtrVector &o
     };
     outputs_metadata.insert({vstream_info->first, output_metadata});
 
-    auto nms_elem = NmsPostProcessMuxElement::create(nms_op.op, nms_op.nms_info,
-        PipelineObject::create_element_name("NmsPostProcessMuxElement", nms_op.name, 0),
+    auto op_metadata = std::dynamic_pointer_cast<net_flow::NmsOpMetadata>(nms_op->metadata());
+    assert(nullptr != op_metadata);
+    auto nms_elem = NmsPostProcessMuxElement::create(nms_op,
+        PipelineObject::create_element_name("NmsPostProcessMuxElement", nms_op->get_name(), 0),
         vstreams_params, shutdown_event, pipeline_status);
     CHECK_EXPECTED_AS_STATUS(nms_elem);
 
@@ -2986,10 +4231,12 @@ hailo_status VStreamsBuilderUtils::add_nms_post_process(OutputStreamPtrVector &o
         const auto &curr_stream_info = output_streams[i]->get_info();
         output_streams[i]->set_timeout(HAILO_INFINITE_TIMEOUT);
 
+        auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_streams[i]);
         auto should_transform = OutputTransformContext::is_transformation_required(curr_stream_info.hw_shape, curr_stream_info.format,
-            curr_stream_info.hw_shape, nms_src_format, vstream_info->second.quant_info);
+            curr_stream_info.hw_shape, nms_src_format, output_stream_base->get_quant_infos());
+        CHECK_EXPECTED_AS_STATUS(should_transform);
 
-        CHECK(!should_transform, HAILO_INVALID_ARGUMENT, "Unexpected transformation required for {}", curr_stream_info.name);
+        CHECK(!(should_transform.value()), HAILO_INVALID_ARGUMENT, "Unexpected transformation required for {}", curr_stream_info.name);
 
         auto hw_read_elem = HwReadElement::create(output_streams[i],
             PipelineObject::create_element_name("HwReadElement", curr_stream_info.name, curr_stream_info.index),
@@ -3018,7 +4265,9 @@ hailo_status VStreamsBuilderUtils::add_nms_post_process(OutputStreamPtrVector &o
         core_op_activated_event = output_streams[0]->get_core_op_activated_event();
     }
 
-    auto vstream = OutputVStream::create(vstream_info->second, vstreams_params, nms_elem.release(), std::move(elements),
+    // If user uses HailoRT++ we can assume he won't use Output Scale by Feature
+    auto output_stream_base = std::static_pointer_cast<OutputStreamBase>(output_streams[0]);
+    auto vstream = OutputVStream::create(vstream_info->second, output_stream_base->get_quant_infos(), vstreams_params, nms_elem.release(), std::move(elements),
         std::move(pipeline_status), shutdown_event, core_op_activated_event, pipeline_latency_accumulator.release());
     CHECK_EXPECTED_AS_STATUS(vstream);
     vstreams.emplace_back(vstream.release());
index 587d360647c5260583cfefbc20a2d64fadd139f2..cc62a3353565cf355cc6456e0f3f4e1536fea817 100644 (file)
 #ifndef _HAILO_VSTREAM_INTERNAL_HPP_
 #define _HAILO_VSTREAM_INTERNAL_HPP_
 
+#include "hailo/expected.hpp"
 #include "hailo/transform.hpp"
 #include "hailo/stream.hpp"
 
 #include "hef/hef_internal.hpp"
 #include "net_flow/pipeline/pipeline.hpp"
-#include "net_flow/ops/yolo_post_process.hpp"
+#include "net_flow/ops/yolov5_post_process.hpp"
 #include "network_group/network_group_internal.hpp"
 
 #ifdef HAILO_SUPPORT_MULTI_PROCESS
@@ -52,6 +53,7 @@ public:
 
     virtual size_t get_frame_size() const;
     virtual const hailo_vstream_info_t &get_info() const;
+    virtual const std::vector<hailo_quant_info_t> &get_quant_infos() const;
     virtual const hailo_format_t &get_user_buffer_format() const;
     virtual std::string name() const;
     virtual std::string network_name() const;
@@ -67,13 +69,14 @@ public:
     virtual hailo_status stop_vstream();
     virtual hailo_status stop_and_clear();
 
-    virtual hailo_status before_fork() { return HAILO_SUCCESS; };
-    virtual hailo_status after_fork_in_parent() { return HAILO_SUCCESS; };
-    virtual hailo_status after_fork_in_child() { return HAILO_SUCCESS; };
     virtual bool is_aborted() { return m_is_aborted; };
 
+    virtual hailo_status before_fork();
+    virtual hailo_status after_fork_in_parent();
+    virtual hailo_status after_fork_in_child();
+
 protected:
-    BaseVStream(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    BaseVStream(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
         std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
         EventPtr &&core_op_activated_event, hailo_status &output_status);
@@ -82,6 +85,7 @@ protected:
     virtual std::string get_pipeline_description() const = 0;
 
     hailo_vstream_info_t m_vstream_info;
+    std::vector<hailo_quant_info_t> m_quant_infos;
     hailo_vstream_params_t m_vstream_params;
     bool m_measure_pipeline_latency;
     std::shared_ptr<PipelineElement> m_entry_element;
@@ -101,7 +105,7 @@ protected:
 class InputVStreamInternal : public BaseVStream
 {
 public:
-    static Expected<std::shared_ptr<InputVStreamInternal>> create(const hailo_vstream_info_t &vstream_info,
+    static Expected<std::shared_ptr<InputVStreamInternal>> create(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos,
         const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
         std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
@@ -111,12 +115,14 @@ public:
     virtual ~InputVStreamInternal() = default;
 
     virtual hailo_status write(const MemoryView &buffer) = 0;
+    virtual hailo_status write(const hailo_pix_buffer_t &buffer) = 0;
     virtual hailo_status flush() = 0;
+    virtual bool is_multi_planar() const = 0;
 
     virtual std::string get_pipeline_description() const override;
 
 protected:
-    InputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    InputVStreamInternal(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
         std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
         EventPtr &&core_op_activated_event, hailo_status &output_status);
@@ -128,7 +134,7 @@ class OutputVStreamInternal : public BaseVStream
 {
 public:
     static Expected<std::shared_ptr<OutputVStreamInternal>> create(
-        const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+        const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
         std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
         EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator);
@@ -140,8 +146,12 @@ public:
     virtual hailo_status read(MemoryView buffer) = 0;
     virtual std::string get_pipeline_description() const override;
 
+    virtual hailo_status set_nms_score_threshold(float32_t threshold) = 0;
+    virtual hailo_status set_nms_iou_threshold(float32_t threshold) = 0;
+    virtual hailo_status set_nms_max_proposals_per_class(uint32_t max_proposals_per_class) = 0;
+
 protected:
-    OutputVStreamInternal(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    OutputVStreamInternal(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
         std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
         EventPtr core_op_activated_event, hailo_status &output_status);
@@ -151,7 +161,7 @@ protected:
 class InputVStreamImpl : public InputVStreamInternal
 {
 public:
-    static Expected<std::shared_ptr<InputVStreamImpl>> create(const hailo_vstream_info_t &vstream_info,
+    static Expected<std::shared_ptr<InputVStreamImpl>> create(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos,
         const hailo_vstream_params_t &vstream_params, std::shared_ptr<PipelineElement> pipeline_entry,
         std::shared_ptr<SinkElement> pipeline_exit, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, EventPtr core_op_activated_event,
@@ -163,19 +173,24 @@ public:
     virtual ~InputVStreamImpl();
 
     virtual hailo_status write(const MemoryView &buffer) override;
+    virtual hailo_status write(const hailo_pix_buffer_t &buffer) override;
     virtual hailo_status flush() override;
+    virtual bool is_multi_planar() const override;
+
 private:
-    InputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    InputVStreamImpl(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
         std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
         EventPtr core_op_activated_event, hailo_status &output_status);
+
+    bool m_is_multi_planar;
 };
 
 class OutputVStreamImpl : public OutputVStreamInternal
 {
 public:
     static Expected<std::shared_ptr<OutputVStreamImpl>> create(
-        const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+        const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
         std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event,
         EventPtr core_op_activated_event, AccumulatorPtr pipeline_latency_accumulator);
@@ -185,7 +200,7 @@ public:
     OutputVStreamImpl &operator=(const OutputVStreamImpl &) = delete;
     virtual ~OutputVStreamImpl();
 
-    virtual hailo_status read(MemoryView buffer);
+    virtual hailo_status read(MemoryView buffer) override;
 
     void set_on_vstream_cant_read_callback(std::function<void()> callback)
     {
@@ -197,12 +212,18 @@ public:
         m_can_read_callback = callback;
     }
 
+    virtual hailo_status set_nms_score_threshold(float32_t threshold) override;
+    virtual hailo_status set_nms_iou_threshold(float32_t threshold) override;
+    virtual hailo_status set_nms_max_proposals_per_class(uint32_t max_proposals_per_class) override;
+
 private:
-    OutputVStreamImpl(const hailo_vstream_info_t &vstream_info, const hailo_vstream_params_t &vstream_params,
+    OutputVStreamImpl(const hailo_vstream_info_t &vstream_info, const std::vector<hailo_quant_info_t> &quant_infos, const hailo_vstream_params_t &vstream_params,
         std::shared_ptr<PipelineElement> pipeline_entry, std::vector<std::shared_ptr<PipelineElement>> &&pipeline,
         std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr shutdown_event, AccumulatorPtr pipeline_latency_accumulator,
         EventPtr core_op_activated_event, hailo_status &output_status);
 
+    Expected<std::shared_ptr<net_flow::NmsOpMetadata>> get_nms_metadata_from_pipeline() const;
+
     std::function<void()> m_cant_read_callback;
     std::function<void()> m_can_read_callback;
 };
@@ -211,7 +232,7 @@ private:
 class InputVStreamClient : public InputVStreamInternal
 {
 public:
-    static Expected<std::shared_ptr<InputVStreamClient>> create(uint32_t input_vstream_handle);
+    static Expected<std::shared_ptr<InputVStreamClient>> create(VStreamIdentifier &&identifier);
     InputVStreamClient(InputVStreamClient &&) noexcept = default;
     InputVStreamClient(const InputVStreamClient &) = delete;
     InputVStreamClient &operator=(InputVStreamClient &&) noexcept = default;
@@ -219,7 +240,9 @@ public:
     virtual ~InputVStreamClient();
 
     virtual hailo_status write(const MemoryView &buffer) override;
+    virtual hailo_status write(const hailo_pix_buffer_t &buffer) override;
     virtual hailo_status flush() override;
+    virtual bool is_multi_planar() const override;
 
     virtual hailo_status abort() override;
     virtual hailo_status resume() override;
@@ -241,12 +264,12 @@ public:
     virtual bool is_aborted() override;
 
 private:
-    InputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t input_vstream_handle, hailo_format_t &&user_buffer_format, 
+    InputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, VStreamIdentifier &&identifier, hailo_format_t &&user_buffer_format,
         hailo_vstream_info_t &&info);
     hailo_status create_client();
 
     std::unique_ptr<HailoRtRpcClient> m_client;
-    uint32_t m_handle;
+    VStreamIdentifier m_identifier;
     hailo_format_t m_user_buffer_format;
     hailo_vstream_info_t m_info;
 };
@@ -254,7 +277,7 @@ private:
 class OutputVStreamClient : public OutputVStreamInternal
 {
 public:
-    static Expected<std::shared_ptr<OutputVStreamClient>> create(uint32_t outputs_vstream_handle);
+    static Expected<std::shared_ptr<OutputVStreamClient>> create(const VStreamIdentifier &&identifier);
     OutputVStreamClient(OutputVStreamClient &&) noexcept = default;
     OutputVStreamClient(const OutputVStreamClient &) = delete;
     OutputVStreamClient &operator=(OutputVStreamClient &&) noexcept = default;
@@ -282,14 +305,18 @@ public:
     virtual hailo_status start_vstream() override;
     virtual bool is_aborted() override;
 
+    virtual hailo_status set_nms_score_threshold(float32_t threshold) override;
+    virtual hailo_status set_nms_iou_threshold(float32_t threshold) override;
+    virtual hailo_status set_nms_max_proposals_per_class(uint32_t max_proposals_per_class) override;
+
 private:
-    OutputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t outputs_vstream_handle, hailo_format_t &&user_buffer_format,
+    OutputVStreamClient(std::unique_ptr<HailoRtRpcClient> client, const VStreamIdentifier &&identifier, hailo_format_t &&user_buffer_format,
         hailo_vstream_info_t &&info);
 
     hailo_status create_client();
 
     std::unique_ptr<HailoRtRpcClient> m_client;
-    uint32_t m_handle;
+    VStreamIdentifier m_identifier;
     hailo_format_t m_user_buffer_format;
     hailo_vstream_info_t m_info;
 };
@@ -299,19 +326,23 @@ class PreInferElement : public FilterElement
 {
 public:
     static Expected<std::shared_ptr<PreInferElement>> create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
-        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info,
+        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos,
         const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
-        hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PUSH, bool is_dma_able = false);
     static Expected<std::shared_ptr<PreInferElement>> create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
-        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const std::string &name,
-        const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, const std::string &name,
+        const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PUSH, bool is_dma_able = false);
+    static Expected<std::shared_ptr<PreInferElement>> create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos,
+        const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction = PipelineDirection::PUSH, bool is_dma_able = false);
     PreInferElement(std::unique_ptr<InputTransformContext> &&transform_context, BufferPoolPtr buffer_pool,
         const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
-        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, PipelineDirection pipeline_direction);
     virtual ~PreInferElement() = default;
 
     virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
-    virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
     virtual PipelinePad &next_pad() override;
     virtual std::string description() const override;
 
@@ -320,8 +351,32 @@ protected:
 
 private:
     std::unique_ptr<InputTransformContext> m_transform_context;
-    BufferPoolPtr m_pool;
-    std::chrono::milliseconds m_timeout;
+};
+
+class RemoveOverlappingBboxesElement : public FilterElement
+{
+public:
+    static Expected<std::shared_ptr<RemoveOverlappingBboxesElement>> create(
+        const net_flow::NmsPostProcessConfig nms_config, const std::string &name,
+        hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        std::chrono::milliseconds timeout, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
+        size_t buffer_pool_size, PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
+    static Expected<std::shared_ptr<RemoveOverlappingBboxesElement>> create(const net_flow::NmsPostProcessConfig nms_config,
+        const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction = PipelineDirection::PULL, 
+        bool is_last_copy_element = false);
+    RemoveOverlappingBboxesElement(const net_flow::NmsPostProcessConfig &&nms_config, const std::string &name, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, BufferPoolPtr buffer_pool, std::chrono::milliseconds timeout,
+        PipelineDirection pipeline_direction);
+    virtual ~RemoveOverlappingBboxesElement() = default;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual PipelinePad &next_pad() override;
+    virtual std::string description() const override;
+
+protected:
+    virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
+
+private:
+    net_flow::NmsPostProcessConfig m_nms_config;
 };
 
 class PostInferElement : public FilterElement
@@ -329,29 +384,83 @@ class PostInferElement : public FilterElement
 public:
     static Expected<std::shared_ptr<PostInferElement>> create(const hailo_3d_image_shape_t &src_image_shape,
         const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
-        const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info, const std::string &name,
+        const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info, const std::string &name,
         hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
         std::chrono::milliseconds timeout, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
-        size_t buffer_pool_size);
+        size_t buffer_pool_size, PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
     static Expected<std::shared_ptr<PostInferElement>> create(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
-        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info,
-        const std::string &name, const hailo_vstream_params_t &vstream_params, std::shared_ptr<std::atomic<hailo_status>> pipeline_status, EventPtr shutdown_event);
+        const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_info, const hailo_nms_info_t &nms_info,
+        const std::string &name, const hailo_vstream_params_t &vstream_params, std::shared_ptr<std::atomic<hailo_status>> pipeline_status, EventPtr shutdown_event,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
+    static Expected<std::shared_ptr<PostInferElement>> create(const hailo_3d_image_shape_t &src_image_shape,
+        const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
+        const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info, const std::string &name,
+        const ElementBuildParams &build_params, PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
     PostInferElement(std::unique_ptr<OutputTransformContext> &&transform_context, const std::string &name,
         DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, BufferPoolPtr buffer_pool,
-        std::chrono::milliseconds timeout);
+        std::chrono::milliseconds timeout, PipelineDirection pipeline_direction = PipelineDirection::PULL);
     virtual ~PostInferElement() = default;
-    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
     virtual PipelinePad &next_pad() override;
     virtual std::string description() const override;
-    virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
 
 protected:
     virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
 
 private:
     std::unique_ptr<OutputTransformContext> m_transform_context;
-    BufferPoolPtr m_pool;
-    std::chrono::milliseconds m_timeout;
+};
+
+class ConvertNmsToDetectionsElement : public FilterElement
+{
+public:
+    static Expected<std::shared_ptr<ConvertNmsToDetectionsElement>> create(const hailo_nms_info_t &nms_info, const std::string &name,
+        hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        std::chrono::milliseconds timeout, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
+        size_t buffer_pool_size, PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
+    static Expected<std::shared_ptr<ConvertNmsToDetectionsElement>> create(
+        const hailo_nms_info_t &nms_info, const std::string &name, const ElementBuildParams &build_params,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
+    ConvertNmsToDetectionsElement(const hailo_nms_info_t &&nms_info, const std::string &name, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, BufferPoolPtr buffer_pool, std::chrono::milliseconds timeout,
+        PipelineDirection pipeline_direction);
+    virtual ~ConvertNmsToDetectionsElement() = default;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual PipelinePad &next_pad() override;
+    virtual std::string description() const override;
+
+protected:
+    virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
+
+private:
+    hailo_nms_info_t m_nms_info;
+};
+
+class FillNmsFormatElement : public FilterElement
+{
+public:
+    static Expected<std::shared_ptr<FillNmsFormatElement>> create(const hailo_nms_info_t nms_info,
+        const hailo_format_t &dst_format, const net_flow::NmsPostProcessConfig nms_config, const std::string &name,
+        hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        std::chrono::milliseconds timeout, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
+        size_t buffer_pool_size, PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
+    static Expected<std::shared_ptr<FillNmsFormatElement>> create(const hailo_nms_info_t nms_info,
+        const hailo_format_t &dst_format, const net_flow::NmsPostProcessConfig nms_config, const std::string &name,
+        const ElementBuildParams &build_params, PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
+    FillNmsFormatElement(const net_flow::NmsPostProcessConfig &&nms_config, const std::string &name, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, BufferPoolPtr buffer_pool, std::chrono::milliseconds timeout,
+        PipelineDirection pipeline_direction);
+    virtual ~FillNmsFormatElement() = default;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual PipelinePad &next_pad() override;
+    virtual std::string description() const override;
+
+protected:
+    virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
+
+private:
+    net_flow::NmsPostProcessConfig m_nms_config;
 };
 
 class ArgmaxPostProcessElement : public FilterElement
@@ -359,14 +468,21 @@ class ArgmaxPostProcessElement : public FilterElement
 public:
     static Expected<std::shared_ptr<ArgmaxPostProcessElement>> create(std::shared_ptr<net_flow::Op> argmax_op,
         const std::string &name, hailo_pipeline_elem_stats_flags_t elem_flags,
-        std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, size_t buffer_pool_size, std::chrono::milliseconds timeout,
+        hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, PipelineDirection pipeline_direction = PipelineDirection::PULL,
+        bool is_last_copy_element = false);
+    static Expected<std::shared_ptr<ArgmaxPostProcessElement>> create(std::shared_ptr<net_flow::Op> argmax_op,
+        const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction = PipelineDirection::PULL,
+        bool is_last_copy_element = false);
     ArgmaxPostProcessElement(std::shared_ptr<net_flow::Op> argmax_op, const std::string &name,
-        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+        std::chrono::milliseconds timeout, BufferPoolPtr buffer_pool, PipelineDirection pipeline_direction = PipelineDirection::PULL);
     virtual ~ArgmaxPostProcessElement() = default;
-    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
     virtual PipelinePad &next_pad() override;
     virtual std::string description() const override;
-
+    
 protected:
     virtual Expected<PipelineBuffer> action(PipelineBuffer &&input, PipelineBuffer &&optional) override;
 
@@ -379,11 +495,18 @@ class SoftmaxPostProcessElement : public FilterElement
 public:
     static Expected<std::shared_ptr<SoftmaxPostProcessElement>> create(std::shared_ptr<net_flow::Op> softmax_op,
         const std::string &name, hailo_pipeline_elem_stats_flags_t elem_flags,
-        std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, size_t buffer_pool_size, std::chrono::milliseconds timeout,
+        hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
+    static Expected<std::shared_ptr<SoftmaxPostProcessElement>> create(std::shared_ptr<net_flow::Op> softmax_op,
+        const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction = PipelineDirection::PULL,
+        bool is_last_copy_element = false);
     SoftmaxPostProcessElement(std::shared_ptr<net_flow::Op> softmax_op, const std::string &name,
-        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+        std::chrono::milliseconds timeout, BufferPoolPtr buffer_pool, PipelineDirection pipeline_direction = PipelineDirection::PULL);
     virtual ~SoftmaxPostProcessElement() = default;
-    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
     virtual PipelinePad &next_pad() override;
     virtual std::string description() const override;
 
@@ -398,15 +521,18 @@ class NmsPostProcessMuxElement : public BaseMuxElement
 {
 public:
     static Expected<std::shared_ptr<NmsPostProcessMuxElement>> create(std::shared_ptr<net_flow::Op> nms_op,
-        hailo_nms_info_t nms_info, const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
+        const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size,
         hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
-        std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
+    static Expected<std::shared_ptr<NmsPostProcessMuxElement>> create(std::shared_ptr<net_flow::Op> nms_op,
+        const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
     static Expected<std::shared_ptr<NmsPostProcessMuxElement>> create(std::shared_ptr<net_flow::Op> nms_op,
-        hailo_nms_info_t nms_info, const std::string &name, const hailo_vstream_params_t &vstream_params,
-        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        const std::string &name, const hailo_vstream_params_t &vstream_params,
+        EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
     NmsPostProcessMuxElement(std::shared_ptr<net_flow::Op> nms_op, BufferPoolPtr &&pool, const std::string &name,
         std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
-        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, PipelineDirection pipeline_direction);
 
     virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
     void add_sink_name(const std::string &name) // TODO: remove this (HRT-8875)
@@ -414,12 +540,13 @@ public:
         m_sinks_names.push_back(name);
     }
 
+    std::shared_ptr<net_flow::Op> get_op() { return m_nms_op; }
+
 protected:
     virtual Expected<PipelineBuffer> action(std::vector<PipelineBuffer> &&inputs, PipelineBuffer &&optional) override;
 
 private:
     std::shared_ptr<net_flow::Op> m_nms_op;
-    BufferPoolPtr m_pool;
     std::vector<std::string> m_sinks_names; // TODO: remove this (HRT-8875)
 };
 
@@ -428,11 +555,17 @@ class NmsMuxElement : public BaseMuxElement
 public:
     static Expected<std::shared_ptr<NmsMuxElement>> create(const std::vector<hailo_nms_info_t> &nms_infos,
         const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
-        hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
     static Expected<std::shared_ptr<NmsMuxElement>> create(const std::vector<hailo_nms_info_t> &nms_infos, const std::string &name,
-        const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL, bool is_last_copy_element = false);
+    static Expected<std::shared_ptr<NmsMuxElement>> create(const std::vector<hailo_nms_info_t> &nms_infos,
+        const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction = PipelineDirection::PULL,
+        bool is_last_copy_element = false);
     NmsMuxElement(const std::vector<hailo_nms_info_t> &nms_infos, const hailo_nms_info_t &fused_nms_info, BufferPoolPtr &&pool, const std::string &name,
-        std::chrono::milliseconds timeout, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+        std::chrono::milliseconds timeout, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL);
     const hailo_nms_info_t &get_fused_nms_info() const;
 
     virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
@@ -443,7 +576,6 @@ protected:
 private:
     std::vector<hailo_nms_info_t> m_nms_infos;
     hailo_nms_info_t m_fused_nms_info;
-    BufferPoolPtr m_pool;
 };
 
 class TransformDemuxElement : public BaseDemuxElement
@@ -451,9 +583,13 @@ class TransformDemuxElement : public BaseDemuxElement
 public:
     static Expected<std::shared_ptr<TransformDemuxElement>> create(std::shared_ptr<OutputDemuxer> demuxer,
         const std::string &name, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
-        hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PULL);
+    static Expected<std::shared_ptr<TransformDemuxElement>> create(std::shared_ptr<OutputDemuxer> demuxer,
+        const std::string &name, const ElementBuildParams &build_params, PipelineDirection pipeline_direction = PipelineDirection::PULL);
     TransformDemuxElement(std::shared_ptr<OutputDemuxer> demuxer, std::vector<BufferPoolPtr> &&pools, const std::string &name,
-        std::chrono::milliseconds timeout, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status);
+        std::chrono::milliseconds timeout, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+        PipelineDirection pipeline_direction);
 
     virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
 
@@ -462,7 +598,21 @@ protected:
 
 private:
     std::shared_ptr<OutputDemuxer> m_demuxer;
-    std::vector<BufferPoolPtr> m_pools;
+};
+
+class PixBufferElement : public BaseDemuxElement
+{
+public:
+    static Expected<std::shared_ptr<PixBufferElement>> create(const std::string &name,
+        std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, size_t sources_count, hailo_format_order_t order);
+
+    PixBufferElement(const std::string &name, std::chrono::milliseconds timeout, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, size_t sources_count, hailo_format_order_t order);
+
+protected:
+    virtual Expected<std::vector<PipelineBuffer>> action(PipelineBuffer &&input);
+    hailo_format_order_t m_order;
 };
 
 class HwReadElement : public SourceElement
@@ -470,15 +620,16 @@ class HwReadElement : public SourceElement
 public:
     static Expected<std::shared_ptr<HwReadElement>> create(std::shared_ptr<OutputStream> stream, const std::string &name, std::chrono::milliseconds timeout,
         size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags, hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event,
-        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, std::unique_ptr<OutputTransformContext> m_transform_context = nullptr);
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, PipelineDirection pipeline_direction = PipelineDirection::PULL);
     HwReadElement(std::shared_ptr<OutputStream> stream, BufferPoolPtr buffer_pool, const std::string &name, std::chrono::milliseconds timeout,
         DurationCollector &&duration_collector, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
-        BufferPoolPtr transform_pool = nullptr, std::unique_ptr<OutputTransformContext> transform_context = nullptr);
+        PipelineDirection pipeline_direction);
     virtual ~HwReadElement() = default;
 
     virtual std::vector<AccumulatorPtr> get_queue_size_accumulators() override;
 
-    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) override;
     virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
     virtual hailo_status execute_activate() override;
     virtual hailo_status execute_deactivate() override;
@@ -494,23 +645,23 @@ public:
 private:
     std::shared_ptr<OutputStream> m_stream;
     BufferPoolPtr m_pool;
-    BufferPoolPtr m_transform_pool;
     std::chrono::milliseconds m_timeout;
     EventPtr m_shutdown_event;
     WaitOrShutdown m_activation_wait_or_shutdown;
-    std::unique_ptr<OutputTransformContext> m_transform_context;
 };
 
 class HwWriteElement : public SinkElement
 {
 public:
     static Expected<std::shared_ptr<HwWriteElement>> create(std::shared_ptr<InputStream> stream, const std::string &name,
-        hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+        hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PUSH);
     HwWriteElement(std::shared_ptr<InputStream> stream, const std::string &name, DurationCollector &&duration_collector,
-        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr got_flush_event);
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, EventPtr got_flush_event, PipelineDirection pipeline_direction);
     virtual ~HwWriteElement() = default;
 
-    virtual hailo_status run_push(PipelineBuffer &&buffer) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) override;
     virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
     virtual hailo_status execute_activate() override;
     virtual hailo_status execute_deactivate() override;
@@ -527,11 +678,82 @@ private:
     EventPtr m_got_flush_event;
 };
 
+class LastAsyncElement : public SinkElement
+{
+public:
+    static Expected<std::shared_ptr<LastAsyncElement>> create(const std::string &name,
+        hailo_pipeline_elem_stats_flags_t elem_flags, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PUSH);
+    static Expected<std::shared_ptr<LastAsyncElement>> create(const std::string &name,
+        const ElementBuildParams &build_params, PipelineDirection pipeline_direction = PipelineDirection::PUSH);
+    LastAsyncElement(const std::string &name, DurationCollector &&duration_collector,
+        std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status,
+        PipelineDirection pipeline_direction);
+    virtual ~LastAsyncElement() = default;
+
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+    virtual std::string description() const override;
+    virtual hailo_status execute_activate() override;
+    virtual hailo_status execute_wait_for_finish() override;
+
+    virtual hailo_status enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name) override;
+    virtual Expected<bool> are_buffer_pools_full() override;
+    virtual hailo_status fill_buffer_pools(bool is_dma_able) override;
+};
+
+// Note: This element does infer - it sends writes to HW and reads the outputs
+class AsyncHwElement : public PipelineElement
+{
+public:
+    static Expected<std::shared_ptr<AsyncHwElement>> create(const std::vector<std::shared_ptr<InputStream>> &input_streams,
+        const std::vector<std::shared_ptr<OutputStream>> &output_streams, std::chrono::milliseconds timeout, size_t buffer_pool_size, hailo_pipeline_elem_stats_flags_t elem_flags,
+        hailo_vstream_stats_flags_t vstream_flags, EventPtr shutdown_event, const std::string &name, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        PipelineDirection pipeline_direction = PipelineDirection::PUSH, bool is_last_copy_element = false);
+    AsyncHwElement(const std::vector<std::shared_ptr<InputStream>> &input_streams, const std::vector<std::shared_ptr<OutputStream>> &output_streams,
+        std::chrono::milliseconds timeout, std::unordered_map<std::string, BufferPoolPtr> &&output_streams_pools, const std::string &name,
+        DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> &&pipeline_status, PipelineDirection pipeline_direction);
+    virtual ~AsyncHwElement() = default;
+
+    virtual void run_push_async(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual hailo_status run_push(PipelineBuffer &&buffer, const PipelinePad &sink) override;
+    virtual Expected<PipelineBuffer> run_pull(PipelineBuffer &&optional, const PipelinePad &source) override;
+
+    virtual hailo_status enqueue_execution_buffer(MemoryView mem_view, const TransferDoneCallbackAsyncInfer &exec_done, const std::string &source_name) override;
+    virtual Expected<bool> are_buffer_pools_full() override;
+    virtual hailo_status fill_buffer_pools(bool is_dma_able) override;
+
+    Expected<uint32_t> get_source_index_from_output_stream_name(const std::string &output_stream_name);
+    Expected<uint32_t> get_sink_index_from_input_stream_name(const std::string &input_stream_name);
+
+protected:
+    virtual std::vector<PipelinePad*> execution_pads() override;
+
+private:
+    void read_async_on_all_streams();
+    void handle_error_in_hw_async_elem(hailo_status error_status);
+    bool has_all_sinks_arrived();
+
+    std::chrono::milliseconds m_timeout;
+    std::unordered_map<std::string, BufferPoolPtr> m_output_streams_pools;
+    std::unordered_map<std::string, std::shared_ptr<InputStream>> m_sink_name_to_input;
+    std::unordered_map<std::string, std::shared_ptr<OutputStream>> m_source_name_to_output;
+    std::unordered_map<std::string, bool> m_sink_has_arrived;
+    std::unordered_map<std::string, PipelineBuffer> m_input_buffers;
+    std::mutex m_mutex;
+    std::condition_variable m_cv;
+    std::unordered_map<std::string, uint32_t> m_source_name_to_index;
+    std::unordered_map<std::string, uint32_t> m_sink_name_to_index;
+};
+
 class CopyBufferElement : public FilterElement
 {
 public:
-    static Expected<std::shared_ptr<CopyBufferElement>> create(const std::string &name, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
-    CopyBufferElement(const std::string &name, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> pipeline_status);
+    static Expected<std::shared_ptr<CopyBufferElement>> create(const std::string &name, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        std::chrono::milliseconds timeout, PipelineDirection pipeline_direction = PipelineDirection::PULL);
+    CopyBufferElement(const std::string &name, DurationCollector &&duration_collector, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
+        std::chrono::milliseconds timeout, PipelineDirection pipeline_direction);
     virtual ~CopyBufferElement() = default;
     virtual PipelinePad &next_pad() override;
 
@@ -542,7 +764,7 @@ protected:
 class VStreamsBuilderUtils
 {
 public:
-    static Expected<std::vector<InputVStream>> create_inputs(std::shared_ptr<InputStream> input_stream, const hailo_vstream_info_t &input_vstream_infos,
+    static Expected<std::vector<InputVStream>> create_inputs(std::vector<std::shared_ptr<InputStream>> input_streams, const hailo_vstream_info_t &input_vstream_infos,
         const hailo_vstream_params_t &vstreams_params);
     static Expected<std::vector<OutputVStream>> create_outputs(std::shared_ptr<OutputStream> output_stream,
         NameToVStreamParamsMap &vstreams_params_map, const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
@@ -553,51 +775,90 @@ public:
         const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
     static Expected<std::vector<OutputVStream>> create_output_vstreams_from_streams(const OutputStreamWithParamsVector &all_output_streams,
         OutputStreamPtrVector &output_streams, const hailo_vstream_params_t &vstream_params,
-        const std::unordered_map<std::string, std::shared_ptr<NetFlowElement>> &post_process_ops,
+        const std::unordered_map<std::string, net_flow::PostProcessOpMetadataPtr> &post_process_ops,
         const std::unordered_map<std::string, std::string> &op_inputs_to_op_name, const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos_map);
     static Expected<std::vector<OutputVStream>> create_output_post_process_nms(OutputStreamPtrVector &output_streams,
         hailo_vstream_params_t vstreams_params,
         const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
-        const NetFlowElement &nms_op);
+        const std::shared_ptr<hailort::net_flow::Op> &nms_op);
     static Expected<std::shared_ptr<HwReadElement>> add_hw_read_element(std::shared_ptr<OutputStream> &output_stream,
         std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
         const std::string &element_name, EventPtr &shutdown_event, size_t buffer_pool_size,
         const hailo_pipeline_elem_stats_flags_t &hw_read_element_stats_flags, const hailo_vstream_stats_flags_t &hw_read_stream_stats_flags);
+
     static Expected<std::shared_ptr<PullQueueElement>> add_pull_queue_element(std::shared_ptr<OutputStream> &output_stream,
         std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
         const std::string &element_name, EventPtr &shutdown_event, const hailo_vstream_params_t &vstream_params);
+
+    // TODO (HRT-11512): Move all post-process-related elements to a dedicated model
     static Expected<std::shared_ptr<ArgmaxPostProcessElement>> add_argmax_element(std::shared_ptr<OutputStream> &output_stream,
         std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
-        const std::string &element_name, hailo_vstream_params_t &vstream_params, const NetFlowElement &argmax_op);
+        const std::string &element_name, hailo_vstream_params_t &vstream_params, const net_flow::PostProcessOpMetadataPtr &argmax_op,
+        size_t buffer_pool_size, std::chrono::milliseconds timeout, const hailo_vstream_stats_flags_t &vstream_flags,
+        EventPtr &shutdown_event);
+
     static Expected<std::shared_ptr<SoftmaxPostProcessElement>> add_softmax_element(std::shared_ptr<OutputStream> &output_stream,
         std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
-        const std::string &element_name, hailo_vstream_params_t &vstream_params, const NetFlowElement &softmax_op);
+        const std::string &element_name, hailo_vstream_params_t &vstream_params, const net_flow::PostProcessOpMetadataPtr &softmax_op,
+        size_t buffer_pool_size, std::chrono::milliseconds timeout, const hailo_vstream_stats_flags_t &vstream_flags,
+        EventPtr &shutdown_event);
+
+    static Expected<std::shared_ptr<ConvertNmsToDetectionsElement>> add_nms_to_detections_convert_element(std::shared_ptr<OutputStream> &output_stream,
+        std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements, const std::string &element_name,
+        hailo_vstream_params_t &vstream_params, const net_flow::PostProcessOpMetadataPtr &iou_op_metadata, size_t buffer_pool_size, std::chrono::milliseconds timeout,
+        const hailo_vstream_stats_flags_t &vstream_flags, EventPtr &shutdown_event);
+
+    static Expected<std::shared_ptr<RemoveOverlappingBboxesElement>> add_remove_overlapping_bboxes_element(std::shared_ptr<OutputStream> &output_stream,
+        std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
+        const std::string &element_name, hailo_vstream_params_t &vstream_params, const net_flow::PostProcessOpMetadataPtr &iou_op_metadata,
+        size_t buffer_pool_size, std::chrono::milliseconds timeout, const hailo_vstream_stats_flags_t &vstream_flags, EventPtr &shutdown_event);
+
+    static Expected<std::shared_ptr<FillNmsFormatElement>> add_fill_nms_format_element(std::shared_ptr<OutputStream> &output_stream,
+        std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
+        const std::string &element_name, hailo_vstream_params_t &vstream_params, const net_flow::PostProcessOpMetadataPtr &iou_op_metadata,
+        size_t buffer_pool_size, std::chrono::milliseconds timeout, const hailo_vstream_stats_flags_t &vstream_flags, EventPtr &shutdown_event);
+
     static Expected<std::shared_ptr<UserBufferQueueElement>> add_user_buffer_queue_element(std::shared_ptr<OutputStream> &output_stream,
         std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
         const std::string &element_name, EventPtr &shutdown_event, const hailo_vstream_params_t &vstream_params);
+
     static Expected<std::shared_ptr<PostInferElement>> add_post_infer_element(std::shared_ptr<OutputStream> &output_stream,
         std::shared_ptr<std::atomic<hailo_status>> &pipeline_status, std::vector<std::shared_ptr<PipelineElement>> &elements,
         const std::string &element_name, const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event);
+
     static hailo_status add_demux(std::shared_ptr<OutputStream> output_stream, NameToVStreamParamsMap &vstreams_params_map,
         std::vector<std::shared_ptr<PipelineElement>> &&elements, std::vector<OutputVStream> &vstreams,
         std::shared_ptr<HwReadElement> hw_read_elem, EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
         const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
+
+    static hailo_status handle_pix_buffer_splitter_flow(std::vector<std::shared_ptr<InputStream>> streams,
+        const hailo_vstream_info_t &vstream_info, std::vector<std::shared_ptr<PipelineElement>> &&base_elements,
+        std::vector<InputVStream> &vstreams, const hailo_vstream_params_t &vstream_params, EventPtr shutdown_event,
+        std::shared_ptr<std::atomic<hailo_status>> pipeline_status, EventPtr &core_op_activated_event,
+        AccumulatorPtr accumaltor);
+
     static hailo_status add_nms_fuse(OutputStreamPtrVector &output_streams, hailo_vstream_params_t &vstreams_params,
         std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
         EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
         const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos);
+
     static hailo_status add_nms_post_process(OutputStreamPtrVector &output_streams, hailo_vstream_params_t &vstreams_params,
         std::vector<std::shared_ptr<PipelineElement>> &elements, std::vector<OutputVStream> &vstreams,
         EventPtr shutdown_event, std::shared_ptr<std::atomic<hailo_status>> pipeline_status,
         const std::map<std::string, hailo_vstream_info_t> &output_vstream_infos,
-        const NetFlowElement &nms_op);
+        const std::shared_ptr<hailort::net_flow::Op> &nms_op);
+
     static Expected<AccumulatorPtr> create_pipeline_latency_accumulator(const hailo_vstream_params_t &vstreams_params);
 
 private:
     static Expected<std::vector<OutputVStream>> create_output_post_process_argmax(std::shared_ptr<OutputStream> output_stream,
-        const NameToVStreamParamsMap &vstreams_params_map, const hailo_vstream_info_t &output_vstream_info, const NetFlowElement &argmax_op);
+        const NameToVStreamParamsMap &vstreams_params_map, const hailo_vstream_info_t &output_vstream_info,
+        const net_flow::PostProcessOpMetadataPtr &argmax_op_metadata);
     static Expected<std::vector<OutputVStream>> create_output_post_process_softmax(std::shared_ptr<OutputStream> output_stream,
-        const NameToVStreamParamsMap &vstreams_params_map, const hailo_vstream_info_t &output_vstream_info, const NetFlowElement &softmax_op);
+        const NameToVStreamParamsMap &vstreams_params_map, const hailo_vstream_info_t &output_vstream_info,
+        const net_flow::PostProcessOpMetadataPtr &softmax_op_metadata);
+    static Expected<std::vector<OutputVStream>> create_output_post_process_iou(std::shared_ptr<OutputStream> output_stream,
+        hailo_vstream_params_t vstream_params, const net_flow::PostProcessOpMetadataPtr &iou_op_metadata);
 };
 
 } /* namespace hailort */
index 2fa911ff56bc81341ef162ef7df936fa4fcef734..715b0c68703bae58bb2cbf0c1a1bbf07e85ca4a1 100644 (file)
@@ -7,6 +7,7 @@
  * @brief: Configured Network Group and Activated Network Group
  **/
 
+#include "hailo/hailort.h"
 #include "hailo/transform.hpp"
 #include "hailo/vstream.hpp"
 #include "hailo/hailort_defaults.hpp"
 namespace hailort
 {
 
-Expected<std::shared_ptr<ConfiguredNetworkGroup>> ConfiguredNetworkGroup::duplicate_network_group_client(uint32_t handle, const std::string &network_group_name)
+class ActivatedNetworkGroupImpl : public ActivatedNetworkGroup {
+public:
+
+    static Expected<std::unique_ptr<ActivatedNetworkGroup>> create(ConfiguredNetworkGroupBase &cng)
+    {
+        auto status = HAILO_UNINITIALIZED;
+        std::unique_ptr<ActivatedNetworkGroup> ang = make_unique_nothrow<ActivatedNetworkGroupImpl>(cng, status);
+        CHECK_NOT_NULL_AS_EXPECTED(ang, HAILO_OUT_OF_HOST_MEMORY);
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            LOGGER__ERROR("Network group activation failed because some of the low level streams are aborted. Make sure to run clear_abort before activating!");
+            return make_unexpected(status);
+        }
+        CHECK_SUCCESS_AS_EXPECTED(status);
+        return ang;
+    }
+
+    virtual ~ActivatedNetworkGroupImpl()
+    {
+        if (m_is_activated) {
+            auto status = m_cng.deactivate_impl();
+            if (HAILO_SUCCESS != status) {
+                LOGGER__ERROR("Failed deactivate {}", status);
+            }
+            m_is_activated = false;
+        }
+    }
+
+    ActivatedNetworkGroupImpl(const ActivatedNetworkGroupImpl &) = delete;
+    ActivatedNetworkGroupImpl &operator=(const ActivatedNetworkGroupImpl &) = delete;
+    ActivatedNetworkGroupImpl(ActivatedNetworkGroupImpl &&) = delete;
+    ActivatedNetworkGroupImpl &operator=(ActivatedNetworkGroupImpl &&) = delete;
+
+    virtual const std::string &get_network_group_name() const override
+    {
+        return m_cng.get_network_group_name();
+    }
+
+    virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key) override
+    {
+        return m_cng.get_intermediate_buffer(key);
+    }
+
+    virtual uint32_t get_invalid_frames_count() override
+    {
+        uint32_t total_invalid_frames_count = 0;
+        for (auto& output_stream : m_cng.get_output_streams()) {
+            total_invalid_frames_count += output_stream.get().get_invalid_frames_count();
+        }
+        return total_invalid_frames_count;
+    }
+
+    ActivatedNetworkGroupImpl(ConfiguredNetworkGroupBase &cng, hailo_status &status) :
+        m_cng(cng)
+    {
+        auto activate_status = m_cng.activate_impl();
+        if (HAILO_STREAM_ABORTED_BY_USER == activate_status) {
+            LOGGER__INFO("Network group activation failed because it was aborted by user");
+            status = activate_status;
+            return;
+        }
+        if (HAILO_SUCCESS != activate_status) {
+            LOGGER__ERROR("Failed activate {}", activate_status);
+            status = activate_status;
+            return;
+        }
+
+        m_is_activated = true;
+        status = HAILO_SUCCESS;
+    }
+
+private:
+    ConfiguredNetworkGroupBase &m_cng;
+    bool m_is_activated;
+};
+
+Expected<std::shared_ptr<ConfiguredNetworkGroup>> ConfiguredNetworkGroup::duplicate_network_group_client(uint32_t ng_handle, uint32_t vdevice_handle,
+    const std::string &network_group_name)
 {
 #ifdef HAILO_SUPPORT_MULTI_PROCESS
-    auto net_group_client = ConfiguredNetworkGroupClient::duplicate_network_group_client(handle, network_group_name);
+    auto net_group_client = ConfiguredNetworkGroupClient::duplicate_network_group_client(ng_handle, vdevice_handle, network_group_name);
     CHECK_EXPECTED(net_group_client);
-    
+
     return std::shared_ptr<ConfiguredNetworkGroup>(net_group_client.release());
 #else
-    (void)handle;
+    (void)ng_handle;
+    (void)vdevice_handle;
     (void)network_group_name;
     LOGGER__ERROR("`duplicate_network_group_client()` requires service compilation with HAILO_BUILD_SERVICE");
     return make_unexpected(HAILO_INVALID_OPERATION);
@@ -48,16 +126,38 @@ Expected<uint32_t> ConfiguredNetworkGroup::get_client_handle() const
     return make_unexpected(HAILO_INVALID_OPERATION);
 }
 
+Expected<uint32_t> ConfiguredNetworkGroup::get_vdevice_client_handle() const
+{
+    LOGGER__ERROR("`get_vdevice_client_handle()` is valid only when working with HailoRT Service!");
+    return make_unexpected(HAILO_INVALID_OPERATION);
+}
+
+hailo_status ConfiguredNetworkGroup::before_fork()
+{
+    return HAILO_SUCCESS;
+}
+
+hailo_status ConfiguredNetworkGroup::after_fork_in_parent()
+{
+    return HAILO_SUCCESS;
+}
+
+hailo_status ConfiguredNetworkGroup::after_fork_in_child()
+{
+    return HAILO_SUCCESS;
+}
+
 Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroup::activate()
 {
-    const auto network_group_params = HailoRTDefaults::get_active_network_group_params();
-    return activate(network_group_params);
+    return activate(HailoRTDefaults::get_active_network_group_params());
 }
 
 Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroupBase::activate(
     const hailo_activate_network_group_params_t &network_group_params)
 {
-    return get_core_op()->activate(network_group_params);
+    // Params are reserved for later use.
+    (void)network_group_params;
+    return ActivatedNetworkGroupImpl::create(*this);
 }
 
 /* Network group base functions */
@@ -93,7 +193,7 @@ Expected<OutputStreamWithParamsVector> ConfiguredNetworkGroupBase::get_output_st
         }
     }
     // Add non mux streams to result
-    hailo_status status = add_mux_streams_by_edges_names(results, outputs_edges_params); 
+    hailo_status status = add_mux_streams_by_edges_names(results, outputs_edges_params);
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     return results;
@@ -151,6 +251,11 @@ Expected<OutputStreamPtrVector> ConfiguredNetworkGroupBase::get_output_streams_b
     return output_streams;
 }
 
+Expected<std::vector<net_flow::PostProcessOpMetadataPtr>> ConfiguredNetworkGroupBase::get_ops_metadata()
+{
+    return std::vector<net_flow::PostProcessOpMetadataPtr>(m_network_group_metadata.m_ops_metadata);
+}
+
 Expected<LayerInfo> ConfiguredNetworkGroupBase::get_layer_info(const std::string &stream_name)
 {
     return get_core_op()->get_layer_info(stream_name);
@@ -161,7 +266,8 @@ ConfiguredNetworkGroupBase::ConfiguredNetworkGroupBase(
     NetworkGroupMetadata &&metadata) :
         m_config_params(config_params),
         m_core_ops(std::move(core_ops)),
-        m_network_group_metadata(std::move(metadata))
+        m_network_group_metadata(std::move(metadata)),
+        m_is_forked(false)
 {}
 
 // static func
@@ -188,12 +294,6 @@ uint16_t ConfiguredNetworkGroupBase::get_smallest_configured_batch_size(const Co
     return (UINT16_MAX == min_batch_size) ? DEFAULT_ACTUAL_BATCH_SIZE : min_batch_size;
 }
 
-Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroupBase::activate_with_batch(uint16_t dynamic_batch_size,
-    bool resume_pending_stream_transfers)
-{
-    return get_core_op()->activate_with_batch(dynamic_batch_size, resume_pending_stream_transfers);
-}
-
 const std::string &ConfiguredNetworkGroupBase::get_network_group_name() const
 {
     return m_network_group_metadata.name();
@@ -204,9 +304,9 @@ const std::string &ConfiguredNetworkGroupBase::name() const
     return m_network_group_metadata.name();
 }
 
-hailo_status ConfiguredNetworkGroupBase::activate_low_level_streams(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+hailo_status ConfiguredNetworkGroupBase::activate_low_level_streams()
 {
-    return get_core_op()->activate_low_level_streams(dynamic_batch_size, resume_pending_stream_transfers);
+    return get_core_op()->activate_low_level_streams();
 }
 
 hailo_status ConfiguredNetworkGroupBase::deactivate_low_level_streams()
@@ -264,35 +364,6 @@ const SupportedFeatures &ConfiguredNetworkGroupBase::get_supported_features()
     return get_core_op()->get_supported_features();
 }
 
-hailo_status ConfiguredNetworkGroupBase::create_input_stream_from_config_params(Device &device,
-    const hailo_stream_parameters_t &stream_params, const std::string &stream_name)
-{
-    return get_core_op()->create_input_stream_from_config_params(device, stream_params, stream_name);
-}
-
-hailo_status ConfiguredNetworkGroupBase::create_vdma_input_stream(Device &device, const std::string &stream_name,
-    const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params)
-{
-    return get_core_op()->create_vdma_input_stream(device, stream_name, layer_info, stream_params);
-}
-
-hailo_status ConfiguredNetworkGroupBase::create_output_stream_from_config_params(Device &device,
-    const hailo_stream_parameters_t &stream_params, const std::string &stream_name)
-{
-    return get_core_op()->create_output_stream_from_config_params(device, stream_params, stream_name);
-}
-
-hailo_status ConfiguredNetworkGroupBase::create_vdma_output_stream(Device &device, const std::string &stream_name,
-    const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params)
-{
-    return get_core_op()->create_vdma_output_stream(device, stream_name, layer_info, stream_params);
-}
-
-hailo_status ConfiguredNetworkGroupBase::create_streams_from_config_params(Device &device)
-{
-    return get_core_op()->create_streams_from_config_params(device);
-}
-
 Expected<InputStreamRefVector> ConfiguredNetworkGroupBase::get_input_streams_by_network(const std::string &network_name)
 {
     return get_core_op()->get_input_streams_by_network(network_name);
@@ -340,6 +411,16 @@ hailo_status ConfiguredNetworkGroupBase::wait_for_activation(const std::chrono::
     return get_core_op()->wait_for_activation(timeout);
 }
 
+hailo_status ConfiguredNetworkGroupBase::activate_impl(uint16_t dynamic_batch_size)
+{
+    return get_core_op()->activate(dynamic_batch_size);
+}
+
+hailo_status ConfiguredNetworkGroupBase::deactivate_impl()
+{
+    return get_core_op()->deactivate();
+}
+
 Expected<std::vector<std::vector<std::string>>> ConfiguredNetworkGroupBase::get_output_vstream_groups()
 {
     std::vector<std::vector<std::string>> results;
@@ -452,6 +533,22 @@ static hailo_vstream_params_t expand_vstream_params_autos(const hailo_stream_inf
     return local_vstream_params;
 }
 
+static hailo_vstream_params_t expand_vstream_params_autos_multi_planar(const hailo_vstream_info_t &vstream_info,
+    const hailo_vstream_params_t &vstream_params)
+{
+    /* In the multi-planar case we compare against vstream_info instead of stream_info,
+        as the low-level streams' formats don't indicate the format of the vstreams */
+    auto local_vstream_params = vstream_params;
+    if (HAILO_FORMAT_TYPE_AUTO == local_vstream_params.user_buffer_format.type) {
+        local_vstream_params.user_buffer_format.type = vstream_info.format.type;
+    }
+    if (HAILO_FORMAT_ORDER_AUTO == local_vstream_params.user_buffer_format.order) {
+        local_vstream_params.user_buffer_format.order = vstream_info.format.order;
+    }
+
+    return local_vstream_params;
+}
+
 static std::map<std::string, hailo_vstream_info_t> vstream_infos_vector_to_map(std::vector<hailo_vstream_info_t> &&vstream_info_vector)
 {
     std::map<std::string, hailo_vstream_info_t> vstream_infos_map;
@@ -470,17 +567,30 @@ Expected<std::vector<InputVStream>> ConfiguredNetworkGroupBase::create_input_vst
 
     std::vector<InputVStream> vstreams;
     vstreams.reserve(inputs_params.size());
+
     for (const auto &name_params_pair : inputs_params) {
-        auto input_stream_expected = get_shared_input_stream_by_name(name_params_pair.first);
-        CHECK_EXPECTED(input_stream_expected);
-        auto input_stream = input_stream_expected.release();
+        std::vector<std::shared_ptr<InputStream>> streams;
+        auto &vstream_name = name_params_pair.first;
+        auto &vstream_params = name_params_pair.second;
+
+        auto stream_names = m_network_group_metadata.get_stream_names_from_vstream_name(vstream_name);
+        CHECK_EXPECTED(stream_names);
 
-        const auto vstream_info = input_vstream_infos_map.find(name_params_pair.first);
+        const auto vstream_info = input_vstream_infos_map.find(vstream_name);
         CHECK_AS_EXPECTED(vstream_info != input_vstream_infos_map.end(), HAILO_NOT_FOUND,
-            "Failed to find vstream info of {}", name_params_pair.first);
+            "Failed to find vstream info of {}", vstream_name);
+
+        for (const auto &stream_name : stream_names.value()){
+            auto input_stream_expected = get_shared_input_stream_by_name(stream_name);
+            CHECK_EXPECTED(input_stream_expected);
+
+            auto input_stream = input_stream_expected.release();
+            streams.push_back(input_stream);
+        }
 
-        const auto vstream_params = expand_vstream_params_autos(input_stream->get_info(), name_params_pair.second);
-        auto inputs = VStreamsBuilderUtils::create_inputs(input_stream, vstream_info->second, vstream_params);
+        auto expanded_vstream_params = (streams.size() > 1) ? expand_vstream_params_autos_multi_planar(vstream_info->second, vstream_params) :
+            expand_vstream_params_autos(streams.back()->get_info(), vstream_params);
+        auto inputs = VStreamsBuilderUtils::create_inputs(streams, vstream_info->second, expanded_vstream_params);
         CHECK_EXPECTED(inputs);
 
         vstreams.insert(vstreams.end(), std::make_move_iterator(inputs->begin()), std::make_move_iterator(inputs->end()));
@@ -503,12 +613,12 @@ Expected<std::vector<OutputVStream>> ConfiguredNetworkGroupBase::create_output_v
     // Building DBs that connect output_vstreams, output_streams and ops.
     // Note: Assuming each post process op has a unique output streams.
     //       In other words, not possible for an output stream to be connected to more than one op
-    std::unordered_map<std::string, std::shared_ptr<NetFlowElement>> post_process_ops;
+    std::unordered_map<std::string, net_flow::PostProcessOpMetadataPtr> post_process_metadata;
     std::unordered_map<stream_name_t, op_name_t> op_inputs_to_op_name;
-    for (auto &op : m_network_group_metadata.m_net_flow_ops) {
-        post_process_ops.insert({op->name, op});
-        for (auto &input_stream : op->input_streams) {
-            op_inputs_to_op_name.insert({input_stream, op->name});
+    for (auto &metadata : m_network_group_metadata.m_ops_metadata) {
+        post_process_metadata.insert({metadata->get_name(), metadata});
+        for (auto &input_name : metadata->get_input_names()) {
+            op_inputs_to_op_name.insert({input_name, metadata->get_name()});
         }
     }
 
@@ -525,7 +635,7 @@ Expected<std::vector<OutputVStream>> ConfiguredNetworkGroupBase::create_output_v
         }
 
         auto outputs = VStreamsBuilderUtils::create_output_vstreams_from_streams(all_output_streams, output_streams.value(), vstream_params.second,
-            post_process_ops, op_inputs_to_op_name, output_vstream_infos_map);
+            post_process_metadata, op_inputs_to_op_name, output_vstream_infos_map);
         CHECK_EXPECTED(outputs);
         vstreams.insert(vstreams.end(), std::make_move_iterator(outputs->begin()), std::make_move_iterator(outputs->end()));
     }
@@ -534,4 +644,24 @@ Expected<std::vector<OutputVStream>> ConfiguredNetworkGroupBase::create_output_v
     return vstreams;
 }
 
+hailo_status ConfiguredNetworkGroupBase::before_fork()
+{
+    // On fork, wrap each core-op's stream objects with a wrapper that enables
+    // multi-process support.
+    if (!m_is_forked) {
+        for (auto &core_op : m_core_ops) {
+            auto status = core_op->wrap_streams_for_remote_process();
+            CHECK_SUCCESS(status);
+        }
+        m_is_forked = true;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+Expected<Buffer> ConfiguredNetworkGroupBase::get_intermediate_buffer(const IntermediateBufferKey &key)
+{
+    return get_core_op()->get_intermediate_buffer(key);
+}
+
 } /* namespace hailort */
index 31cb962d4bc15140be100b8b33c97c6a9e64186d..4728f60e71fda1daa16c74f9b85cc921b9343bed 100644 (file)
  *        -------------------------------------------------------------------------------------------------------------|
  *        |                         ActivatedNetworkGroup                                                              |  (External "interface")
  *        |                                   |                                                                        |
- *        |                             ActivatedCoreOp                                                                |  (Base classes)
- *        |                 __________________|_____________________________________________________                   |
- *        |                /                                         |                               \                 |
- *        |    VdmaConfigActivatedCoreOp                 VDeviceActivatedCoreOp             HcpConfigActivatedCoreOp   |  (Actual implementations)
- *        |                                                          |                                                 |
- *        |                                        vector of VdmaConfigActivatedCoreOp                                 |
+ *        |                       ActivatedNetworkGroupImpl                                                            |  (Class implementation)
  *        --------------------------------------------------------------------------------------------------------------
  **/
 
@@ -46,6 +41,7 @@
 
 #ifdef HAILO_SUPPORT_MULTI_PROCESS
 #include "service/hailort_rpc_client.hpp"
+#include "rpc/rpc_definitions.hpp"
 #endif // HAILO_SUPPORT_MULTI_PROCESS
 
 
@@ -54,6 +50,7 @@ namespace hailort
 using stream_name_t = std::string;
 using op_name_t = std::string;
 
+
 class ConfiguredNetworkGroupBase : public ConfiguredNetworkGroup
 {
 public:
@@ -71,15 +68,15 @@ public:
     ConfiguredNetworkGroupBase(const ConfiguredNetworkGroupBase &other) = delete;
     ConfiguredNetworkGroupBase &operator=(const ConfiguredNetworkGroupBase &other) = delete;
     ConfiguredNetworkGroupBase &operator=(ConfiguredNetworkGroupBase &&other) = delete;
-    ConfiguredNetworkGroupBase(ConfiguredNetworkGroupBase &&other) = default;
+    ConfiguredNetworkGroupBase(ConfiguredNetworkGroupBase &&other) = delete;
 
-    Expected<std::unique_ptr<ActivatedNetworkGroup>> activate_with_batch(
-        uint16_t dynamic_batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE,
-        bool resume_pending_stream_transfers = false);
     virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> activate(
         const hailo_activate_network_group_params_t &network_group_params) override;
     virtual hailo_status wait_for_activation(const std::chrono::milliseconds &timeout) override;
 
+    hailo_status activate_impl(uint16_t dynamic_batch_size = CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE);
+    hailo_status deactivate_impl();
+
     virtual const std::string &get_network_group_name() const override;
     virtual const std::string &name() const override;
 
@@ -114,7 +111,6 @@ public:
     virtual Expected<std::vector<hailo_vstream_info_t>> get_all_vstream_infos(const std::string &network_name="") const override;
     virtual AccumulatorPtr get_activation_time_accumulator() const override;
     virtual AccumulatorPtr get_deactivation_time_accumulator() const override;
-    hailo_status create_streams_from_config_params(Device &device);
 
     virtual bool is_multi_context() const override;
     virtual const ConfigureNetworkParams get_config_params() const override;
@@ -127,7 +123,7 @@ public:
     const std::shared_ptr<CoreOpMetadata> get_core_op_metadata() const;
 
     const SupportedFeatures &get_supported_features();
-    
+
     Expected<uint16_t> get_stream_batch_size(const std::string &stream_name);
 
     virtual Expected<std::vector<std::string>> get_sorted_output_names() override;
@@ -137,12 +133,12 @@ public:
     virtual Expected<std::vector<InputVStream>> create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params) override;
     virtual Expected<std::vector<OutputVStream>> create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params) override;
 
-    Expected<std::shared_ptr<InputStream>> get_shared_input_stream_by_name(const std::string &stream_name)
+    Expected<std::shared_ptr<InputStreamBase>> get_shared_input_stream_by_name(const std::string &stream_name)
     {
         return get_core_op()->get_shared_input_stream_by_name(stream_name);
     }
-    
-    Expected<std::shared_ptr<OutputStream>> get_shared_output_stream_by_name(const std::string &stream_name) 
+
+    Expected<std::shared_ptr<OutputStreamBase>> get_shared_output_stream_by_name(const std::string &stream_name)
     {
         return get_core_op()->get_shared_output_stream_by_name(stream_name);
     }
@@ -151,22 +147,6 @@ public:
     {
         return get_core_op()->m_core_op_activated_event;
     }
-    
-    hailo_status activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers = false)
-    {
-        return get_core_op()->activate_impl(dynamic_batch_size, resume_pending_stream_transfers);
-    }
-
-    hailo_status deactivate_impl(bool keep_nn_config_during_reset)
-    {
-        return get_core_op()->deactivate_impl(keep_nn_config_during_reset);
-    }
-
-    Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
-        const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
-    {
-        return get_core_op()->create_activated_network_group(network_group_params, dynamic_batch_size, resume_pending_stream_transfers);
-    }
 
     Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters()
     {
@@ -208,33 +188,30 @@ public:
         return m_core_ops;
     }
 
+    virtual hailo_status before_fork() override;
+
+    Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key);
+    Expected<OutputStreamPtrVector> get_output_streams_by_vstream_name(const std::string &name);
+    Expected<std::vector<net_flow::PostProcessOpMetadataPtr>> get_ops_metadata();
+
 private:
     ConfiguredNetworkGroupBase(const ConfigureNetworkParams &config_params,
         std::vector<std::shared_ptr<CoreOp>> &&core_ops, NetworkGroupMetadata &&metadata);
 
     static uint16_t get_smallest_configured_batch_size(const ConfigureNetworkParams &config_params);
-    hailo_status create_vdma_input_stream(Device &device, const std::string &stream_name,
-        const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params);
-    hailo_status create_vdma_output_stream(Device &device, const std::string &stream_name,
-        const LayerInfo &layer_info, const hailo_stream_parameters_t &stream_params);
-    hailo_status create_output_stream_from_config_params(Device &device,
-        const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
-    hailo_status create_input_stream_from_config_params(Device &device,
-        const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
     hailo_status add_mux_streams_by_edges_names(OutputStreamWithParamsVector &result,
         const std::unordered_map<std::string, hailo_vstream_params_t> &outputs_edges_params);
-    Expected<OutputStreamPtrVector> get_output_streams_by_vstream_name(const std::string &name);
     Expected<LayerInfo> get_layer_info(const std::string &stream_name);
 
-    hailo_status activate_low_level_streams(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers);
+    hailo_status activate_low_level_streams();
     hailo_status deactivate_low_level_streams();
 
     const ConfigureNetworkParams m_config_params;
     std::vector<std::shared_ptr<CoreOp>> m_core_ops;
     NetworkGroupMetadata m_network_group_metadata;
+    bool m_is_forked;
 
     friend class VDeviceCoreOp;
-    friend class VDeviceActivatedCoreOp;
 };
 
 // Move client ng to different header
@@ -242,13 +219,13 @@ private:
 class ConfiguredNetworkGroupClient : public ConfiguredNetworkGroup
 {
 public:
-    ConfiguredNetworkGroupClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle);
+    ConfiguredNetworkGroupClient(std::unique_ptr<HailoRtRpcClient> client, NetworkGroupIdentifier &&identifier);
 
     virtual ~ConfiguredNetworkGroupClient();
     ConfiguredNetworkGroupClient(const ConfiguredNetworkGroupClient &other) = delete;
     ConfiguredNetworkGroupClient &operator=(const ConfiguredNetworkGroupClient &other) = delete;
     ConfiguredNetworkGroupClient &operator=(ConfiguredNetworkGroupClient &&other) = delete;
-    ConfiguredNetworkGroupClient(ConfiguredNetworkGroupClient &&other) = default;
+    ConfiguredNetworkGroupClient(ConfiguredNetworkGroupClient &&other) = delete;
 
     virtual const std::string &get_network_group_name() const override;
     virtual const std::string &name() const override;
@@ -310,18 +287,26 @@ public:
 
     virtual Expected<uint32_t> get_client_handle() const override
     {
-        auto val = m_handle;
+        auto val = m_identifier.m_network_group_handle;
+        return val;
+    };
+
+    virtual Expected<uint32_t> get_vdevice_client_handle() const override
+    {
+        auto val = m_identifier.m_vdevice_identifier.m_vdevice_handle;
         return val;
     };
 
-    static Expected<std::shared_ptr<ConfiguredNetworkGroupClient>> duplicate_network_group_client(uint32_t handle, const std::string &network_group_name);
+    static Expected<std::shared_ptr<ConfiguredNetworkGroupClient>> duplicate_network_group_client(uint32_t handle, uint32_t vdevice_handle,
+        const std::string &network_group_name);
 
 private:
-    ConfiguredNetworkGroupClient(uint32_t handle, const std::string &network_group_name);
+    ConfiguredNetworkGroupClient(NetworkGroupIdentifier &&identifier, const std::string &network_group_name);
     hailo_status create_client();
+    hailo_status dup_handle();
 
     std::unique_ptr<HailoRtRpcClient> m_client;
-    uint32_t m_handle;
+    NetworkGroupIdentifier m_identifier;
     std::string m_network_group_name;
 };
 #endif // HAILO_SUPPORT_MULTI_PROCESS
index 7205d5911a9221b5a2cbd10760101be3f9a82a15..f172cbbfd0effc7658ed8a64afb981d698087637 100755 (executable)
@@ -35,16 +35,16 @@ namespace hailort
 
 #define DEVICE_NODE_NAME       "hailo"
 
-#define PENDING_BUFFERS_SIZE (128)
-static_assert((0 == ((PENDING_BUFFERS_SIZE - 1) & PENDING_BUFFERS_SIZE)), "PENDING_BUFFERS_SIZE must be a power of 2");
+constexpr size_t ONGOING_TRANSFERS_SIZE = 128;
+static_assert((0 == ((ONGOING_TRANSFERS_SIZE - 1) & ONGOING_TRANSFERS_SIZE)), "ONGOING_TRANSFERS_SIZE must be a power of 2");
 
 #define MIN_ACTIVE_TRANSFERS_SCALE (2)
 #define MAX_ACTIVE_TRANSFERS_SCALE (4)
 
-#define HAILO_MAX_BATCH_SIZE ((PENDING_BUFFERS_SIZE / MIN_ACTIVE_TRANSFERS_SCALE) - 1)
+#define HAILO_MAX_BATCH_SIZE ((ONGOING_TRANSFERS_SIZE / MIN_ACTIVE_TRANSFERS_SCALE) - 1)
 
-// When measuring latency, each channel is capable of PENDING_BUFFERS_SIZE active transfers, each transfer raises max of 2 timestamps
-#define MAX_IRQ_TIMESTAMPS_SIZE (PENDING_BUFFERS_SIZE * 2)
+// When measuring latency, each channel is capable of ONGOING_TRANSFERS_SIZE active transfers, each transfer raises max of 2 timestamps
+#define MAX_IRQ_TIMESTAMPS_SIZE (ONGOING_TRANSFERS_SIZE * 2)
 
 #define PCIE_EXPECTED_MD5_LENGTH (16)
 
@@ -148,11 +148,11 @@ public:
 
     using VdmaBufferHandle = size_t;
 
-    static Expected<HailoRTDriver> create(const DeviceInfo &device_info);
+    static Expected<std::unique_ptr<HailoRTDriver>> create(const DeviceInfo &device_info);
 
 // TODO: HRT-7309 add implementation for Windows
 #if defined(__linux__) || defined(__QNX__)
-    static hailo_status hailo_ioctl(int fd, int request, void* request_struct, int &error_status);
+    hailo_status hailo_ioctl(int fd, unsigned long request, void* request_struct, int &error_status);
 #endif // defined(__linux__) || defined(__QNX__)
 
     static Expected<std::vector<DeviceInfo>> scan_devices();
@@ -284,8 +284,8 @@ public:
 
     HailoRTDriver(const HailoRTDriver &other) = delete;
     HailoRTDriver &operator=(const HailoRTDriver &other) = delete;
-    HailoRTDriver(HailoRTDriver &&other) noexcept = default;
-    HailoRTDriver &operator=(HailoRTDriver &&other) = default;
+    HailoRTDriver(HailoRTDriver &&other) noexcept = delete;
+    HailoRTDriver &operator=(HailoRTDriver &&other) = delete;
 
     static const uintptr_t INVALID_DRIVER_BUFFER_HANDLE_VALUE;
     static const size_t INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE;
@@ -325,8 +325,25 @@ private:
 #ifdef __QNX__
     pid_t m_resource_manager_pid;
 #endif // __QNX__
+
+#ifdef __linux__
+    // TODO: HRT-11595 fix linux driver deadlock and remove the mutex.
+    // Currently, on Linux, the mmap syscall is called with the current->mm lock held. Inside, we lock the board
+    // mutex. On other ioctls, we first lock the board mutex, and then lock current->mm mutex (For example - before
+    // pinning user address to memory and on copy_to_user/copy_from_user calls).
+    // Need to refactor the driver lock mechanism and then remove the mutex from here.
+    std::mutex m_driver_lock;
+#endif
 };
 
+inline hailo_dma_buffer_direction_t to_hailo_dma_direction(HailoRTDriver::DmaDirection dma_direction)
+{
+    return (dma_direction == HailoRTDriver::DmaDirection::H2D)  ? HAILO_DMA_BUFFER_DIRECTION_H2D :
+           (dma_direction == HailoRTDriver::DmaDirection::D2H)  ? HAILO_DMA_BUFFER_DIRECTION_D2H :
+           (dma_direction == HailoRTDriver::DmaDirection::BOTH) ? HAILO_DMA_BUFFER_DIRECTION_BOTH :
+                                                                  HAILO_DMA_BUFFER_DIRECTION_MAX_ENUM;
+}
+
 } /* namespace hailort */
 
 #endif  /* _HAILORT_DRIVER_HPP_ */
index 4615f4d000369d4d22df2da2fa8232a19a9791ca..a0e2fc514a2b5c7f73136bb57e788e3b5bdb974a 100755 (executable)
@@ -107,35 +107,58 @@ const uintptr_t HailoRTDriver::INVALID_DRIVER_BUFFER_HANDLE_VALUE = INVALID_DRIV
 const size_t HailoRTDriver::INVALID_DRIVER_VDMA_MAPPING_HANDLE_VALUE = INVALID_DRIVER_HANDLE_VALUE;
 const uint8_t HailoRTDriver::INVALID_VDMA_CHANNEL_INDEX = INVALID_VDMA_CHANNEL;
 
-Expected<HailoRTDriver> HailoRTDriver::create(const DeviceInfo &device_info)
+Expected<std::unique_ptr<HailoRTDriver>> HailoRTDriver::create(const DeviceInfo &device_info)
 {
     auto fd = FileDescriptor(open(device_info.dev_path.c_str(), O_RDWR));
     CHECK_AS_EXPECTED(fd >= 0, HAILO_DRIVER_FAIL,
         "Failed to open device file {} with error {}", device_info.dev_path, errno);
 
     hailo_status status = HAILO_UNINITIALIZED;
-    HailoRTDriver object(device_info, std::move(fd), status);
+    std::unique_ptr<HailoRTDriver> driver(new (std::nothrow) HailoRTDriver(device_info, std::move(fd), status));
+    CHECK_NOT_NULL_AS_EXPECTED(driver, HAILO_OUT_OF_HOST_MEMORY);
     CHECK_SUCCESS_AS_EXPECTED(status);
 
-    return object;
+    return driver;
 }
 
-hailo_status HailoRTDriver::hailo_ioctl(int fd, int request, void* request_struct, int &error_status)
+#if defined(__linux__)
+static bool is_blocking_ioctl(unsigned long request)
+{
+    switch (request) {
+    case HAILO_VDMA_INTERRUPTS_WAIT:
+    case HAILO_FW_CONTROL:
+    case HAILO_READ_NOTIFICATION:
+        return true;
+    default:
+        return false;
+    }
+}
+
+hailo_status HailoRTDriver::hailo_ioctl(int fd, unsigned long request, void* request_struct, int &error_status)
 {
+    // We lock m_driver_lock on all requests but the blocking ones. Read the m_driver_lock doc in the header
+    std::unique_lock<std::mutex> lock;
+    if (!is_blocking_ioctl(request)) {
+        lock = std::unique_lock<std::mutex>(m_driver_lock);
+    }
+
     int res = ioctl(fd, request, request_struct);
-    if (0 > res) {
-#if defined(__linux__)
-        error_status = errno;
+    error_status = errno;
+    return (res >= 0) ? HAILO_SUCCESS : HAILO_DRIVER_FAIL;
+}
 #elif defined(__QNX__)
+hailo_status HailoRTDriver::hailo_ioctl(int fd, unsigned long request, void* request_struct, int &error_status)
+{
+    int res = ioctl(fd, static_cast<int>(request), request_struct);
+    if (0 > res) {
         error_status = -res;
-#else
-#error "unsupported platform!"
-#endif // __linux__
-
         return HAILO_DRIVER_FAIL;
     }
     return HAILO_SUCCESS;
 }
+#else
+#error "Unsupported platform"
+#endif
 
 static hailo_status validate_driver_version(const hailo_driver_info &driver_info)
 {
@@ -770,6 +793,9 @@ hailo_status HailoRTDriver::descriptors_list_release_ioctl(uintptr_t desc_handle
 #if defined(__linux__)
 Expected<void *> HailoRTDriver::descriptors_list_create_mmap(uintptr_t desc_handle, size_t desc_count)
 {
+    // We lock m_driver_lock before calling mmap. Read m_driver_lock doc in the header
+    std::unique_lock<std::mutex> lock(m_driver_lock);
+
     const size_t buffer_size = desc_count * SIZE_OF_SINGLE_DESCRIPTOR;
     void *address = mmap(nullptr, buffer_size, PROT_WRITE | PROT_READ, MAP_SHARED, m_fd, (off_t)desc_handle);
     if (MAP_FAILED == address) {
index c9e753bf8c7b071fdd5d9b8b56b062db8a1fbfe5..3e1e947763edc959be26f7e5b615ef65e5c272cd 100644 (file)
@@ -125,19 +125,20 @@ Expected<Event> Event::create(const State& initial_state)
 {
     const auto handle = open_event_handle(initial_state);
     if (-1 == handle) {
-        return make_unexpected(HAILO_INTERNAL_FAILURE);
+        return make_unexpected(HAILO_EVENT_CREATE_FAIL);
     }
     return Event(handle);
 }
 
-EventPtr Event::create_shared(const State& initial_state)
+Expected<EventPtr> Event::create_shared(const State& initial_state)
 {
     const auto handle = open_event_handle(initial_state);
-    if (-1 == handle) {
-        return nullptr;
-    }
+    CHECK_AS_EXPECTED(-1 != handle, HAILO_EVENT_CREATE_FAIL);
+
+    auto res = make_shared_nothrow<Event>(handle);
+    CHECK_NOT_NULL_AS_EXPECTED(res, HAILO_OUT_OF_HOST_MEMORY);
 
-    return make_shared_nothrow<Event>(handle);
+    return res;
 }
 
 hailo_status Event::signal()
@@ -174,7 +175,7 @@ Expected<Semaphore> Semaphore::create(uint32_t initial_count)
 {
     const auto handle = open_semaphore_handle(initial_count);
     if (-1 == handle) {
-        return make_unexpected(HAILO_INTERNAL_FAILURE);
+        return make_unexpected(HAILO_EVENT_CREATE_FAIL);
     }
     return Semaphore(handle);
 }
index 94f16179a76dcac7cfe874b5038aa91319e25f86..312b6991c9d2b145d4f2860b2442f7b139cf5fb6 100644 (file)
@@ -63,19 +63,20 @@ Expected<Event> Event::create(const State& initial_state)
 {
     const auto handle = open_event_handle(initial_state);
     if (INVALID_EVENT_HANDLE == handle) {
-        return make_unexpected(HAILO_INTERNAL_FAILURE);
+        return make_unexpected(HAILO_EVENT_CREATE_FAIL);
     }
     return std::move(Event(handle));
 }
 
-EventPtr Event::create_shared(const State& initial_state)
+Expected<EventPtr> Event::create_shared(const State& initial_state)
 {
     const auto handle = open_event_handle(initial_state);
-    if (INVALID_EVENT_HANDLE == handle) {
-        return nullptr;
-    }
+    CHECK_AS_EXPECTED(INVALID_EVENT_HANDLE != handle, HAILO_EVENT_CREATE_FAIL);
+
+    auto res = make_shared_nothrow<Event>(handle);
+    CHECK_NOT_NULL_AS_EXPECTED(res, HAILO_OUT_OF_HOST_MEMORY);
 
-    return make_shared_nothrow<Event>(handle);
+    return res;
 }
 
 hailo_status Event::signal()
@@ -114,7 +115,7 @@ Expected<Semaphore> Semaphore::create(uint32_t initial_count)
 {
     const auto handle = open_semaphore_handle(initial_count);
     if (INVALID_EVENT_HANDLE == handle) {
-        return make_unexpected(HAILO_INTERNAL_FAILURE);
+        return make_unexpected(HAILO_EVENT_CREATE_FAIL);
     }
     return std::move(Semaphore(handle, initial_count));
 }
index d13f29ad64ee5b33845586916bd2c9ebe1da7719..3512631da08d71909c051cc9283fe7fbbcffaf05 100644 (file)
@@ -64,19 +64,20 @@ Expected<Event> Event::create(const State& initial_state)
 {
     const auto handle = open_event_handle(initial_state);
     if (nullptr == handle) {
-        return make_unexpected(HAILO_INTERNAL_FAILURE);
+        return make_unexpected(HAILO_EVENT_CREATE_FAIL);
     }
     return std::move(Event(handle));
 }
 
-EventPtr Event::create_shared(const State& initial_state)
+Expected<EventPtr> Event::create_shared(const State& initial_state)
 {
     const auto handle = open_event_handle(initial_state);
-    if (nullptr == handle) {
-        return nullptr;
-    }
+    CHECK_AS_EXPECTED(nullptr != handle, HAILO_EVENT_CREATE_FAIL);
+
+    auto res = make_shared_nothrow<Event>(handle);
+    CHECK_NOT_NULL_AS_EXPECTED(res, HAILO_OUT_OF_HOST_MEMORY);
 
-    return make_shared_nothrow<Event>(handle);
+    return res;
 }
 
 hailo_status Event::signal()
@@ -123,7 +124,7 @@ Expected<Semaphore> Semaphore::create(uint32_t initial_count)
 {
     const auto handle = open_semaphore_handle(initial_count);
     if (nullptr == handle) {
-        return make_unexpected(HAILO_INTERNAL_FAILURE);
+        return make_unexpected(HAILO_EVENT_CREATE_FAIL);
     }
     return std::move(Semaphore(handle));
 }
index 57b0db6a15914dc4042abf364e98e29274efb427..c0bbd6c4368d6d078f8ac4a487330dec1415ff8d 100644 (file)
@@ -353,9 +353,8 @@ Expected<std::vector<HailoRTDriver::DeviceInfo>> HailoRTDriver::scan_devices()
     return devices_info;
 }
 
-Expected<HailoRTDriver> HailoRTDriver::create(const DeviceInfo &device_info)
+Expected<std::unique_ptr<HailoRTDriver>> HailoRTDriver::create(const DeviceInfo &device_info)
 {
-    hailo_status status = HAILO_UNINITIALIZED;
     CDeviceFile f(device_info.dev_path);
     if (!f.Present()) {
         LOGGER__ERROR("Failed to open board {}", device_info.dev_path);
@@ -363,11 +362,12 @@ Expected<HailoRTDriver> HailoRTDriver::create(const DeviceInfo &device_info)
     }
     FileDescriptor fd(f.Detach());
 
-    HailoRTDriver platform(device_info, std::move(fd), status);
-    if (HAILO_SUCCESS != status) {
-        return make_unexpected(status);
-    }
-    return platform;
+    hailo_status status = HAILO_UNINITIALIZED;
+    std::unique_ptr<HailoRTDriver> driver(new (std::nothrow) HailoRTDriver(device_info, std::move(fd), status));
+    CHECK_NOT_NULL_AS_EXPECTED(driver, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return driver;
 }
 
 Expected<std::vector<uint8_t>> HailoRTDriver::read_notification()
index 6f68357372b4a0cf203bae00eb97a1a49597233c..54a6b83e390f3229b189c889f93c897a079bd490 100644 (file)
@@ -66,22 +66,11 @@ Expected<uint32_t> HailoRtRpcClient::VDevice_create(const hailo_vdevice_params_t
     return reply.handle();
 }
 
-Expected<uint32_t> HailoRtRpcClient::VDevice_dup_handle(uint32_t pid, uint32_t handle)
-{
-    dup_handle_Request request;
-    request.set_pid(pid);
-    request.set_handle(handle);
-    dup_handle_Reply reply;
-    ClientContextWithTimeout context;
-    grpc::Status status = m_stub->VDevice_dup_handle(&context, request, &reply);
-    CHECK_GRPC_STATUS_AS_EXPECTED(status);
-    return reply.handle();
-}
-
-hailo_status HailoRtRpcClient::VDevice_release(uint32_t handle, uint32_t pid)
+hailo_status HailoRtRpcClient::VDevice_release(const VDeviceIdentifier &identifier, uint32_t pid)
 {
     Release_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_vdevice_identifier();
+    VDevice_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_pid(pid);
 
     Release_Reply reply;
@@ -93,11 +82,12 @@ hailo_status HailoRtRpcClient::VDevice_release(uint32_t handle, uint32_t pid)
     return HAILO_SUCCESS;
 }
 
-Expected<std::vector<uint32_t>> HailoRtRpcClient::InputVStreams_create(uint32_t net_group_handle,
+Expected<std::vector<uint32_t>> HailoRtRpcClient::InputVStreams_create(const NetworkGroupIdentifier &identifier,
     const std::map<std::string, hailo_vstream_params_t> &inputs_params, uint32_t pid)
 {
     VStream_create_Request request;
-    request.set_net_group(net_group_handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_pid(pid);
     auto proto_vstreams_params = request.mutable_vstreams_params();
     for (const auto &name_params_pair : inputs_params) {
@@ -136,11 +126,12 @@ Expected<std::vector<uint32_t>> HailoRtRpcClient::InputVStreams_create(uint32_t
     return input_vstreams_handles;
 }
 
-hailo_status HailoRtRpcClient::InputVStream_release(uint32_t handle, uint32_t pid)
+hailo_status HailoRtRpcClient::InputVStream_release(const VStreamIdentifier &identifier, uint32_t pid)
 {
     Release_Request request;
-    request.set_handle(handle);
     request.set_pid(pid);
+    auto proto_identifier = request.mutable_vstream_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
 
     Release_Reply reply;
     ClientContextWithTimeout context;
@@ -151,11 +142,12 @@ hailo_status HailoRtRpcClient::InputVStream_release(uint32_t handle, uint32_t pi
     return HAILO_SUCCESS;
 }
 
-Expected<std::vector<uint32_t>> HailoRtRpcClient::OutputVStreams_create(uint32_t net_group_handle,
+Expected<std::vector<uint32_t>> HailoRtRpcClient::OutputVStreams_create(const NetworkGroupIdentifier &identifier,
         const std::map<std::string, hailo_vstream_params_t> &output_params, uint32_t pid)
 {
     VStream_create_Request request;
-    request.set_net_group(net_group_handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_pid(pid);
     auto proto_vstreams_params = request.mutable_vstreams_params();
     for (const auto &name_params_pair : output_params) {
@@ -194,11 +186,12 @@ Expected<std::vector<uint32_t>> HailoRtRpcClient::OutputVStreams_create(uint32_t
     return output_vstreams_handles;
 }
 
-hailo_status HailoRtRpcClient::OutputVStream_release(uint32_t handle, uint32_t pid)
+hailo_status HailoRtRpcClient::OutputVStream_release(const VStreamIdentifier &identifier, uint32_t pid)
 {
     Release_Request request;
-    request.set_handle(handle);
     request.set_pid(pid);
+    auto proto_identifier = request.mutable_vstream_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
 
     Release_Reply reply;
     ClientContextWithTimeout context;
@@ -209,35 +202,12 @@ hailo_status HailoRtRpcClient::OutputVStream_release(uint32_t handle, uint32_t p
     return HAILO_SUCCESS;
 }
 
-Expected<uint32_t> HailoRtRpcClient::InputVStream_dup_handle(uint32_t pid, uint32_t handle)
-{
-    dup_handle_Request request;
-    request.set_pid(pid);
-    request.set_handle(handle);
-    dup_handle_Reply reply;
-    ClientContextWithTimeout context;
-    grpc::Status status = m_stub->InputVStream_dup_handle(&context, request, &reply);
-    CHECK_GRPC_STATUS_AS_EXPECTED(status);
-    return reply.handle();
-}
-
-Expected<uint32_t> HailoRtRpcClient::OutputVStream_dup_handle(uint32_t pid, uint32_t handle)
-{
-    dup_handle_Request request;
-    request.set_pid(pid);
-    request.set_handle(handle);
-    dup_handle_Reply reply;
-    ClientContextWithTimeout context;
-    grpc::Status status = m_stub->OutputVStream_dup_handle(&context, request, &reply);
-    CHECK_GRPC_STATUS_AS_EXPECTED(status);
-    return reply.handle();
-}
-
-Expected<std::vector<uint32_t>> HailoRtRpcClient::VDevice_configure(uint32_t vdevice_handle, const Hef &hef,
+Expected<std::vector<uint32_t>> HailoRtRpcClient::VDevice_configure(const VDeviceIdentifier &identifier, const Hef &hef,
     uint32_t pid, const NetworkGroupsParamsMap &configure_params)
 {
     VDevice_configure_Request request;
-    request.set_handle(vdevice_handle);
+    auto proto_identifier = request.mutable_identifier();
+    VDevice_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_pid(pid);
     auto hef_memview = hef.pimpl->get_hef_memview();
     request.set_hef(hef_memview.data(), hef_memview.size());
@@ -287,10 +257,11 @@ Expected<std::vector<uint32_t>> HailoRtRpcClient::VDevice_configure(uint32_t vde
     return networks_handles;
 }
 
-Expected<std::vector<std::string>> HailoRtRpcClient::VDevice_get_physical_devices_ids(uint32_t handle)
+Expected<std::vector<std::string>> HailoRtRpcClient::VDevice_get_physical_devices_ids(const VDeviceIdentifier &identifier)
 {
     VDevice_get_physical_devices_ids_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VDevice_convert_identifier_to_proto(identifier, proto_identifier);
 
     VDevice_get_physical_devices_ids_Reply reply;
     ClientContextWithTimeout context;
@@ -305,11 +276,11 @@ Expected<std::vector<std::string>> HailoRtRpcClient::VDevice_get_physical_device
     return result;
 }
 
-Expected<std::vector<std::unique_ptr<Device>>> HailoRtRpcClient::VDevice_get_physical_devices(uint32_t handle)
+Expected<std::vector<std::unique_ptr<Device>>> HailoRtRpcClient::VDevice_get_physical_devices(const VDeviceIdentifier &identifier)
 {
     std::vector<std::unique_ptr<Device>> devices;
 
-    auto device_ids = VDevice_get_physical_devices_ids(handle);
+    auto device_ids = VDevice_get_physical_devices_ids(identifier);
     CHECK_EXPECTED(device_ids);
     devices.reserve(device_ids->size());
 
@@ -318,14 +289,14 @@ Expected<std::vector<std::unique_ptr<Device>>> HailoRtRpcClient::VDevice_get_phy
         CHECK_EXPECTED(device);
         devices.push_back(std::move(device.release())) ;
     }
-
     return devices;
 }
 
-Expected<hailo_stream_interface_t> HailoRtRpcClient::VDevice_get_default_streams_interface(uint32_t handle)
+Expected<hailo_stream_interface_t> HailoRtRpcClient::VDevice_get_default_streams_interface(const VDeviceIdentifier &identifier)
 {
     VDevice_get_default_streams_interface_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VDevice_convert_identifier_to_proto(identifier, proto_identifier);
 
     VDevice_get_default_streams_interface_Reply reply;
     ClientContextWithTimeout context;
@@ -338,22 +309,25 @@ Expected<hailo_stream_interface_t> HailoRtRpcClient::VDevice_get_default_streams
     return static_cast<hailo_stream_interface_t>(reply.stream_interface());
 }
 
-Expected<uint32_t> HailoRtRpcClient::ConfiguredNetworkGroup_dup_handle(uint32_t pid, uint32_t handle)
+Expected<uint32_t> HailoRtRpcClient::ConfiguredNetworkGroup_dup_handle(const NetworkGroupIdentifier &identifier, uint32_t pid)
 {
-    dup_handle_Request request;
+    ConfiguredNetworkGroup_dup_handle_Request request;
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_pid(pid);
-    request.set_handle(handle);
-    dup_handle_Reply reply;
+
+    ConfiguredNetworkGroup_dup_handle_Reply reply;
     ClientContextWithTimeout context;
     grpc::Status status = m_stub->ConfiguredNetworkGroup_dup_handle(&context, request, &reply);
     CHECK_GRPC_STATUS_AS_EXPECTED(status);
     return reply.handle();
 }
 
-hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_release(uint32_t handle, uint32_t pid)
+hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_release(const NetworkGroupIdentifier &identifier, uint32_t pid)
 {
     Release_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_network_group_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_pid(pid);
 
     Release_Reply reply;
@@ -390,11 +364,12 @@ std::map<std::string, hailo_vstream_params_t> get_group(const ProtoNamedVStreamP
 }
 
 Expected<std::map<std::string, hailo_vstream_params_t>> HailoRtRpcClient::ConfiguredNetworkGroup_make_input_vstream_params(
-    uint32_t handle, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+    const NetworkGroupIdentifier &identifier, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
     const std::string &network_name)
 {
     ConfiguredNetworkGroup_make_input_vstream_params_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_quantized(quantized);
     request.set_format_type(format_type);
     request.set_timeout_ms(timeout_ms);
@@ -411,10 +386,11 @@ Expected<std::map<std::string, hailo_vstream_params_t>> HailoRtRpcClient::Config
 }
 
 Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> HailoRtRpcClient::ConfiguredNetworkGroup_make_output_vstream_params_groups(
-    uint32_t handle, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
+    const NetworkGroupIdentifier &identifier, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
 {
     ConfiguredNetworkGroup_make_output_vstream_params_groups_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_quantized(quantized);
     request.set_format_type(format_type);
     request.set_timeout_ms(timeout_ms);
@@ -435,11 +411,12 @@ Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> HailoRtRpcC
 }
 
 Expected<std::map<std::string, hailo_vstream_params_t>> HailoRtRpcClient::ConfiguredNetworkGroup_make_output_vstream_params(
-    uint32_t handle, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
+    const NetworkGroupIdentifier &identifier, bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
     const std::string &network_name)
 {
     ConfiguredNetworkGroup_make_output_vstream_params_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_quantized(quantized);
     request.set_format_type(format_type);
     request.set_timeout_ms(timeout_ms);
@@ -474,15 +451,16 @@ Expected<std::map<std::string, hailo_vstream_params_t>> HailoRtRpcClient::Config
     return result;
 }
 
-Expected<std::string> HailoRtRpcClient::ConfiguredNetworkGroup_get_network_group_name(uint32_t handle)
+Expected<std::string> HailoRtRpcClient::ConfiguredNetworkGroup_get_network_group_name(const NetworkGroupIdentifier &identifier)
 {
-    return ConfiguredNetworkGroup_name(handle);
+    return ConfiguredNetworkGroup_name(identifier);
 }
 
-Expected<std::string> HailoRtRpcClient::ConfiguredNetworkGroup_name(uint32_t handle)
+Expected<std::string> HailoRtRpcClient::ConfiguredNetworkGroup_name(const NetworkGroupIdentifier &identifier)
 {
     ConfiguredNetworkGroup_name_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
 
     ConfiguredNetworkGroup_name_Reply reply;
     ClientContextWithTimeout context;
@@ -494,10 +472,11 @@ Expected<std::string> HailoRtRpcClient::ConfiguredNetworkGroup_name(uint32_t han
     return network_group_name;
 }
 
-Expected<std::vector<hailo_network_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_network_infos(uint32_t handle)
+Expected<std::vector<hailo_network_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_network_infos(const NetworkGroupIdentifier &identifier)
 {
     ConfiguredNetworkGroup_get_network_infos_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
 
     ConfiguredNetworkGroup_get_network_infos_Reply reply;
     ClientContextWithTimeout context;
@@ -516,11 +495,12 @@ Expected<std::vector<hailo_network_info_t>> HailoRtRpcClient::ConfiguredNetworkG
     return network_infos;
 }
 
-Expected<std::vector<hailo_stream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_all_stream_infos(uint32_t handle,
+Expected<std::vector<hailo_stream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_all_stream_infos(const NetworkGroupIdentifier &identifier,
     const std::string &network_name)
 {
     ConfiguredNetworkGroup_get_all_stream_infos_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_network_name(network_name);
 
     ConfiguredNetworkGroup_get_all_stream_infos_Reply reply;
@@ -588,10 +568,11 @@ Expected<std::vector<hailo_stream_info_t>> HailoRtRpcClient::ConfiguredNetworkGr
     return result;
 }
 
-Expected<hailo_stream_interface_t> HailoRtRpcClient::ConfiguredNetworkGroup_get_default_stream_interface(uint32_t handle)
+Expected<hailo_stream_interface_t> HailoRtRpcClient::ConfiguredNetworkGroup_get_default_stream_interface(const NetworkGroupIdentifier &identifier)
 {
     ConfiguredNetworkGroup_get_default_stream_interface_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
 
     ConfiguredNetworkGroup_get_default_stream_interface_Reply reply;
     ClientContextWithTimeout context;
@@ -603,10 +584,11 @@ Expected<hailo_stream_interface_t> HailoRtRpcClient::ConfiguredNetworkGroup_get_
     return stream_interface;
 }
 
-Expected<std::vector<std::vector<std::string>>> HailoRtRpcClient::ConfiguredNetworkGroup_get_output_vstream_groups(uint32_t handle)
+Expected<std::vector<std::vector<std::string>>> HailoRtRpcClient::ConfiguredNetworkGroup_get_output_vstream_groups(const NetworkGroupIdentifier &identifier)
 {
     ConfiguredNetworkGroup_get_output_vstream_groups_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
 
     ConfiguredNetworkGroup_get_output_vstream_groups_Reply reply;
     ClientContextWithTimeout context;
@@ -643,7 +625,8 @@ hailo_vstream_info_t deserialize_vstream_info(const ProtoVStreamInfo &info_proto
     if (format.order == HAILO_FORMAT_ORDER_HAILO_NMS) {
         hailo_nms_shape_t nms_shape = {
             info_proto.nms_shape().number_of_classes(),
-            info_proto.nms_shape().max_bbox_per_class()
+            info_proto.nms_shape().max_bbox_per_class(),
+            info_proto.nms_shape().max_mask_size()
         };
         info.nms_shape = nms_shape;
     } else {
@@ -675,11 +658,12 @@ Expected<std::vector<hailo_vstream_info_t>> deserialize_vstream_infos(const Conf
     return result;
 } 
 
-Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_input_vstream_infos(uint32_t handle,
+Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_input_vstream_infos(const NetworkGroupIdentifier &identifier,
     std::string network_name)
 {
     ConfiguredNetworkGroup_get_vstream_infos_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_network_name(network_name);
 
     ConfiguredNetworkGroup_get_vstream_infos_Reply reply;
@@ -691,11 +675,12 @@ Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkG
     return deserialize_vstream_infos(reply);
 }
 
-Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_output_vstream_infos(uint32_t handle,
+Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_output_vstream_infos(const NetworkGroupIdentifier &identifier,
     std::string network_name)
 {
     ConfiguredNetworkGroup_get_vstream_infos_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_network_name(network_name);
 
     ConfiguredNetworkGroup_get_vstream_infos_Reply reply;
@@ -707,11 +692,12 @@ Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkG
     return deserialize_vstream_infos(reply);
 }
 
-Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_all_vstream_infos(uint32_t handle,
+Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkGroup_get_all_vstream_infos(const NetworkGroupIdentifier &identifier,
     std::string network_name)
 {
     ConfiguredNetworkGroup_get_vstream_infos_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_network_name(network_name);
 
     ConfiguredNetworkGroup_get_vstream_infos_Reply reply;
@@ -723,11 +709,12 @@ Expected<std::vector<hailo_vstream_info_t>> HailoRtRpcClient::ConfiguredNetworkG
     return deserialize_vstream_infos(reply);
 }
 
-Expected<bool> HailoRtRpcClient::ConfiguredNetworkGroup_is_scheduled(uint32_t handle)
+Expected<bool> HailoRtRpcClient::ConfiguredNetworkGroup_is_scheduled(const NetworkGroupIdentifier &identifier)
 {
     ConfiguredNetworkGroup_is_scheduled_Request request;
     ConfiguredNetworkGroup_is_scheduled_Reply reply;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     ClientContextWithTimeout context;
     grpc::Status status = m_stub->ConfiguredNetworkGroup_is_scheduled(&context, request, &reply);
     CHECK_GRPC_STATUS_AS_EXPECTED(status);
@@ -736,11 +723,12 @@ Expected<bool> HailoRtRpcClient::ConfiguredNetworkGroup_is_scheduled(uint32_t ha
     return reply.is_scheduled();
 }
 
-hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_timeout(uint32_t handle,
+hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_timeout(const NetworkGroupIdentifier &identifier,
     const std::chrono::milliseconds &timeout, const std::string &network_name)
 {
     ConfiguredNetworkGroup_set_scheduler_timeout_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_timeout_ms(static_cast<uint32_t>(timeout.count()));
     request.set_network_name(network_name);
 
@@ -752,11 +740,12 @@ hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_timeout(uint
     return static_cast<hailo_status>(reply.status());
 }
 
-hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_threshold(uint32_t handle, uint32_t threshold,
+hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_threshold(const NetworkGroupIdentifier &identifier, uint32_t threshold,
     const std::string &network_name)
 {
     ConfiguredNetworkGroup_set_scheduler_threshold_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_threshold(threshold);
     request.set_network_name(network_name);
 
@@ -768,11 +757,12 @@ hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_threshold(ui
     return static_cast<hailo_status>(reply.status());
 }
 
-hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_priority(uint32_t handle, uint8_t priority,
+hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_priority(const NetworkGroupIdentifier &identifier, uint8_t priority,
     const std::string &network_name)
 {
     ConfiguredNetworkGroup_set_scheduler_priority_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_priority(priority);
     request.set_network_name(network_name);
 
@@ -784,12 +774,13 @@ hailo_status HailoRtRpcClient::ConfiguredNetworkGroup_set_scheduler_priority(uin
     return static_cast<hailo_status>(reply.status());
 }
 
-Expected<LatencyMeasurementResult> HailoRtRpcClient::ConfiguredNetworkGroup_get_latency_measurement(uint32_t handle,
+Expected<LatencyMeasurementResult> HailoRtRpcClient::ConfiguredNetworkGroup_get_latency_measurement(const NetworkGroupIdentifier &identifier,
     const std::string &network_name)
 {
     ConfiguredNetworkGroup_get_latency_measurement_Request request;
     ConfiguredNetworkGroup_get_latency_measurement_Reply reply;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_network_name(network_name);
     ClientContextWithTimeout context;
     grpc::Status status = m_stub->ConfiguredNetworkGroup_get_latency_measurement(&context, request, &reply);
@@ -805,11 +796,12 @@ Expected<LatencyMeasurementResult> HailoRtRpcClient::ConfiguredNetworkGroup_get_
     return result;
 }
 
-Expected<bool> HailoRtRpcClient::ConfiguredNetworkGroup_is_multi_context(uint32_t handle)
+Expected<bool> HailoRtRpcClient::ConfiguredNetworkGroup_is_multi_context(const NetworkGroupIdentifier &identifier)
 {
     ConfiguredNetworkGroup_is_multi_context_Request request;
     ConfiguredNetworkGroup_is_multi_context_Reply reply;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     ClientContextWithTimeout context;
     grpc::Status status = m_stub->ConfiguredNetworkGroup_is_multi_context(&context, request, &reply);
     CHECK_GRPC_STATUS_AS_EXPECTED(status);
@@ -818,11 +810,12 @@ Expected<bool> HailoRtRpcClient::ConfiguredNetworkGroup_is_multi_context(uint32_
     return reply.is_multi_context();
 }
 
-Expected<ConfigureNetworkParams> HailoRtRpcClient::ConfiguredNetworkGroup_get_config_params(uint32_t handle)
+Expected<ConfigureNetworkParams> HailoRtRpcClient::ConfiguredNetworkGroup_get_config_params(const NetworkGroupIdentifier &identifier)
 {
     ConfiguredNetworkGroup_get_config_params_Request request;
     ConfiguredNetworkGroup_get_config_params_Reply reply;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     ClientContextWithTimeout context;
     grpc::Status status = m_stub->ConfiguredNetworkGroup_get_config_params(&context, request, &reply);
     CHECK_GRPC_STATUS_AS_EXPECTED(status);
@@ -858,11 +851,12 @@ Expected<ConfigureNetworkParams> HailoRtRpcClient::ConfiguredNetworkGroup_get_co
     return network_configure_params;
 }
 
-Expected<std::vector<std::string>> HailoRtRpcClient::ConfiguredNetworkGroup_get_sorted_output_names(uint32_t handle)
+Expected<std::vector<std::string>> HailoRtRpcClient::ConfiguredNetworkGroup_get_sorted_output_names(const NetworkGroupIdentifier &identifier)
 {
     ConfiguredNetworkGroup_get_sorted_output_names_Request request;
     ConfiguredNetworkGroup_get_sorted_output_names_Reply reply;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     ClientContextWithTimeout context;
     grpc::Status status = m_stub->ConfiguredNetworkGroup_get_sorted_output_names(&context, request, &reply);
     CHECK_GRPC_STATUS_AS_EXPECTED(status);
@@ -875,11 +869,13 @@ Expected<std::vector<std::string>> HailoRtRpcClient::ConfiguredNetworkGroup_get_
     return result;
 }
 
-Expected<std::vector<std::string>> HailoRtRpcClient::ConfiguredNetworkGroup_get_stream_names_from_vstream_name(uint32_t handle, const std::string &vstream_name)
+Expected<std::vector<std::string>> HailoRtRpcClient::ConfiguredNetworkGroup_get_stream_names_from_vstream_name(const NetworkGroupIdentifier &identifier,
+    const std::string &vstream_name)
 {
     ConfiguredNetworkGroup_get_stream_names_from_vstream_name_Request request;
     ConfiguredNetworkGroup_get_stream_names_from_vstream_name_Reply reply;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_vstream_name(vstream_name);
     ClientContextWithTimeout context;
     grpc::Status status = m_stub->ConfiguredNetworkGroup_get_stream_names_from_vstream_name(&context, request, &reply);
@@ -893,11 +889,12 @@ Expected<std::vector<std::string>> HailoRtRpcClient::ConfiguredNetworkGroup_get_
     return result;
 }
 
-Expected<std::vector<std::string>> HailoRtRpcClient::ConfiguredNetworkGroup_get_vstream_names_from_stream_name(uint32_t handle, const std::string &stream_name)
+Expected<std::vector<std::string>> HailoRtRpcClient::ConfiguredNetworkGroup_get_vstream_names_from_stream_name(const NetworkGroupIdentifier &identifier, const std::string &stream_name)
 {
     ConfiguredNetworkGroup_get_vstream_names_from_stream_name_Request request;
     ConfiguredNetworkGroup_get_vstream_names_from_stream_name_Reply reply;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    ConfiguredNetworkGroup_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_stream_name(stream_name);
     ClientContextWithTimeout context;
     grpc::Status status = m_stub->ConfiguredNetworkGroup_get_vstream_names_from_stream_name(&context, request, &reply);
@@ -911,11 +908,52 @@ Expected<std::vector<std::string>> HailoRtRpcClient::ConfiguredNetworkGroup_get_
     return result;
 }
 
-hailo_status HailoRtRpcClient::InputVStream_write(uint32_t handle, const MemoryView &buffer)
+Expected<bool> HailoRtRpcClient::InputVStream_is_multi_planar(const VStreamIdentifier &identifier)
+{
+    InputVStream_is_multi_planar_Request request;
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
+    ClientContextWithTimeout context;
+    InputVStream_is_multi_planar_Reply reply;
+    grpc::Status status = m_stub->InputVStream_is_multi_planar(&context, request, &reply);
+    CHECK_GRPC_STATUS_AS_EXPECTED(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    CHECK_SUCCESS_AS_EXPECTED(static_cast<hailo_status>(reply.status()));
+    auto is_multi_planar = reply.is_multi_planar();
+    return is_multi_planar;
+}
+
+hailo_status HailoRtRpcClient::InputVStream_write(const VStreamIdentifier &identifier, const hailo_pix_buffer_t &buffer)
+{
+    InputVStream_write_pix_Request request;
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+    request.set_index(buffer.index);
+    request.set_number_of_planes(buffer.number_of_planes);
+    for (uint32_t i = 0; i < buffer.number_of_planes; i++) {
+        request.add_planes_data(buffer.planes[i].user_ptr, buffer.planes[i].bytes_used);
+    }
+
+    ClientContextWithTimeout context;
+    InputVStream_write_pix_Reply reply;
+    grpc::Status status = m_stub->InputVStream_write_pix(&context, request, &reply);
+    CHECK_GRPC_STATUS(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    if (reply.status() == HAILO_STREAM_ABORTED_BY_USER) {
+        return static_cast<hailo_status>(reply.status());
+    }
+    CHECK_SUCCESS(static_cast<hailo_status>(reply.status()));
+    return HAILO_SUCCESS;
+}
+
+hailo_status HailoRtRpcClient::InputVStream_write(const VStreamIdentifier &identifier, const MemoryView &buffer)
 {
     InputVStream_write_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_data(buffer.data(), buffer.size());
+
     ClientContextWithTimeout context;
     InputVStream_write_Reply reply;
     grpc::Status status = m_stub->InputVStream_write(&context, request, &reply);
@@ -928,11 +966,13 @@ hailo_status HailoRtRpcClient::InputVStream_write(uint32_t handle, const MemoryV
     return HAILO_SUCCESS;
 }
 
-hailo_status HailoRtRpcClient::OutputVStream_read(uint32_t handle, MemoryView buffer)
+hailo_status HailoRtRpcClient::OutputVStream_read(const VStreamIdentifier &identifier, MemoryView buffer)
 {
     OutputVStream_read_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
     request.set_size(static_cast<uint32_t>(buffer.size()));
+
     ClientContextWithTimeout context;
     OutputVStream_read_Reply reply;
     grpc::Status status = m_stub->OutputVStream_read(&context, request, &reply);
@@ -946,10 +986,12 @@ hailo_status HailoRtRpcClient::OutputVStream_read(uint32_t handle, MemoryView bu
     return HAILO_SUCCESS;
 }
 
-Expected<size_t> HailoRtRpcClient::InputVStream_get_frame_size(uint32_t handle)
+Expected<size_t> HailoRtRpcClient::InputVStream_get_frame_size(const VStreamIdentifier &identifier)
 {
     VStream_get_frame_size_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_get_frame_size_Reply reply;
     grpc::Status status = m_stub->InputVStream_get_frame_size(&context, request, &reply);
@@ -959,10 +1001,12 @@ Expected<size_t> HailoRtRpcClient::InputVStream_get_frame_size(uint32_t handle)
     return reply.frame_size();
 }
 
-Expected<size_t> HailoRtRpcClient::OutputVStream_get_frame_size(uint32_t handle)
+Expected<size_t> HailoRtRpcClient::OutputVStream_get_frame_size(const VStreamIdentifier &identifier)
 {
     VStream_get_frame_size_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_get_frame_size_Reply reply;
     grpc::Status status = m_stub->OutputVStream_get_frame_size(&context, request, &reply);
@@ -972,10 +1016,12 @@ Expected<size_t> HailoRtRpcClient::OutputVStream_get_frame_size(uint32_t handle)
     return reply.frame_size();
 }
 
-hailo_status HailoRtRpcClient::InputVStream_flush(uint32_t handle)
+hailo_status HailoRtRpcClient::InputVStream_flush(const VStreamIdentifier &identifier)
 {
     InputVStream_flush_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     InputVStream_flush_Reply reply;
     grpc::Status status = m_stub->InputVStream_flush(&context, request, &reply);
@@ -984,10 +1030,12 @@ hailo_status HailoRtRpcClient::InputVStream_flush(uint32_t handle)
     return static_cast<hailo_status>(reply.status());
 }
 
-Expected<std::string> HailoRtRpcClient::InputVStream_name(uint32_t handle)
+Expected<std::string> HailoRtRpcClient::InputVStream_name(const VStreamIdentifier &identifier)
 {
     VStream_name_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_name_Reply reply;
     grpc::Status status = m_stub->InputVStream_name(&context, request, &reply);
@@ -998,10 +1046,12 @@ Expected<std::string> HailoRtRpcClient::InputVStream_name(uint32_t handle)
     return name;
 }
 
-Expected<std::string> HailoRtRpcClient::OutputVStream_name(uint32_t handle)
+Expected<std::string> HailoRtRpcClient::OutputVStream_name(const VStreamIdentifier &identifier)
 {
     VStream_name_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_name_Reply reply;
     grpc::Status status = m_stub->OutputVStream_name(&context, request, &reply);
@@ -1012,10 +1062,12 @@ Expected<std::string> HailoRtRpcClient::OutputVStream_name(uint32_t handle)
     return name;
 }
 
-Expected<std::string> HailoRtRpcClient::InputVStream_network_name(uint32_t handle)
+Expected<std::string> HailoRtRpcClient::InputVStream_network_name(const VStreamIdentifier &identifier)
 {
     VStream_network_name_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_network_name_Reply reply;
     grpc::Status status = m_stub->InputVStream_network_name(&context, request, &reply);
@@ -1026,10 +1078,12 @@ Expected<std::string> HailoRtRpcClient::InputVStream_network_name(uint32_t handl
     return name;
 }
 
-Expected<std::string> HailoRtRpcClient::OutputVStream_network_name(uint32_t handle)
+Expected<std::string> HailoRtRpcClient::OutputVStream_network_name(const VStreamIdentifier &identifier)
 {
     VStream_network_name_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_network_name_Reply reply;
     grpc::Status status = m_stub->OutputVStream_network_name(&context, request, &reply);
@@ -1040,10 +1094,12 @@ Expected<std::string> HailoRtRpcClient::OutputVStream_network_name(uint32_t hand
     return name;
 }
 
-hailo_status HailoRtRpcClient::InputVStream_abort(uint32_t handle)
+hailo_status HailoRtRpcClient::InputVStream_abort(const VStreamIdentifier &identifier)
 {
     VStream_abort_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_abort_Reply reply;
     grpc::Status status = m_stub->InputVStream_abort(&context, request, &reply);
@@ -1052,10 +1108,12 @@ hailo_status HailoRtRpcClient::InputVStream_abort(uint32_t handle)
     return static_cast<hailo_status>(reply.status());
 }
 
-hailo_status HailoRtRpcClient::OutputVStream_abort(uint32_t handle)
+hailo_status HailoRtRpcClient::OutputVStream_abort(const VStreamIdentifier &identifier)
 {
     VStream_abort_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_abort_Reply reply;
     grpc::Status status = m_stub->OutputVStream_abort(&context, request, &reply);
@@ -1064,10 +1122,12 @@ hailo_status HailoRtRpcClient::OutputVStream_abort(uint32_t handle)
     return static_cast<hailo_status>(reply.status());
 }
 
-hailo_status HailoRtRpcClient::InputVStream_resume(uint32_t handle)
+hailo_status HailoRtRpcClient::InputVStream_resume(const VStreamIdentifier &identifier)
 {
     VStream_resume_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_resume_Reply reply;
     grpc::Status status = m_stub->InputVStream_resume(&context, request, &reply);
@@ -1076,10 +1136,12 @@ hailo_status HailoRtRpcClient::InputVStream_resume(uint32_t handle)
     return static_cast<hailo_status>(reply.status());
 }
 
-hailo_status HailoRtRpcClient::OutputVStream_resume(uint32_t handle)
+hailo_status HailoRtRpcClient::OutputVStream_resume(const VStreamIdentifier &identifier)
 {
     VStream_resume_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_resume_Reply reply;
     grpc::Status status = m_stub->OutputVStream_resume(&context, request, &reply);
@@ -1088,10 +1150,12 @@ hailo_status HailoRtRpcClient::OutputVStream_resume(uint32_t handle)
     return static_cast<hailo_status>(reply.status());
 }
 
-hailo_status HailoRtRpcClient::InputVStream_stop_and_clear(uint32_t handle)
+hailo_status HailoRtRpcClient::InputVStream_stop_and_clear(const VStreamIdentifier &identifier)
 {
     VStream_stop_and_clear_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_stop_and_clear_Reply reply;
     grpc::Status status = m_stub->InputVStream_stop_and_clear(&context, request, &reply);
@@ -1100,10 +1164,12 @@ hailo_status HailoRtRpcClient::InputVStream_stop_and_clear(uint32_t handle)
     return static_cast<hailo_status>(reply.status());
 }
 
-hailo_status HailoRtRpcClient::OutputVStream_stop_and_clear(uint32_t handle)
+hailo_status HailoRtRpcClient::OutputVStream_stop_and_clear(const VStreamIdentifier &identifier)
 {
     VStream_stop_and_clear_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_stop_and_clear_Reply reply;
     grpc::Status status = m_stub->OutputVStream_stop_and_clear(&context, request, &reply);
@@ -1112,10 +1178,12 @@ hailo_status HailoRtRpcClient::OutputVStream_stop_and_clear(uint32_t handle)
     return static_cast<hailo_status>(reply.status());
 }
 
-hailo_status HailoRtRpcClient::InputVStream_start_vstream(uint32_t handle)
+hailo_status HailoRtRpcClient::InputVStream_start_vstream(const VStreamIdentifier &identifier)
 {
     VStream_start_vstream_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_start_vstream_Reply reply;
     grpc::Status status = m_stub->InputVStream_start_vstream(&context, request, &reply);
@@ -1124,10 +1192,12 @@ hailo_status HailoRtRpcClient::InputVStream_start_vstream(uint32_t handle)
     return static_cast<hailo_status>(reply.status());
 }
 
-hailo_status HailoRtRpcClient::OutputVStream_start_vstream(uint32_t handle)
+hailo_status HailoRtRpcClient::OutputVStream_start_vstream(const VStreamIdentifier &identifier)
 {
     VStream_start_vstream_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_start_vstream_Reply reply;
     grpc::Status status = m_stub->OutputVStream_start_vstream(&context, request, &reply);
@@ -1136,10 +1206,12 @@ hailo_status HailoRtRpcClient::OutputVStream_start_vstream(uint32_t handle)
     return static_cast<hailo_status>(reply.status());
 }
 
-Expected<hailo_format_t> HailoRtRpcClient::InputVStream_get_user_buffer_format(uint32_t handle)
+Expected<hailo_format_t> HailoRtRpcClient::InputVStream_get_user_buffer_format(const VStreamIdentifier &identifier)
 {
     VStream_get_user_buffer_format_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_get_user_buffer_format_Reply reply;
     grpc::Status status = m_stub->InputVStream_get_user_buffer_format(&context, request, &reply);
@@ -1157,10 +1229,12 @@ Expected<hailo_format_t> HailoRtRpcClient::InputVStream_get_user_buffer_format(u
     return format;
 }
 
-Expected<hailo_format_t> HailoRtRpcClient::OutputVStream_get_user_buffer_format(uint32_t handle)
+Expected<hailo_format_t> HailoRtRpcClient::OutputVStream_get_user_buffer_format(const VStreamIdentifier &identifier)
 {
     VStream_get_user_buffer_format_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_get_user_buffer_format_Reply reply;
     grpc::Status status = m_stub->OutputVStream_get_user_buffer_format(&context, request, &reply);
@@ -1178,10 +1252,12 @@ Expected<hailo_format_t> HailoRtRpcClient::OutputVStream_get_user_buffer_format(
     return format;
 }
 
-Expected<hailo_vstream_info_t> HailoRtRpcClient::InputVStream_get_info(uint32_t handle)
+Expected<hailo_vstream_info_t> HailoRtRpcClient::InputVStream_get_info(const VStreamIdentifier &identifier)
 {
     VStream_get_info_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_get_info_Reply reply;
     grpc::Status status = m_stub->InputVStream_get_info(&context, request, &reply);
@@ -1191,10 +1267,12 @@ Expected<hailo_vstream_info_t> HailoRtRpcClient::InputVStream_get_info(uint32_t
     auto info_proto = reply.vstream_info();
     return deserialize_vstream_info(info_proto);
 }
-Expected<hailo_vstream_info_t> HailoRtRpcClient::OutputVStream_get_info(uint32_t handle)
+Expected<hailo_vstream_info_t> HailoRtRpcClient::OutputVStream_get_info(const VStreamIdentifier &identifier)
 {
     VStream_get_info_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_get_info_Reply reply;
     grpc::Status status = m_stub->OutputVStream_get_info(&context, request, &reply);
@@ -1205,10 +1283,12 @@ Expected<hailo_vstream_info_t> HailoRtRpcClient::OutputVStream_get_info(uint32_t
     return deserialize_vstream_info(info_proto);
 }
 
-Expected<bool> HailoRtRpcClient::InputVStream_is_aborted(uint32_t handle)
+Expected<bool> HailoRtRpcClient::InputVStream_is_aborted(const VStreamIdentifier &identifier)
 {
     VStream_is_aborted_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_is_aborted_Reply reply;
     grpc::Status status = m_stub->InputVStream_is_aborted(&context, request, &reply);
@@ -1219,10 +1299,12 @@ Expected<bool> HailoRtRpcClient::InputVStream_is_aborted(uint32_t handle)
     return is_aborted;
 }
 
-Expected<bool> HailoRtRpcClient::OutputVStream_is_aborted(uint32_t handle)
+Expected<bool> HailoRtRpcClient::OutputVStream_is_aborted(const VStreamIdentifier &identifier)
 {
     VStream_is_aborted_Request request;
-    request.set_handle(handle);
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+
     ClientContextWithTimeout context;
     VStream_is_aborted_Reply reply;
     grpc::Status status = m_stub->OutputVStream_is_aborted(&context, request, &reply);
@@ -1233,4 +1315,66 @@ Expected<bool> HailoRtRpcClient::OutputVStream_is_aborted(uint32_t handle)
     return is_aborted;
 }
 
+hailo_status HailoRtRpcClient::OutputVStream_set_nms_score_threshold(const VStreamIdentifier &identifier, float32_t threshold)
+{
+    VStream_set_nms_score_threshold_Request request;
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+    request.set_threshold(threshold);
+
+    ClientContextWithTimeout context;
+    VStream_set_nms_score_threshold_Reply reply;
+    grpc::Status status = m_stub->OutputVStream_set_nms_score_threshold(&context, request, &reply);
+    CHECK_GRPC_STATUS(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    return static_cast<hailo_status>(reply.status());
+}
+hailo_status HailoRtRpcClient::OutputVStream_set_nms_iou_threshold(const VStreamIdentifier &identifier, float32_t threshold)
+{
+    VStream_set_nms_iou_threshold_Request request;
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+    request.set_threshold(threshold);
+
+    ClientContextWithTimeout context;
+    VStream_set_nms_iou_threshold_Reply reply;
+    grpc::Status status = m_stub->OutputVStream_set_nms_iou_threshold(&context, request, &reply);
+    CHECK_GRPC_STATUS(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    return static_cast<hailo_status>(reply.status());
+}
+
+hailo_status HailoRtRpcClient::OutputVStream_set_nms_max_proposals_per_class(const VStreamIdentifier &identifier, uint32_t max_proposals_per_class)
+{
+    VStream_set_nms_max_proposals_per_class_Request request;
+    auto proto_identifier = request.mutable_identifier();
+    VStream_convert_identifier_to_proto(identifier, proto_identifier);
+    request.set_max_proposals_per_class(max_proposals_per_class);
+
+    ClientContextWithTimeout context;
+    VStream_set_nms_max_proposals_per_class_Reply reply;
+    grpc::Status status = m_stub->OutputVStream_set_nms_max_proposals_per_class(&context, request, &reply);
+    CHECK_GRPC_STATUS(status);
+    assert(reply.status() < HAILO_STATUS_COUNT);
+    return static_cast<hailo_status>(reply.status());
+}
+
+void HailoRtRpcClient::VDevice_convert_identifier_to_proto(const VDeviceIdentifier &identifier, ProtoVDeviceIdentifier *proto_identifier)
+{
+    proto_identifier->set_vdevice_handle(identifier.m_vdevice_handle);
+}
+
+void HailoRtRpcClient::ConfiguredNetworkGroup_convert_identifier_to_proto(const NetworkGroupIdentifier &identifier, ProtoConfiguredNetworkGroupIdentifier *proto_identifier)
+{
+    proto_identifier->set_network_group_handle(identifier.m_network_group_handle);
+    proto_identifier->set_vdevice_handle(identifier.m_vdevice_identifier.m_vdevice_handle);
+}
+
+void HailoRtRpcClient::VStream_convert_identifier_to_proto(const VStreamIdentifier &identifier, ProtoVStreamIdentifier *proto_identifier)
+{
+    proto_identifier->set_vdevice_handle(identifier.m_network_group_identifier.m_vdevice_identifier.m_vdevice_handle);
+    proto_identifier->set_network_group_handle(identifier.m_network_group_identifier.m_network_group_handle);
+    proto_identifier->set_vstream_handle(identifier.m_vstream_handle);
+}
+
 }
\ No newline at end of file
index 231daa29d540f69c9d7aad45803099de0480352b..4b3d70b4dedbe950ee0a828803c69b9153aecf19 100644 (file)
@@ -13,6 +13,7 @@
 #include "hailo/hailort.h"
 #include "hailo/expected.hpp"
 #include "hailo/device.hpp"
+#include "rpc/rpc_definitions.hpp"
 
 #if defined(_MSC_VER)
 #pragma warning(push)
@@ -30,7 +31,6 @@
 #endif
 #include <memory>
 
-
 namespace hailort
 {
 
@@ -54,84 +54,95 @@ public:
     Expected<hailo_version_t> get_service_version();
 
     Expected<uint32_t> VDevice_create(const hailo_vdevice_params_t &params, uint32_t pid);
-    Expected<uint32_t> VDevice_dup_handle(uint32_t pid, uint32_t handle);
-    hailo_status VDevice_release(uint32_t handle, uint32_t pid);
-    Expected<std::vector<std::string>> VDevice_get_physical_devices_ids(uint32_t handle);
-    Expected<std::vector<std::unique_ptr<Device>>> VDevice_get_physical_devices(uint32_t handle);
-    Expected<hailo_stream_interface_t> VDevice_get_default_streams_interface(uint32_t handle);
-    Expected<std::vector<uint32_t>> VDevice_configure(uint32_t vdevice_handle, const Hef &hef, uint32_t pid, const NetworkGroupsParamsMap &configure_params={});
-
-    Expected<uint32_t> ConfiguredNetworkGroup_dup_handle(uint32_t pid, uint32_t handle);
-    hailo_status ConfiguredNetworkGroup_release(uint32_t handle, uint32_t pid);
-    Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroup_make_input_vstream_params(uint32_t handle,
+    hailo_status VDevice_release(const VDeviceIdentifier &identifier, uint32_t pid);
+    Expected<std::vector<std::string>> VDevice_get_physical_devices_ids(const VDeviceIdentifier &identifier);
+    Expected<std::vector<std::unique_ptr<Device>>> VDevice_get_physical_devices(const VDeviceIdentifier &identifier);
+    Expected<hailo_stream_interface_t> VDevice_get_default_streams_interface(const VDeviceIdentifier &identifier);
+    Expected<std::vector<uint32_t>> VDevice_configure(const VDeviceIdentifier &identifier, const Hef &hef, uint32_t pid, const NetworkGroupsParamsMap &configure_params={});
+
+    Expected<uint32_t> ConfiguredNetworkGroup_dup_handle(const NetworkGroupIdentifier &identifier, uint32_t pid);
+    hailo_status ConfiguredNetworkGroup_release(const NetworkGroupIdentifier &identifier, uint32_t pid);
+    Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroup_make_input_vstream_params(const NetworkGroupIdentifier &identifier,
         bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
         const std::string &network_name);
-    Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroup_make_output_vstream_params(uint32_t handle,
+    Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroup_make_output_vstream_params(const NetworkGroupIdentifier &identifier,
         bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
         const std::string &network_name);
-    Expected<std::string> ConfiguredNetworkGroup_get_network_group_name(uint32_t handle);
-    Expected<std::string> ConfiguredNetworkGroup_name(uint32_t handle);
-    Expected<std::vector<hailo_network_info_t>> ConfiguredNetworkGroup_get_network_infos(uint32_t handle);
-    Expected<std::vector<hailo_stream_info_t>> ConfiguredNetworkGroup_get_all_stream_infos(uint32_t handle, const std::string &network_name);
-    Expected<hailo_stream_interface_t> ConfiguredNetworkGroup_get_default_stream_interface(uint32_t handle);
-    Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> ConfiguredNetworkGroup_make_output_vstream_params_groups(uint32_t handle,
+    Expected<std::string> ConfiguredNetworkGroup_get_network_group_name(const NetworkGroupIdentifier &identifier);
+    Expected<std::string> ConfiguredNetworkGroup_name(const NetworkGroupIdentifier &identifier);
+    Expected<std::vector<hailo_network_info_t>> ConfiguredNetworkGroup_get_network_infos(const NetworkGroupIdentifier &identifier);
+    Expected<std::vector<hailo_stream_info_t>> ConfiguredNetworkGroup_get_all_stream_infos(const NetworkGroupIdentifier &identifier, const std::string &network_name);
+    Expected<hailo_stream_interface_t> ConfiguredNetworkGroup_get_default_stream_interface(const NetworkGroupIdentifier &identifier);
+    Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> ConfiguredNetworkGroup_make_output_vstream_params_groups(const NetworkGroupIdentifier &identifier,
         bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size);
-    Expected<std::vector<std::vector<std::string>>> ConfiguredNetworkGroup_get_output_vstream_groups(uint32_t handle);
-    Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_input_vstream_infos(uint32_t handle, std::string network_name);
-    Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_output_vstream_infos(uint32_t handle, std::string network_name);
-    Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_all_vstream_infos(uint32_t handle, std::string network_name);
-    Expected<bool> ConfiguredNetworkGroup_is_scheduled(uint32_t handle);
-    hailo_status ConfiguredNetworkGroup_set_scheduler_timeout(uint32_t handle, const std::chrono::milliseconds &timeout,
+    Expected<std::vector<std::vector<std::string>>> ConfiguredNetworkGroup_get_output_vstream_groups(const NetworkGroupIdentifier &identifier);
+    Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_input_vstream_infos(const NetworkGroupIdentifier &identifier, std::string network_name);
+    Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_output_vstream_infos(const NetworkGroupIdentifier &identifier, std::string network_name);
+    Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroup_get_all_vstream_infos(const NetworkGroupIdentifier &identifier, std::string network_name);
+    Expected<bool> ConfiguredNetworkGroup_is_scheduled(const NetworkGroupIdentifier &identifier);
+    hailo_status ConfiguredNetworkGroup_set_scheduler_timeout(const NetworkGroupIdentifier &identifier, const std::chrono::milliseconds &timeout,
         const std::string &network_name);
-    hailo_status ConfiguredNetworkGroup_set_scheduler_threshold(uint32_t handle, uint32_t threshold, const std::string &network_name);
-    hailo_status ConfiguredNetworkGroup_set_scheduler_priority(uint32_t handle, uint8_t priority, const std::string &network_name);
-    Expected<LatencyMeasurementResult> ConfiguredNetworkGroup_get_latency_measurement(uint32_t handle, const std::string &network_name);
-    Expected<bool> ConfiguredNetworkGroup_is_multi_context(uint32_t handle);
-    Expected<ConfigureNetworkParams> ConfiguredNetworkGroup_get_config_params(uint32_t handle);
-    Expected<std::vector<std::string>> ConfiguredNetworkGroup_get_sorted_output_names(uint32_t handle);
-    Expected<std::vector<std::string>> ConfiguredNetworkGroup_get_stream_names_from_vstream_name(uint32_t handle, const std::string &vstream_name);
-    Expected<std::vector<std::string>> ConfiguredNetworkGroup_get_vstream_names_from_stream_name(uint32_t handle, const std::string &stream_name);
-
-    Expected<std::vector<uint32_t>> InputVStreams_create(uint32_t net_group_handle,
+    hailo_status ConfiguredNetworkGroup_set_scheduler_threshold(const NetworkGroupIdentifier &identifier, uint32_t threshold, const std::string &network_name);
+    hailo_status ConfiguredNetworkGroup_set_scheduler_priority(const NetworkGroupIdentifier &identifier, uint8_t priority, const std::string &network_name);
+    Expected<LatencyMeasurementResult> ConfiguredNetworkGroup_get_latency_measurement(const NetworkGroupIdentifier &identifier, const std::string &network_name);
+    Expected<bool> ConfiguredNetworkGroup_is_multi_context(const NetworkGroupIdentifier &identifier);
+    Expected<ConfigureNetworkParams> ConfiguredNetworkGroup_get_config_params(const NetworkGroupIdentifier &identifier);
+    Expected<std::vector<std::string>> ConfiguredNetworkGroup_get_sorted_output_names(const NetworkGroupIdentifier &identifier);
+    Expected<std::vector<std::string>> ConfiguredNetworkGroup_get_stream_names_from_vstream_name(const NetworkGroupIdentifier &identifier, const std::string &vstream_name);
+    Expected<std::vector<std::string>> ConfiguredNetworkGroup_get_vstream_names_from_stream_name(const NetworkGroupIdentifier &identifier, const std::string &stream_name);
+
+    Expected<std::vector<uint32_t>> InputVStreams_create(const NetworkGroupIdentifier &identifier,
         const std::map<std::string, hailo_vstream_params_t> &inputs_params, uint32_t pid);
-    Expected<uint32_t> InputVStream_dup_handle(uint32_t pid, uint32_t handle);
-    Expected<uint32_t> OutputVStream_dup_handle(uint32_t pid, uint32_t handle);
-    hailo_status InputVStream_release(uint32_t handle, uint32_t pid);
-    Expected<std::vector<uint32_t>> OutputVStreams_create(uint32_t net_group_handle,
+    Expected<std::vector<uint32_t>> OutputVStreams_create(const NetworkGroupIdentifier &identifier,
         const std::map<std::string, hailo_vstream_params_t> &output_params, uint32_t pid);
-    hailo_status OutputVStream_release(uint32_t handle, uint32_t pid);
-    hailo_status InputVStream_write(uint32_t handle, const MemoryView &buffer);
-    hailo_status OutputVStream_read(uint32_t handle, MemoryView buffer);
-    Expected<size_t> InputVStream_get_frame_size(uint32_t handle);
-    Expected<size_t> OutputVStream_get_frame_size(uint32_t handle);
 
-    hailo_status InputVStream_flush(uint32_t handle);
+    Expected<uint32_t> InputVStream_dup_handle(const VStreamIdentifier &identifier, uint32_t pid);
+    Expected<uint32_t> OutputVStream_dup_handle(const VStreamIdentifier &identifier, uint32_t pid);
+    hailo_status InputVStream_release(const VStreamIdentifier &identifier, uint32_t pid);
+
+    hailo_status OutputVStream_release(const VStreamIdentifier &identifier, uint32_t pid);
+    Expected<bool> InputVStream_is_multi_planar(const VStreamIdentifier &identifier);
+    hailo_status InputVStream_write(const VStreamIdentifier &identifier, const MemoryView &buffer);
+    hailo_status InputVStream_write(const VStreamIdentifier &identifier, const hailo_pix_buffer_t &buffer);
+    hailo_status OutputVStream_read(const VStreamIdentifier &identifier, MemoryView buffer);
+    Expected<size_t> InputVStream_get_frame_size(const VStreamIdentifier &identifier);
+    Expected<size_t> OutputVStream_get_frame_size(const VStreamIdentifier &identifier);
+
+    hailo_status InputVStream_flush(const VStreamIdentifier &identifier);
 
-    Expected<std::string> InputVStream_name(uint32_t handle);
-    Expected<std::string> OutputVStream_name(uint32_t handle);
+    Expected<std::string> InputVStream_name(const VStreamIdentifier &identifier);
+    Expected<std::string> OutputVStream_name(const VStreamIdentifier &identifier);
 
-    Expected<std::string> InputVStream_network_name(uint32_t handle);
-    Expected<std::string> OutputVStream_network_name(uint32_t handle);
+    Expected<std::string> InputVStream_network_name(const VStreamIdentifier &identifier);
+    Expected<std::string> OutputVStream_network_name(const VStreamIdentifier &identifier);
 
-    hailo_status InputVStream_abort(uint32_t handle);
-    hailo_status OutputVStream_abort(uint32_t handle);
-    hailo_status InputVStream_resume(uint32_t handle);
-    hailo_status OutputVStream_resume(uint32_t handle);
-    hailo_status InputVStream_stop_and_clear(uint32_t handle);
-    hailo_status OutputVStream_stop_and_clear(uint32_t handle);
-    hailo_status InputVStream_start_vstream(uint32_t handle);
-    hailo_status OutputVStream_start_vstream(uint32_t handle);
+    hailo_status InputVStream_abort(const VStreamIdentifier &identifier);
+    hailo_status OutputVStream_abort(const VStreamIdentifier &identifier);
+    hailo_status InputVStream_resume(const VStreamIdentifier &identifier);
+    hailo_status OutputVStream_resume(const VStreamIdentifier &identifier);
+    hailo_status InputVStream_stop_and_clear(const VStreamIdentifier &identifier);
+    hailo_status OutputVStream_stop_and_clear(const VStreamIdentifier &identifier);
+    hailo_status InputVStream_start_vstream(const VStreamIdentifier &identifier);
+    hailo_status OutputVStream_start_vstream(const VStreamIdentifier &identifier);
 
-    Expected<hailo_format_t> InputVStream_get_user_buffer_format(uint32_t handle);
-    Expected<hailo_format_t> OutputVStream_get_user_buffer_format(uint32_t handle);
+    Expected<hailo_format_t> InputVStream_get_user_buffer_format(const VStreamIdentifier &identifier);
+    Expected<hailo_format_t> OutputVStream_get_user_buffer_format(const VStreamIdentifier &identifier);
 
-    Expected<hailo_vstream_info_t> InputVStream_get_info(uint32_t handle);
-    Expected<hailo_vstream_info_t> OutputVStream_get_info(uint32_t handle);
+    Expected<hailo_vstream_info_t> InputVStream_get_info(const VStreamIdentifier &identifier);
+    Expected<hailo_vstream_info_t> OutputVStream_get_info(const VStreamIdentifier &identifier);
 
-    Expected<bool> InputVStream_is_aborted(uint32_t handle);
-    Expected<bool> OutputVStream_is_aborted(uint32_t handle);
+    Expected<bool> InputVStream_is_aborted(const VStreamIdentifier &identifier);
+    Expected<bool> OutputVStream_is_aborted(const VStreamIdentifier &identifier);
+
+    hailo_status OutputVStream_set_nms_score_threshold(const VStreamIdentifier &identifier, float32_t threshold);
+    hailo_status OutputVStream_set_nms_iou_threshold(const VStreamIdentifier &identifier, float32_t threshold);
+    hailo_status OutputVStream_set_nms_max_proposals_per_class(const VStreamIdentifier &identifier, uint32_t max_proposals_per_class);
 
 private:
+    void VDevice_convert_identifier_to_proto(const VDeviceIdentifier &identifier, ProtoVDeviceIdentifier *proto_identifier);
+    void ConfiguredNetworkGroup_convert_identifier_to_proto(const NetworkGroupIdentifier &identifier, ProtoConfiguredNetworkGroupIdentifier *proto_identifier);
+    void VStream_convert_identifier_to_proto(const VStreamIdentifier &identifier, ProtoVStreamIdentifier *proto_identifier);
+
     std::unique_ptr<ProtoHailoRtRpc::Stub> m_stub;
 };
 
index 0e2bf6620d3ed4a9dc5ff20b095f508fe6a752cb..b085bbaadb1583f1e1a4dc1f9ff29f06e7db3fcd 100644 (file)
 
 #include "network_group/network_group_internal.hpp"
 #include "net_flow/pipeline/vstream_internal.hpp"
-#include "rpc/rpc_definitions.hpp"
 #include "rpc_client_utils.hpp"
 
 
 namespace hailort
 {
 
-ConfiguredNetworkGroupClient::ConfiguredNetworkGroupClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle) :
+ConfiguredNetworkGroupClient::ConfiguredNetworkGroupClient(std::unique_ptr<HailoRtRpcClient> client, NetworkGroupIdentifier &&identifier) :
     m_client(std::move(client)),
-    m_handle(handle)
+    m_identifier(identifier)
 {
-    auto reply = m_client->ConfiguredNetworkGroup_name(m_handle);
+    auto reply = m_client->ConfiguredNetworkGroup_name(m_identifier);
     if (!reply) {
         LOGGER__ERROR("get_network_group_name failed with status {}", reply.status());
         return;
@@ -34,17 +33,22 @@ ConfiguredNetworkGroupClient::ConfiguredNetworkGroupClient(std::unique_ptr<Hailo
     m_network_group_name = reply.value();
 }
 
-ConfiguredNetworkGroupClient::ConfiguredNetworkGroupClient(uint32_t handle, const std::string &network_group_name) :
-    m_handle(handle),
+ConfiguredNetworkGroupClient::ConfiguredNetworkGroupClient(NetworkGroupIdentifier &&identifier, const std::string &network_group_name) :
+    m_identifier(identifier),
     m_network_group_name(network_group_name)
 {}
 
-Expected<std::shared_ptr<ConfiguredNetworkGroupClient>> ConfiguredNetworkGroupClient::duplicate_network_group_client(uint32_t handle,
+Expected<std::shared_ptr<ConfiguredNetworkGroupClient>> ConfiguredNetworkGroupClient::duplicate_network_group_client(uint32_t ng_handle, uint32_t vdevice_handle,
     const std::string &network_group_name)
 {
-    auto duplicated_net_group = std::shared_ptr<ConfiguredNetworkGroupClient>(new (std::nothrow) ConfiguredNetworkGroupClient(handle, network_group_name));
+    auto duplicated_net_group = std::shared_ptr<ConfiguredNetworkGroupClient>(new (std::nothrow)
+        ConfiguredNetworkGroupClient(NetworkGroupIdentifier(ng_handle, vdevice_handle), network_group_name));
     CHECK_ARG_NOT_NULL_AS_EXPECTED(duplicated_net_group);
-    auto status = duplicated_net_group->after_fork_in_child();
+
+    auto status = duplicated_net_group->create_client();
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    status = duplicated_net_group->dup_handle();
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     return duplicated_net_group;
@@ -52,7 +56,7 @@ Expected<std::shared_ptr<ConfiguredNetworkGroupClient>> ConfiguredNetworkGroupCl
 
 ConfiguredNetworkGroupClient::~ConfiguredNetworkGroupClient()
 {
-    auto reply = m_client->ConfiguredNetworkGroup_release(m_handle, OsUtils::get_curr_pid());
+    auto reply = m_client->ConfiguredNetworkGroup_release(m_identifier, OsUtils::get_curr_pid());
     if (reply != HAILO_SUCCESS) {
         LOGGER__CRITICAL("ConfiguredNetworkGroup_release failed with status: {}", reply);
     }
@@ -82,9 +86,16 @@ hailo_status ConfiguredNetworkGroupClient::after_fork_in_child()
     auto status = create_client();
     CHECK_SUCCESS(status);
 
-    auto expected_dup_handle = m_client->ConfiguredNetworkGroup_dup_handle(OsUtils::get_curr_pid(), m_handle);
+    auto expected_dup_handle = m_client->ConfiguredNetworkGroup_dup_handle(m_identifier, OsUtils::get_curr_pid());
+    CHECK_EXPECTED_AS_STATUS(expected_dup_handle);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status ConfiguredNetworkGroupClient::dup_handle()
+{
+    auto expected_dup_handle = m_client->ConfiguredNetworkGroup_dup_handle(m_identifier, OsUtils::get_curr_pid());
     CHECK_EXPECTED_AS_STATUS(expected_dup_handle);
-    m_handle = expected_dup_handle.value();
 
     return HAILO_SUCCESS;
 }
@@ -99,7 +110,7 @@ Expected<std::unique_ptr<ActivatedNetworkGroup>> ConfiguredNetworkGroupClient::a
 /* Network group base functions */
 Expected<LatencyMeasurementResult> ConfiguredNetworkGroupClient::get_latency_measurement(const std::string &network_name)
 {
-    return m_client->ConfiguredNetworkGroup_get_latency_measurement(m_handle, network_name);
+    return m_client->ConfiguredNetworkGroup_get_latency_measurement(m_identifier, network_name);
 }
 
 const std::string &ConfiguredNetworkGroupClient::get_network_group_name() const
@@ -114,7 +125,7 @@ const std::string &ConfiguredNetworkGroupClient::name() const
 
 Expected<hailo_stream_interface_t> ConfiguredNetworkGroupClient::get_default_streams_interface()
 {
-    return m_client->ConfiguredNetworkGroup_get_default_stream_interface(m_handle);
+    return m_client->ConfiguredNetworkGroup_get_default_stream_interface(m_identifier);
 }
 
 std::vector<std::reference_wrapper<InputStream>> ConfiguredNetworkGroupClient::get_input_streams_by_interface(hailo_stream_interface_t)
@@ -183,13 +194,13 @@ hailo_status ConfiguredNetworkGroupClient::wait_for_activation(const std::chrono
 
 Expected<std::vector<std::vector<std::string>>> ConfiguredNetworkGroupClient::get_output_vstream_groups()
 {
-    return m_client->ConfiguredNetworkGroup_get_output_vstream_groups(m_handle);
+    return m_client->ConfiguredNetworkGroup_get_output_vstream_groups(m_identifier);
 }
 
 Expected<std::vector<std::map<std::string, hailo_vstream_params_t>>> ConfiguredNetworkGroupClient::make_output_vstream_params_groups(
     bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size)
 {
-    return m_client->ConfiguredNetworkGroup_make_output_vstream_params_groups(m_handle,
+    return m_client->ConfiguredNetworkGroup_make_output_vstream_params_groups(m_identifier,
         quantized, format_type, timeout_ms, queue_size);
 }
 
@@ -197,7 +208,7 @@ Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroupCl
     bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
     const std::string &network_name)
 {
-    return m_client->ConfiguredNetworkGroup_make_input_vstream_params(m_handle,
+    return m_client->ConfiguredNetworkGroup_make_input_vstream_params(m_identifier,
         quantized, format_type, timeout_ms, queue_size, network_name);
 }
 
@@ -205,41 +216,41 @@ Expected<std::map<std::string, hailo_vstream_params_t>> ConfiguredNetworkGroupCl
     bool quantized, hailo_format_type_t format_type, uint32_t timeout_ms, uint32_t queue_size,
     const std::string &network_name)
 {
-    return m_client->ConfiguredNetworkGroup_make_output_vstream_params(m_handle,
+    return m_client->ConfiguredNetworkGroup_make_output_vstream_params(m_identifier,
         quantized, format_type, timeout_ms, queue_size, network_name);
 }
 
 Expected<std::vector<hailo_stream_info_t>> ConfiguredNetworkGroupClient::get_all_stream_infos(const std::string &network_name) const
 {
-    return m_client->ConfiguredNetworkGroup_get_all_stream_infos(m_handle, network_name);
+    return m_client->ConfiguredNetworkGroup_get_all_stream_infos(m_identifier, network_name);
 }
 
 Expected<std::vector<hailo_network_info_t>> ConfiguredNetworkGroupClient::get_network_infos() const
 {
-    return m_client->ConfiguredNetworkGroup_get_network_infos(m_handle);
+    return m_client->ConfiguredNetworkGroup_get_network_infos(m_identifier);
 }
 
 Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupClient::get_input_vstream_infos(
     const std::string &network_name) const
 {
-    return m_client->ConfiguredNetworkGroup_get_input_vstream_infos(m_handle, network_name);
+    return m_client->ConfiguredNetworkGroup_get_input_vstream_infos(m_identifier, network_name);
 }
 
 Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupClient::get_output_vstream_infos(
     const std::string &network_name) const
 {
-    return m_client->ConfiguredNetworkGroup_get_output_vstream_infos(m_handle, network_name);
+    return m_client->ConfiguredNetworkGroup_get_output_vstream_infos(m_identifier, network_name);
 }
 
 Expected<std::vector<hailo_vstream_info_t>> ConfiguredNetworkGroupClient::get_all_vstream_infos(
     const std::string &network_name) const
 {
-    return m_client->ConfiguredNetworkGroup_get_all_vstream_infos(m_handle, network_name);
+    return m_client->ConfiguredNetworkGroup_get_all_vstream_infos(m_identifier, network_name);
 }
 
 bool ConfiguredNetworkGroupClient::is_scheduled() const
 {
-    auto reply = m_client->ConfiguredNetworkGroup_is_scheduled(m_handle);
+    auto reply = m_client->ConfiguredNetworkGroup_is_scheduled(m_identifier);
     if (reply.status() != HAILO_SUCCESS) {
         LOGGER__ERROR("is_scheduled failed with status {}", reply.status());
         return false;
@@ -249,17 +260,17 @@ bool ConfiguredNetworkGroupClient::is_scheduled() const
 
 hailo_status ConfiguredNetworkGroupClient::set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name)
 {
-    return m_client->ConfiguredNetworkGroup_set_scheduler_timeout(m_handle, timeout, network_name);
+    return m_client->ConfiguredNetworkGroup_set_scheduler_timeout(m_identifier, timeout, network_name);
 }
 
 hailo_status ConfiguredNetworkGroupClient::set_scheduler_threshold(uint32_t threshold, const std::string &network_name)
 {
-    return m_client->ConfiguredNetworkGroup_set_scheduler_threshold(m_handle, threshold, network_name);
+    return m_client->ConfiguredNetworkGroup_set_scheduler_threshold(m_identifier, threshold, network_name);
 }
 
 hailo_status ConfiguredNetworkGroupClient::set_scheduler_priority(uint8_t priority, const std::string &network_name)
 {
-    return m_client->ConfiguredNetworkGroup_set_scheduler_priority(m_handle, priority, network_name);
+    return m_client->ConfiguredNetworkGroup_set_scheduler_priority(m_identifier, priority, network_name);
 }
 
 AccumulatorPtr ConfiguredNetworkGroupClient::get_activation_time_accumulator() const
@@ -276,7 +287,7 @@ AccumulatorPtr ConfiguredNetworkGroupClient::get_deactivation_time_accumulator()
 
 bool ConfiguredNetworkGroupClient::is_multi_context() const
 {
-    auto reply = m_client->ConfiguredNetworkGroup_is_multi_context(m_handle);
+    auto reply = m_client->ConfiguredNetworkGroup_is_multi_context(m_identifier);
     if (reply.status() != HAILO_SUCCESS) {
         LOGGER__ERROR("is_multi_context failed with status {}", reply.status());
         return false;
@@ -292,7 +303,7 @@ Expected<HwInferResults> ConfiguredNetworkGroupClient::run_hw_infer_estimator()
 
 const ConfigureNetworkParams ConfiguredNetworkGroupClient::get_config_params() const
 {
-    auto reply = m_client->ConfiguredNetworkGroup_get_config_params(m_handle);
+    auto reply = m_client->ConfiguredNetworkGroup_get_config_params(m_identifier);
     if (reply.status() != HAILO_SUCCESS) {
         LOGGER__ERROR("get_config_params failed with status {}", reply.status());
         return ConfigureNetworkParams();
@@ -302,29 +313,29 @@ const ConfigureNetworkParams ConfiguredNetworkGroupClient::get_config_params() c
 
 Expected<std::vector<std::string>> ConfiguredNetworkGroupClient::get_sorted_output_names()
 {
-    return m_client->ConfiguredNetworkGroup_get_sorted_output_names(m_handle);
+    return m_client->ConfiguredNetworkGroup_get_sorted_output_names(m_identifier);
 }
 
 Expected<std::vector<std::string>> ConfiguredNetworkGroupClient::get_stream_names_from_vstream_name(const std::string &vstream_name)
 {
-    return m_client->ConfiguredNetworkGroup_get_stream_names_from_vstream_name(m_handle, vstream_name);
+    return m_client->ConfiguredNetworkGroup_get_stream_names_from_vstream_name(m_identifier, vstream_name);
 }
 
 Expected<std::vector<std::string>> ConfiguredNetworkGroupClient::get_vstream_names_from_stream_name(const std::string &stream_name)
 {
-    return m_client->ConfiguredNetworkGroup_get_vstream_names_from_stream_name(m_handle, stream_name);
+    return m_client->ConfiguredNetworkGroup_get_vstream_names_from_stream_name(m_identifier, stream_name);
 }
 
 Expected<std::vector<InputVStream>> ConfiguredNetworkGroupClient::create_input_vstreams(const std::map<std::string, hailo_vstream_params_t> &inputs_params)
 {
-    auto reply = m_client->InputVStreams_create(m_handle, inputs_params, OsUtils::get_curr_pid());
+    auto reply = m_client->InputVStreams_create(m_identifier, inputs_params, OsUtils::get_curr_pid());
     CHECK_EXPECTED(reply);
     auto input_vstreams_handles = reply.release();
     std::vector<InputVStream> vstreams;
     vstreams.reserve(input_vstreams_handles.size());
 
     for (uint32_t handle : input_vstreams_handles) {
-        auto vstream_client = InputVStreamClient::create(handle);
+        auto vstream_client = InputVStreamClient::create(VStreamIdentifier(m_identifier, handle));
         CHECK_EXPECTED(vstream_client);
         auto vstream = VStreamsBuilderUtils::create_input(vstream_client.release());
         vstreams.push_back(std::move(vstream));
@@ -334,14 +345,14 @@ Expected<std::vector<InputVStream>> ConfiguredNetworkGroupClient::create_input_v
 
 Expected<std::vector<OutputVStream>> ConfiguredNetworkGroupClient::create_output_vstreams(const std::map<std::string, hailo_vstream_params_t> &outputs_params)
 {
-    auto reply = m_client->OutputVStreams_create(m_handle, outputs_params, OsUtils::get_curr_pid());
+    auto reply = m_client->OutputVStreams_create(m_identifier, outputs_params, OsUtils::get_curr_pid());
     CHECK_EXPECTED(reply);
     auto output_vstreams_handles = reply.release();
     std::vector<OutputVStream> vstreams;
     vstreams.reserve(output_vstreams_handles.size());
 
     for(uint32_t handle : output_vstreams_handles) {
-        auto vstream_client = OutputVStreamClient::create(handle);
+        auto vstream_client = OutputVStreamClient::create(VStreamIdentifier(m_identifier, handle));
         CHECK_EXPECTED(vstream_client);
         auto vstream = VStreamsBuilderUtils::create_output(vstream_client.release());
         vstreams.push_back(std::move(vstream));
index 99d8444be5d396adae498dfc4bf7c24b41372888..421ee4d09cda4e7ce42927fc7502324b737a924e 100644 (file)
@@ -45,7 +45,7 @@ public:
 
     static Expected<std::unique_ptr<HailoRtRpcClient>> create_client()
     {
-        auto channel = grpc::CreateChannel(HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials());
+        auto channel = grpc::CreateChannel(hailort::HAILORT_SERVICE_ADDRESS, grpc::InsecureChannelCredentials());
         CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
         auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
         CHECK_NOT_NULL_AS_EXPECTED(client, HAILO_INTERNAL_FAILURE);
@@ -57,7 +57,7 @@ public:
         std::unique_lock<std::mutex> lock(*m_mutex);
         if (!m_initialized) {
             // Create client
-            auto channel = grpc::CreateChannel(hailort::HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials());
+            auto channel = grpc::CreateChannel(hailort::HAILORT_SERVICE_ADDRESS, grpc::InsecureChannelCredentials());
             auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
             CHECK_NOT_NULL(client, HAILO_OUT_OF_HOST_MEMORY);
 
@@ -145,7 +145,7 @@ private:
 
     hailo_status keep_alive()
     {
-        auto channel = grpc::CreateChannel(hailort::HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials());
+        auto channel = grpc::CreateChannel(hailort::HAILORT_SERVICE_ADDRESS, grpc::InsecureChannelCredentials());
         auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
         CHECK_NOT_NULL(client, HAILO_OUT_OF_HOST_MEMORY);
 
@@ -164,8 +164,9 @@ private:
 
     hailo_status init_keep_alive_shutdown_event()
     {
-        m_keep_alive_shutdown_event = Event::create_shared(Event::State::not_signalled);
-        CHECK(nullptr != m_keep_alive_shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
+        auto shutdown_event_exp = Event::create_shared(Event::State::not_signalled);
+        CHECK_EXPECTED_AS_STATUS(shutdown_event_exp);
+        m_keep_alive_shutdown_event = shutdown_event_exp.release();
 
         return HAILO_SUCCESS;
     }
index cacbbb2546c8edb848ce34df74c9e0a3cf1f3ccb..06d4e60f43b2ff6f8d5e859011b0cc6f3fe02367 100644 (file)
@@ -3,7 +3,11 @@ cmake_minimum_required(VERSION 3.0.0)
 set(SRC_FILES
     ${CMAKE_CURRENT_SOURCE_DIR}/stream.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/stream_internal.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/nms_stream_reader.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/async_stream_base.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/nms_stream.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/remote_process_stream.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/transfer_common.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/queued_stream_buffer_pool.cpp
 )
 
 set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
diff --git a/hailort/libhailort/src/stream_common/async_common.hpp b/hailort/libhailort/src/stream_common/async_common.hpp
deleted file mode 100644 (file)
index 31c39c8..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
-**/
-/**
- * @file async_common.hpp
- * @brief Common types/functions for async api
- **/
-
-#ifndef _HAILO_ASYNC_COMMON_HPP_
-#define _HAILO_ASYNC_COMMON_HPP_
-
-#include "hailo/stream.hpp"
-
-namespace hailort
-{
-
-// Internal function, wrapper to the user callbacks, accepts the callback status as an argument.
-using InternalTransferDoneCallback = std::function<void(hailo_status)>;
-
-struct TransferRequest {
-    MemoryView buffer;
-    InternalTransferDoneCallback callback;
-
-    // Optional pre-mapped user buffer. If set, mapped_buffer must be the same as the "buffer"
-    BufferPtr mapped_buffer = nullptr;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_ASYNC_COMMON_HPP_ */
diff --git a/hailort/libhailort/src/stream_common/async_stream_base.cpp b/hailort/libhailort/src/stream_common/async_stream_base.cpp
new file mode 100644 (file)
index 0000000..58278d8
--- /dev/null
@@ -0,0 +1,543 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file async_stream_base.cpp
+ **/
+
+#include "async_stream_base.hpp"
+
+namespace hailort
+{
+
+// Currently there is 1-1 relation between buffer mode and api (sync vs async).
+// This function returns the API name for the buffer mode for better user logging.
+static const char *get_buffer_mode_api_name(StreamBufferMode mode)
+{
+    switch (mode) {
+    case StreamBufferMode::OWNING:
+        return "Sync";
+    case StreamBufferMode::NOT_OWNING:
+        return "Async";
+    case StreamBufferMode::NOT_SET:
+        return "Unset";
+    default:
+        return "Unknown";
+    }
+}
+
+AsyncInputStreamBase::AsyncInputStreamBase(const LayerInfo &edge_layer,
+    hailo_stream_interface_t stream_interface, EventPtr core_op_activated_event, hailo_status &status) :
+        InputStreamBase(edge_layer, stream_interface, core_op_activated_event, status),
+        m_is_stream_activated(false),
+        m_is_aborted(false),
+        m_timeout(DEFAULT_TRANSFER_TIMEOUT),
+        m_buffer_mode(StreamBufferMode::NOT_SET),
+        m_ongoing_transfers(0),
+        m_interrupt_callback(ignore_interrupts_callback)
+{}
+
+hailo_status AsyncInputStreamBase::abort()
+{
+    {
+        std::lock_guard<std::mutex> lock(m_stream_mutex);
+        m_is_aborted = true;
+    }
+    m_has_ready_buffer.notify_all();
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncInputStreamBase::clear_abort()
+{
+    {
+        std::lock_guard<std::mutex> lock(m_stream_mutex);
+        m_is_aborted = false;
+    }
+
+    return HAILO_SUCCESS;
+}
+
+void AsyncInputStreamBase::notify_all()
+{
+    {
+        // Acquire mutex to make sure the notify_all will wake the blocking threads on the cv.
+        std::unique_lock<std::mutex> lock(m_stream_mutex);
+    }
+
+    m_has_ready_buffer.notify_all();
+}
+
+hailo_status AsyncInputStreamBase::set_buffer_mode(StreamBufferMode buffer_mode)
+{
+    CHECK(StreamBufferMode::NOT_SET != buffer_mode, HAILO_INVALID_OPERATION, "Can't set buffer mode to NOT_SET");
+
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+    if (m_buffer_mode == buffer_mode) {
+        // Nothing to be done
+        return HAILO_SUCCESS;
+    }
+
+    CHECK(StreamBufferMode::NOT_SET == m_buffer_mode, HAILO_INVALID_OPERATION, "Invalid {} operation on {} stream",
+        get_buffer_mode_api_name(buffer_mode), get_buffer_mode_api_name(m_buffer_mode));
+    m_buffer_mode = buffer_mode;
+
+    if (buffer_mode == StreamBufferMode::OWNING) {
+        assert(m_buffer_pool == nullptr);
+        auto buffer_pool = allocate_buffer_pool();
+        CHECK_EXPECTED_AS_STATUS(buffer_pool);
+        m_buffer_pool = buffer_pool.release();
+    }
+
+    return HAILO_SUCCESS;
+}
+
+std::chrono::milliseconds AsyncInputStreamBase::get_timeout() const
+{
+    return m_timeout;
+}
+
+hailo_status AsyncInputStreamBase::set_timeout(std::chrono::milliseconds timeout)
+{
+    m_timeout = timeout;
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncInputStreamBase::flush()
+{
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+
+    if (0 == m_ongoing_transfers) {
+        return HAILO_SUCCESS;
+    }
+
+    const auto flush_timeout = m_ongoing_transfers.load() * m_timeout;
+    return cv_wait_for(lock, flush_timeout, [this]() {
+        return m_ongoing_transfers == 0;
+    });
+}
+
+hailo_status AsyncInputStreamBase::write_impl(const MemoryView &user_buffer, std::function<bool()> should_cancel)
+{
+    auto status = set_buffer_mode(StreamBufferMode::OWNING);
+    CHECK_SUCCESS(status);
+
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+    auto is_ready = [this]() { return is_ready_for_transfer() && is_ready_for_dequeue(); };
+    status = cv_wait_for(lock, m_timeout, is_ready, should_cancel);
+    if (HAILO_SUCCESS != status) {
+        // errors logs on cv_wait_for
+        return status;
+    }
+
+    auto stream_buffer_exp = m_buffer_pool->dequeue();
+    CHECK_EXPECTED_AS_STATUS(stream_buffer_exp);
+    auto stream_buffer = stream_buffer_exp.release();
+
+    status = stream_buffer.copy_from(user_buffer);
+    CHECK_SUCCESS(status);
+
+    return call_write_async_impl(TransferRequest{
+        stream_buffer,
+        [this, stream_buffer](hailo_status) {
+            std::unique_lock<std::mutex> lock(m_stream_mutex);
+            auto enqueue_status = m_buffer_pool->enqueue(TransferBuffer{stream_buffer});
+            if (HAILO_SUCCESS != enqueue_status) {
+                LOGGER__ERROR("Failed enqueue stream buffer {}", enqueue_status);
+            }
+        }
+    });
+}
+
+hailo_status AsyncInputStreamBase::write_impl(const MemoryView &user_buffer)
+{
+    const auto SHOULD_CANCEL = []() { return false; };
+    return write_impl(user_buffer, SHOULD_CANCEL);
+}
+
+hailo_status AsyncInputStreamBase::register_interrupt_callback(const ProcessingCompleteCallback &callback)
+{
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+    m_interrupt_callback = callback;
+    return HAILO_SUCCESS;
+}
+
+Expected<size_t> AsyncInputStreamBase::get_buffer_frames_size() const
+{
+    return get_max_ongoing_transfers();
+}
+
+Expected<size_t> AsyncInputStreamBase::get_async_max_queue_size() const
+{
+    return get_max_ongoing_transfers();
+}
+
+hailo_status AsyncInputStreamBase::wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout)
+{
+    auto status = set_buffer_mode(StreamBufferMode::NOT_OWNING);
+    CHECK_SUCCESS(status);
+
+    CHECK(transfer_size == get_frame_size(), HAILO_INVALID_OPERATION, "transfer size {} is expected to be {}",
+        transfer_size, get_frame_size());
+
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+    return cv_wait_for(lock, timeout, [this]() {
+        return is_ready_for_transfer();
+    });
+}
+
+hailo_status AsyncInputStreamBase::write_async(TransferRequest &&transfer_request)
+{
+    auto status = set_buffer_mode(StreamBufferMode::NOT_OWNING);
+    CHECK_SUCCESS(status);
+
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+    return call_write_async_impl(std::move(transfer_request));
+}
+
+hailo_status AsyncInputStreamBase::activate_stream()
+{
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+
+    auto status = activate_stream_impl();
+    CHECK_SUCCESS(status);
+
+    m_is_stream_activated = true;
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncInputStreamBase::deactivate_stream()
+{
+    hailo_status status = HAILO_SUCCESS; // success oriented
+
+    {
+        std::unique_lock<std::mutex> lock(m_stream_mutex);
+
+        if (!m_is_stream_activated) {
+            return HAILO_SUCCESS;
+        }
+
+        auto deactivate_channel_status = deactivate_stream_impl();
+        if (HAILO_SUCCESS != deactivate_channel_status) {
+            LOGGER__ERROR("Failed to stop channel with status {}", deactivate_channel_status);
+            status = deactivate_channel_status;
+        }
+
+        m_is_stream_activated = false;
+    }
+    m_has_ready_buffer.notify_all();
+
+    return status;
+}
+
+hailo_status AsyncInputStreamBase::call_write_async_impl(TransferRequest &&transfer_request)
+{
+    transfer_request.callback = [this, callback=transfer_request.callback](hailo_status callback_status) {
+        if (HAILO_SUCCESS == callback_status) {
+            // Calling interrupt callback first (only if successful), since callback() may update the state (and we call
+            // interrupt_callback before the state is activated).
+            m_interrupt_callback();
+        }
+
+        callback(callback_status);
+
+        {
+            std::lock_guard<std::mutex> lock(m_stream_mutex);
+            m_ongoing_transfers--;
+        }
+
+        m_has_ready_buffer.notify_all();
+    };
+
+
+    auto status = write_async_impl(std::move(transfer_request));
+    if ((HAILO_STREAM_NOT_ACTIVATED == status) || (HAILO_STREAM_ABORTED_BY_USER == status)) {
+        return status;
+    }
+    CHECK_SUCCESS(status);
+
+    m_ongoing_transfers++;
+
+    return HAILO_SUCCESS;
+}
+
+bool AsyncInputStreamBase::is_ready_for_transfer() const
+{
+    return m_ongoing_transfers < get_max_ongoing_transfers();
+}
+
+bool AsyncInputStreamBase::is_ready_for_dequeue() const
+{
+    return m_ongoing_transfers < m_buffer_pool->max_queue_size();
+}
+
+AsyncOutputStreamBase::AsyncOutputStreamBase(const LayerInfo &edge_layer, hailo_stream_interface_t interface,
+    EventPtr core_op_activated_event, hailo_status &status) :
+        OutputStreamBase(edge_layer, interface, std::move(core_op_activated_event), status),
+        m_is_stream_activated(false),
+        m_is_aborted(false),
+        m_timeout(DEFAULT_TRANSFER_TIMEOUT),
+        m_buffer_mode(StreamBufferMode::NOT_SET),
+        m_ongoing_transfers(0),
+        m_interrupt_callback(ignore_interrupts_callback)
+{}
+
+hailo_status AsyncOutputStreamBase::abort()
+{
+    {
+        std::lock_guard<std::mutex> lock(m_stream_mutex);
+        m_is_aborted = true;
+    }
+    m_has_ready_buffer.notify_all();
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncOutputStreamBase::clear_abort()
+{
+    {
+        std::lock_guard<std::mutex> lock(m_stream_mutex);
+        m_is_aborted = false;
+    }
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncOutputStreamBase::wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout)
+{
+    auto status = set_buffer_mode(StreamBufferMode::NOT_OWNING);
+    CHECK_SUCCESS(status);
+
+    CHECK(transfer_size == get_frame_size(), HAILO_INVALID_OPERATION, "transfer size {} is expected to be {}",
+        transfer_size, get_frame_size());
+
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+    return cv_wait_for(lock, timeout, [this]() {
+        return is_ready_for_transfer();
+    });
+}
+
+Expected<size_t> AsyncOutputStreamBase::get_async_max_queue_size() const
+{
+    return get_max_ongoing_transfers();
+}
+
+hailo_status AsyncOutputStreamBase::read_async(TransferRequest &&transfer_request)
+{
+    auto status = set_buffer_mode(StreamBufferMode::NOT_OWNING);
+    CHECK_SUCCESS(status);
+
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+    return call_read_async_impl(std::move(transfer_request));
+}
+
+hailo_status AsyncOutputStreamBase::call_read_async_impl(TransferRequest &&transfer_request)
+{
+    transfer_request.callback = [this, callback=transfer_request.callback](hailo_status callback_status) {
+        if (HAILO_SUCCESS == callback_status) {
+            // Calling interrupt callback first (only if successful), since callback() may update the state (and we call
+            // interrupt_callback before the state is activated).
+            m_interrupt_callback();
+        }
+
+        callback(callback_status);
+
+        {
+            std::lock_guard<std::mutex> lock(m_stream_mutex);
+            m_ongoing_transfers--;
+        }
+
+        m_has_ready_buffer.notify_all();
+    };
+
+
+    auto status = read_async_impl(std::move(transfer_request));
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        return status;
+    }
+    CHECK_SUCCESS(status);
+
+    m_ongoing_transfers++;
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncOutputStreamBase::register_interrupt_callback(const ProcessingCompleteCallback &callback)
+{
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+    m_interrupt_callback = callback;
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncOutputStreamBase::activate_stream()
+{
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+
+    auto status = activate_stream_impl();
+    CHECK_SUCCESS(status);
+
+    // If the OWNING mode is set, it means we use the read/read_impl API.
+    // We need to clear all pending buffers, and prepare transfers for next read requests.
+    if (StreamBufferMode::OWNING == m_buffer_mode) {
+        m_pending_buffers.clear();
+        m_buffer_pool->reset_pointers();
+
+        status = prepare_all_transfers();
+        CHECK_SUCCESS(status);
+    }
+
+    m_is_stream_activated = true;
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncOutputStreamBase::deactivate_stream()
+{
+    hailo_status status = HAILO_SUCCESS; // success oriented
+
+    {
+        std::unique_lock<std::mutex> lock(m_stream_mutex);
+
+        if (!m_is_stream_activated) {
+            return HAILO_SUCCESS;
+        }
+
+        m_is_stream_activated = false;
+
+        auto deactivate_status = deactivate_stream_impl();
+        if (HAILO_SUCCESS != deactivate_status) {
+            LOGGER__ERROR("Failed to stop stream with status {}", deactivate_status);
+            status = deactivate_status;
+        }
+    }
+    m_has_ready_buffer.notify_all();
+
+    return status;
+}
+
+bool AsyncOutputStreamBase::is_ready_for_transfer() const
+{
+    return m_ongoing_transfers < get_max_ongoing_transfers();
+}
+
+hailo_status AsyncOutputStreamBase::prepare_all_transfers()
+{
+    const auto max_transfers_in_buffer = get_buffer_frames_size();
+    CHECK_EXPECTED_AS_STATUS(max_transfers_in_buffer);
+
+    assert(*max_transfers_in_buffer >= m_pending_buffers.size());
+    const auto transfers_count = *max_transfers_in_buffer - m_pending_buffers.size();
+    for (size_t i = 0; i < transfers_count; i++) {
+        auto status = dequeue_and_launch_transfer();
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncOutputStreamBase::set_buffer_mode(StreamBufferMode buffer_mode)
+{
+    CHECK(StreamBufferMode::NOT_SET != buffer_mode, HAILO_INVALID_OPERATION, "Can't set buffer mode to NOT_SET");
+
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+    if (m_buffer_mode == buffer_mode) {
+        // Nothing to be done
+        return HAILO_SUCCESS;
+    }
+
+    CHECK(StreamBufferMode::NOT_SET == m_buffer_mode, HAILO_INVALID_OPERATION, "Invalid {} operation on {} stream",
+        get_buffer_mode_api_name(buffer_mode), get_buffer_mode_api_name(m_buffer_mode));
+    m_buffer_mode = buffer_mode;
+
+    if (buffer_mode == StreamBufferMode::OWNING) {
+        assert(m_buffer_pool == nullptr);
+        auto buffer_pool = allocate_buffer_pool();
+        CHECK_EXPECTED_AS_STATUS(buffer_pool);
+        m_buffer_pool = buffer_pool.release();
+
+        if (m_is_stream_activated) {
+            // if the streams are not activated, the transfers will be prepared on next activation.
+            auto status = prepare_all_transfers();
+            CHECK_SUCCESS(status);
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncOutputStreamBase::set_timeout(std::chrono::milliseconds timeout)
+{
+    m_timeout = timeout;
+    return HAILO_SUCCESS;
+}
+
+std::chrono::milliseconds AsyncOutputStreamBase::get_timeout() const
+{
+    return m_timeout;
+}
+
+Expected<size_t> AsyncOutputStreamBase::get_buffer_frames_size() const
+{
+    return get_max_ongoing_transfers();
+}
+
+
+hailo_status AsyncOutputStreamBase::read_impl(MemoryView user_buffer)
+{
+    auto status = set_buffer_mode(StreamBufferMode::OWNING);
+    CHECK_SUCCESS(status);
+
+    // Dequeue pending buffer, read it into user_buffer and return the buffer back to the pool.
+    std::unique_lock<std::mutex> lock(m_stream_mutex);
+    status = cv_wait_for(lock, m_timeout, [this]() { return !m_pending_buffers.empty(); });
+    if (HAILO_SUCCESS != status) {
+        // errors logs on cv_wait_for
+        return status;
+    }
+
+    auto stream_buffer = m_pending_buffers.dequeue();
+    CHECK_EXPECTED_AS_STATUS(stream_buffer);
+
+    status = stream_buffer->copy_to(user_buffer);
+    CHECK_SUCCESS(status);
+
+    status = m_buffer_pool->enqueue(stream_buffer.release());
+    CHECK_SUCCESS(status);
+
+    status = dequeue_and_launch_transfer();
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        // The buffer_pool state will reset on next activation.
+        return status;
+    }
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status AsyncOutputStreamBase::dequeue_and_launch_transfer()
+{
+    auto buffer = m_buffer_pool->dequeue();
+    CHECK_EXPECTED_AS_STATUS(buffer);
+
+    auto callback  = [this, buffer=buffer.value()](hailo_status status) {
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            // On deactivation flow, we should get this status. We just ignore the callback here, and in the next
+            // activation we should reset the buffers.
+            return;
+        }
+
+        status = m_pending_buffers.enqueue(TransferBuffer{buffer});
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed to enqueue pending buffer {}", status);
+        }
+    };
+
+    auto status = call_read_async_impl(TransferRequest{buffer.value(), callback});
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        // The buffer_pool state will reset on next activation.
+        return status;
+    }
+    CHECK_SUCCESS(status, "Fatal error {} while launching transfer. state may be corrupted", status);
+
+    return HAILO_SUCCESS;
+}
+
+} /* namespace hailort */
diff --git a/hailort/libhailort/src/stream_common/async_stream_base.hpp b/hailort/libhailort/src/stream_common/async_stream_base.hpp
new file mode 100644 (file)
index 0000000..20335a5
--- /dev/null
@@ -0,0 +1,215 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file async_stream_base.hpp
+ * @brief Base class for async streams, implements
+ *          1. Sync api (over async using buffer pool).
+ *          2. The full async stream api, including waiting.
+ **/
+
+#ifndef _HAILO_ASYNC_STREAM_BASE_HPP_
+#define _HAILO_ASYNC_STREAM_BASE_HPP_
+
+#include "stream_common/stream_internal.hpp"
+#include "stream_common/stream_buffer_pool.hpp"
+
+#include "utils/thread_safe_queue.hpp"
+
+namespace hailort
+{
+
+class AsyncInputStreamBase : public InputStreamBase {
+public:
+    AsyncInputStreamBase(const LayerInfo &edge_layer,
+        hailo_stream_interface_t stream_interface, EventPtr core_op_activated_event, hailo_status &status);
+
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override;
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+    virtual hailo_status flush() override;
+
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+
+    virtual void notify_all() override;
+
+    virtual hailo_status register_interrupt_callback(const ProcessingCompleteCallback &callback) override;
+    virtual Expected<size_t> get_buffer_frames_size() const override;
+    virtual Expected<size_t> get_async_max_queue_size() const override;
+    virtual hailo_status wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout) override;
+    virtual hailo_status write_async(TransferRequest &&transfer_request) override;
+
+    virtual hailo_status write_impl(const MemoryView &buffer, std::function<bool()> should_cancel);
+    virtual hailo_status write_impl(const MemoryView &buffer) override;
+
+    virtual hailo_status activate_stream() override;
+    virtual hailo_status deactivate_stream() override;
+
+    // APIs to be implemented by subclass want to get sync over async
+    virtual Expected<std::unique_ptr<StreamBufferPool>> allocate_buffer_pool() = 0;
+    virtual size_t get_max_ongoing_transfers() const = 0;
+    virtual hailo_status write_async_impl(TransferRequest &&transfer_request) = 0;
+    virtual hailo_status activate_stream_impl() { return HAILO_SUCCESS; }
+    virtual hailo_status deactivate_stream_impl() { return HAILO_SUCCESS; }
+
+protected:
+    StreamBufferMode buffer_mode() const { return m_buffer_mode; }
+
+private:
+    hailo_status call_write_async_impl(TransferRequest &&transfer_request);
+
+    bool is_ready_for_transfer() const;
+    bool is_ready_for_dequeue() const;
+
+    static void ignore_interrupts_callback() {}
+
+    template<typename Pred>
+    hailo_status cv_wait_for(std::unique_lock<std::mutex> &lock, std::chrono::milliseconds timeout, Pred &&pred,
+        std::function<bool()> should_cancel = [](){ return false; })
+    {
+        hailo_status status = HAILO_SUCCESS;
+        const auto wait_done = m_has_ready_buffer.wait_for(lock, timeout,
+            [this, pred, should_cancel, &status] {
+                if (m_is_aborted || should_cancel()) {
+                    status = HAILO_STREAM_ABORTED_BY_USER;
+                    return true;
+                }
+
+                if (!m_is_stream_activated) {
+                    status = HAILO_STREAM_NOT_ACTIVATED;
+                    return true;
+                }
+
+                return pred();
+            }
+        );
+        if (!wait_done) {
+            LOGGER__ERROR("Got HAILO_TIMEOUT while waiting for input stream buffer {}", name());
+            return HAILO_TIMEOUT;
+        } else if (HAILO_SUCCESS != status) {
+            LOGGER__TRACE("Waiting for stream buffer exit with {}", status);
+            return status;
+        }
+        return status;
+    }
+
+    bool m_is_stream_activated;
+    bool m_is_aborted;
+    std::chrono::milliseconds m_timeout;
+
+    std::mutex m_stream_mutex;
+    StreamBufferMode m_buffer_mode;
+
+    std::unique_ptr<StreamBufferPool> m_buffer_pool;
+
+    std::atomic_size_t m_ongoing_transfers;
+
+    // Conditional variable that is used to check if we have some buffer in m_buffer_pool ready to be written to.
+    std::condition_variable m_has_ready_buffer;
+
+    ProcessingCompleteCallback m_interrupt_callback;
+};
+
+
+class AsyncOutputStreamBase : public OutputStreamBase {
+public:
+    AsyncOutputStreamBase(const LayerInfo &edge_layer, hailo_stream_interface_t stream_interface,
+        EventPtr core_op_activated_event, hailo_status &status);
+
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override;
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+    virtual hailo_status register_interrupt_callback(const ProcessingCompleteCallback &callback) override;
+
+    virtual hailo_status wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout) override;
+    virtual Expected<size_t> get_async_max_queue_size() const override;
+    virtual hailo_status read_async(TransferRequest &&transfer_request) override;
+
+    virtual hailo_status read_impl(MemoryView buffer) override;
+
+    virtual Expected<size_t> get_buffer_frames_size() const override;
+
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+
+    virtual hailo_status activate_stream() override;
+    virtual hailo_status deactivate_stream() override;
+
+    // APIs to be implemented by subclass want to get sync over async
+    virtual Expected<std::unique_ptr<StreamBufferPool>> allocate_buffer_pool() = 0;
+    virtual size_t get_max_ongoing_transfers() const = 0;
+    virtual hailo_status read_async_impl(TransferRequest &&transfer_request) = 0;
+    virtual hailo_status activate_stream_impl() { return HAILO_SUCCESS; }
+    virtual hailo_status deactivate_stream_impl() { return HAILO_SUCCESS; }
+
+protected:
+    StreamBufferMode buffer_mode() const { return m_buffer_mode; }
+
+private:
+    hailo_status call_read_async_impl(TransferRequest &&transfer_request);
+
+    bool is_ready_for_transfer() const;
+
+    // Prepare transfers ahead for future reads. This function will launch transfers until the channel queue is filled.
+    hailo_status prepare_all_transfers();
+
+    hailo_status dequeue_and_launch_transfer();
+
+    static void ignore_interrupts_callback() {}
+
+    template<typename Pred>
+    hailo_status cv_wait_for(std::unique_lock<std::mutex> &lock, std::chrono::milliseconds timeout, Pred &&pred)
+    {
+        hailo_status status = HAILO_SUCCESS;
+        const auto wait_done = m_has_ready_buffer.wait_for(lock, timeout,
+            [this, pred, &status] {
+                if (m_is_aborted) {
+                    status = HAILO_STREAM_ABORTED_BY_USER;
+                    return true;
+                }
+
+                if (!m_is_stream_activated) {
+                    status = HAILO_STREAM_NOT_ACTIVATED;
+                    return true;
+                }
+
+                return pred();
+            }
+        );
+        if (!wait_done) {
+            LOGGER__ERROR("Got HAILO_TIMEOUT while waiting for output stream buffer {}", name());
+            return HAILO_TIMEOUT;
+        } else if (HAILO_SUCCESS != status) {
+            LOGGER__TRACE("Waiting for stream buffer exit with {}", status);
+            return status;
+        }
+        return status;
+    }
+
+    bool m_is_stream_activated;
+    bool m_is_aborted;
+    std::chrono::milliseconds m_timeout;
+
+    StreamBufferMode m_buffer_mode;
+
+    std::mutex m_stream_mutex;
+
+    std::unique_ptr<StreamBufferPool> m_buffer_pool;
+
+    // Queue of buffers that were read from the hw and are pending to be read by the user.
+    SafeQueue<TransferBuffer> m_pending_buffers;
+
+    std::atomic_size_t m_ongoing_transfers;
+
+    // Conditional variable that is used to check if we have some pending buffer ready to be read.
+    std::condition_variable m_has_ready_buffer;
+
+    ProcessingCompleteCallback m_interrupt_callback;
+};
+
+
+} /* namespace hailort */
+
+#endif /* _HAILO_ASYNC_STREAM_BASE_HPP_ */
diff --git a/hailort/libhailort/src/stream_common/nms_stream.cpp b/hailort/libhailort/src/stream_common/nms_stream.cpp
new file mode 100644 (file)
index 0000000..988e120
--- /dev/null
@@ -0,0 +1,458 @@
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file nms_stream.cpp
+ *
+ * Explanation of state machine and logic:
+ * This class supports the following 5 nms cases:
+ *  1) Hailo-8 bbox mode (non burst mode)
+ *  2) Hailo-15 bbox mode
+ *  3) Hailo-8 Burst mode
+ *  4) Hailo-15 Burst per class mode
+ *  5) Hailo15 Burst per frame mode
+ *
+ * Lets explain each mode and the state machine of each mode:
+ * 1)-2) Hailo-8 bbox mode / Hailo-15 bbox mode - both work the same - they read bbox by bbox from the nms core until a delimeter comes
+ *       and expect to read the amount of delimeters as the same amount of number of classes (times num chunks if more than one chunk per frame).
+ *
+ * 3) Hailo8 Burst mode - Hailo 8 burst mode reads bursts in the size of burst-size and expects each burst to be made of x bboxes and
+ *    then a delimeter and padding until the end of the burst - essentially what the state machine does here is read until the first delimeter
+ *    and then expect padding until the end of the burst (in release mode we don't check that the rest of burst is padding and
+ *    just go onto the next burst but in debug we validate that rest of burst is padding). NOTE: in Hailo-8 delimeter value and
+ *    padding value are both 0xFFFFFFFFFFFFFFFF so essentially we read until first delimeter - and every following delimeter
+ *    in burst is padding. This mode also supports interrupt per frame - assuming burst size received from SDK is larger than max bboxes + 1 (for delimeter)
+ *    we know there will be one burst per class and hence the output size will be num classes * burst size and we enable one interrupt per frame.
+ *
+ * 4) Hailo15 Burst per class mode - Hailo-15 Burst per class mode reads bursts in the size of burst size and expects the following order.
+ *    x bboxes , followed by a delimeter, followed by an image delimeter, followed by padding until the end of the burst. The bboxes, delimeter
+ *    and image delimeter can all be in different bursts - so essentially the way the state machine works is the following: we read burst by burst,
+ *    in each burst we iterate over the bboxes until we find a delimeter - once after that we know how many bboxes there were for that class,
+ *    and then we expect to see a following image delimeter after the delimeter, once we read the image delimeter we expect padding until the end of the
+ *    burst (which we ensure in debug but not in release). NOTE: if a burst ends on a delimeter we need to read the next burst to get the image delimeter
+ *    even in the case where the amount of delimeters we read is equal to the amount of classes - otherwise there is data still in the core
+ *    that was not emptied and will be read as part of the next frame. This mode also supports interrupt per frame - assuming burst size received from SDK
+ *    is larger than max bboxes + 2 (for image delimeter and delimeter) we know there will be one burst per class and hence the output size will be 
+ *    num classes * burst size and we enable one interrupt per frame.
+ *
+ * 5) Hailo15 Burst per frame mode - Hailo-15 Burst per frame mode reads bursts in the size of burst size and expects the following order.
+ *    x bboxes , followed by a delimeter, for all the classes until the last class where the last delimeter should be followed by an image delimeter
+ *    and padding until the end of the burst. The state machine works in the following way - we read burst by burst, and each time we reach a delimeter
+ *    we save the amount of bboxes that were read for that class and keep reading the burst. NOTE: this is the only mode where there can be multiple
+ *    delimeters per burst. Once we read the last delimeter (which we know from the number of classes) - we ensure there is a following image delimeter (which again
+ *    can be in the following burst) and then assume the rest of the burst is padding (and in debug we verify that). NOTE: currently this mode is not
+ *    supported in the sdk.
+ *
+ **/
+
+#include "nms_stream.hpp"
+
+#include "hef/layer_info.hpp"
+#include "common/os_utils.hpp"
+#include "stream_common/queued_stream_buffer_pool.hpp"
+
+namespace hailort
+{
+
+static void finish_reading_burst_update_state(NMSBurstState *burst_state, bool *can_stop_reading_burst, size_t *burst_index)
+{
+    *burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER;
+    *burst_index = (*burst_index + 1);
+    *can_stop_reading_burst = true;
+}
+
+// Function that implements the state machine of the 3 different nms burst modes based on the value of the current bbox and the current state.
+hailo_status NMSStreamReader::advance_state_machine(NMSBurstState *burst_state, const uint64_t current_bbox,
+    const hailo_nms_burst_type_t burst_type, const uint32_t num_classes, size_t *num_delimeters_received,
+    bool *can_stop_reading_burst, const size_t burst_offset, const size_t burst_size, size_t *burst_index)
+{
+    switch(current_bbox) {
+        // This is also the case for Hailo8 padding - seeing as they are the same value
+        case NMS_DELIMITER:
+        {
+            // If we are in hailo8 per class mode - if we are in state waiting for delimeter - we received delimeter
+            // otherwise we must be in state waiting for padding - in which case we received padding.
+            if (HAILO_BURST_TYPE_H8_PER_CLASS == burst_type) {
+                CHECK_IN_DEBUG((NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER == (*burst_state)) ||
+                    (NMSBurstState::NMS_BURST_STATE_WAITING_FOR_PADDING == (*burst_state)), HAILO_NMS_BURST_INVALID_DATA,
+                    "Invalid state, H8 NMS burst cannot receive delimeter while in state {}", (*burst_state));
+                // To differentiate from H8 padding - where we should not increment amount of delimeters found
+                if ((*burst_state) == NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER) {
+                    (*num_delimeters_received)++;
+                }
+#ifdef NDEBUG
+                // In hailo8 burst mode - if is in state waiting for delimeter and got delimeter - rest will be padding and can skip
+                if ((*burst_state) == NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER) {
+                    finish_reading_burst_update_state(burst_state, can_stop_reading_burst, burst_index);
+                    break;
+                }
+#endif
+                // In hailo8 mode after delimeter we expect padding until end of burst - seeing as h8 padding is same value
+                // Whether was in state wait for delimeter or state wait for padding - will always go to wait for padding until end of burst
+                *burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_PADDING;
+                if (burst_offset == (burst_size - sizeof(current_bbox))) {
+                    finish_reading_burst_update_state(burst_state, can_stop_reading_burst, burst_index);
+                }
+                break;
+
+            } else if (HAILO_BURST_TYPE_H15_PER_CLASS == burst_type) {
+                CHECK_IN_DEBUG(NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER == (*burst_state), HAILO_NMS_BURST_INVALID_DATA,
+                    "Invalid state, H15 Per class NMS burst cannot receive delimeter while in state {}", (*burst_state));
+                (*num_delimeters_received)++;
+                *burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_IMAGE_DELIMETER;
+            } else {
+                CHECK_IN_DEBUG(NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER == (*burst_state), HAILO_NMS_BURST_INVALID_DATA,
+                    "Invalid state, H15 Per Frame NMS burst cannot receive delimeter while in state {}", (*burst_state));
+                // in hailo15 per frame - if number of delimeter is same as num classes - we expect image delimeter next 
+                // otherwise expect another delimeter
+                (*num_delimeters_received)++;
+                if (num_classes == (*num_delimeters_received)) {
+                    *burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_IMAGE_DELIMETER;
+                }
+            }
+            break;
+        }
+
+        case NMS_IMAGE_DELIMITER:
+        {
+            CHECK_IN_DEBUG(HAILO_BURST_TYPE_H8_PER_CLASS != burst_type, HAILO_NMS_BURST_INVALID_DATA,
+                "Invalid state, H8 NMS burst cannot receive image delimeter");
+
+            CHECK_IN_DEBUG(NMSBurstState::NMS_BURST_STATE_WAITING_FOR_IMAGE_DELIMETER == (*burst_state), HAILO_NMS_BURST_INVALID_DATA,
+                "Invalid state, H15 NMS burst cannot receive image delimeter in state {}", (*burst_state));
+
+            // in both hailo15 per class and per frame - when receiving image delimeter we move to expecting padding
+            *burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_PADDING;
+
+#ifdef NDEBUG
+            finish_reading_burst_update_state(burst_state, can_stop_reading_burst, burst_index);
+#else
+            // Will only get to here in debug mode - if burst size is exactly max classes + 2 (1 for NMS_DELIMITER
+            // and another 1 for NMS_IMAGE_DELIMITER - (this is default for interrupt per frame)) - burst will have
+            // no padding at end and can finish reading burst now if is last bbox in burst
+            if (burst_offset == (burst_size - sizeof(current_bbox))) {
+                finish_reading_burst_update_state(burst_state, can_stop_reading_burst, burst_index);
+            }
+#endif // NDEBUG
+            break;
+        }
+
+        case NMS_H15_PADDING:
+        {
+            if ((HAILO_BURST_TYPE_H15_PER_CLASS == burst_type) || (HAILO_BURST_TYPE_H15_PER_FRAME == burst_type)) {
+                CHECK_IN_DEBUG(NMSBurstState::NMS_BURST_STATE_WAITING_FOR_PADDING == (*burst_state), HAILO_NMS_BURST_INVALID_DATA,
+                    "Invalid state, H15 NMS burst cannot receive padding in state {}", (*burst_state));
+            }
+            // In case of padding next state is wait for padding unless it is last padding of burst - then next state will be
+            // Wait for delimeter - will only get to this stage in debug - in release once image delimeter is read we ignore rest of
+            // burst seeing as it must be padding
+            if (burst_offset == (burst_size - sizeof(current_bbox))) {
+                finish_reading_burst_update_state(burst_state, can_stop_reading_burst, burst_index);
+            }
+            break;
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status NMSStreamReader::read_nms_bbox_mode(OutputStreamBase &stream, void *buffer, size_t offset)
+{
+    const uint32_t num_classes = stream.get_info().nms_info.number_of_classes;
+    const uint32_t chunks_per_frame = stream.get_info().nms_info.chunks_per_frame;
+    const size_t bbox_size = stream.get_info().nms_info.bbox_size;
+    const auto burst_type = stream.get_layer_info().nms_info.burst_type;
+
+    for (size_t delimeters_found = 0; delimeters_found < (num_classes * chunks_per_frame); delimeters_found++) {
+        nms_bbox_counter_t class_bboxes_count = 0;
+        nms_bbox_counter_t* class_bboxes_count_ptr = (nms_bbox_counter_t*)(reinterpret_cast<uint8_t*>(buffer) + offset);
+        offset += sizeof(*class_bboxes_count_ptr);
+
+        while (true) {
+            MemoryView buffer_view(static_cast<uint8_t*>(buffer) + offset, bbox_size);
+            auto status = stream.read_impl(buffer_view);
+            if ((HAILO_STREAM_ABORTED_BY_USER == status) ||
+                ((HAILO_STREAM_NOT_ACTIVATED == status))) {
+                return status;
+            }
+            CHECK_SUCCESS(status, "Failed reading nms bbox");
+            const uint64_t current_bbox = *(uint64_t*)((uint8_t*)buffer + offset);
+
+            if (NMS_IMAGE_DELIMITER == current_bbox) {
+                continue;
+            }
+
+            if (NMS_DELIMITER == current_bbox) {
+                break;
+            }
+
+            class_bboxes_count++;
+            CHECK_IN_DEBUG(class_bboxes_count <= stream.get_info().nms_info.max_bboxes_per_class, HAILO_INTERNAL_FAILURE,
+                "Data read from the device for the current class was size {}, max size is {}", class_bboxes_count,
+                stream.get_info().nms_info.max_bboxes_per_class);
+            offset += bbox_size;
+        }
+
+        *class_bboxes_count_ptr = class_bboxes_count;
+    }
+
+    if (HAILO_BURST_TYPE_H15_BBOX == burst_type) {
+        // At the end of frame on HAILO_BURST_TYPE_H15_BBOX, we need to read the image delimeter bbox (last bbox after
+        // last class delimeter)
+        uint64_t last_bbox = 0;
+        auto status = stream.read_impl(MemoryView(&last_bbox, sizeof(last_bbox)));
+        if ((HAILO_STREAM_ABORTED_BY_USER == status) ||
+            ((HAILO_STREAM_NOT_ACTIVATED == status))) {
+            return status;
+        }
+        CHECK_SUCCESS(status, "Failed reading last nms bbox");
+        CHECK(NMS_IMAGE_DELIMITER == last_bbox, HAILO_INTERNAL_FAILURE,
+            "Last bbox read is expected to be image delimiter {:x}, instead got {:x}", NMS_IMAGE_DELIMITER, last_bbox);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status NMSStreamReader::read_nms_burst_mode(OutputStreamBase &stream, void *buffer, size_t offset, size_t buffer_size)
+{
+    NMSBurstState burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER;
+    const uint32_t bbox_size = stream.get_info().nms_info.bbox_size;
+    const size_t burst_size = stream.get_layer_info().nms_info.burst_size * bbox_size;
+    const hailo_nms_burst_type_t burst_type = stream.get_layer_info().nms_info.burst_type;
+    const auto num_expected_delimeters = stream.get_info().nms_info.chunks_per_frame * stream.get_info().nms_info.number_of_classes;
+    // Transfer size is affected by whether we are working in interrupt per burst or interrupt per frame
+    const size_t transfer_size = LayerInfoUtils::get_nms_layer_transfer_size(stream.get_layer_info());
+    const bool is_interrupt_per_frame = (transfer_size > burst_size);
+
+    CHECK(bbox_size == sizeof(uint64_t), HAILO_INTERNAL_FAILURE,
+        "Invalid Bbox size, must be 8 bytes received {}", bbox_size);
+
+    CHECK(transfer_size <= buffer_size, HAILO_INTERNAL_FAILURE, "Invalid transfer size {}, Cannot be larger than buffer {}",
+        transfer_size, buffer_size);
+
+    // Start writing bboxes at offset sizeof(nms_bbox_counter_t) - because the first sizeof(nms_bbox_counter_t) will be
+    // used to write amount of bboxes found for class 0 etc...
+    nms_bbox_counter_t class_bboxes_count = 0;
+    nms_bbox_counter_t* class_bboxes_count_ptr = (nms_bbox_counter_t*)(reinterpret_cast<uint8_t*>(buffer) + offset);
+    offset += sizeof(nms_bbox_counter_t);
+
+    // Counter of number of delimeters found in frame
+    size_t delimeters_found = 0;
+    size_t burst_index = 0;
+    MemoryView current_burst;
+    while ((delimeters_found < num_expected_delimeters) || (NMSBurstState::NMS_BURST_STATE_WAITING_FOR_IMAGE_DELIMETER == burst_state)) {
+        // In interrupt per frame we read whole frame once (in first iteration) - then don't read in following loop iterations
+        // delimeters_found will always be 0 in first iteration - and in interrupt_per_frame will always be larger in following iterations
+        if (!is_interrupt_per_frame || (0 == delimeters_found)) {
+            assert(offset + transfer_size <= buffer_size);
+            current_burst = MemoryView(static_cast<uint8_t*>(buffer) + offset, transfer_size);
+            auto status = stream.read_impl(current_burst);
+            if ((HAILO_STREAM_ABORTED_BY_USER == status) || ((HAILO_STREAM_NOT_ACTIVATED == status))) {
+                return status;
+            }
+            CHECK_SUCCESS(status, "Failed reading nms burst");
+        }
+
+        // Flag that marks if we can stop reading burst and continue to next burst
+        bool can_stop_reading_burst = false;
+        // Iterate through burst and copy relevant data to user buffer
+        for (size_t burst_offset = 0; burst_offset < burst_size; burst_offset += bbox_size) {
+            uint64_t current_bbox = 0;
+            if (is_interrupt_per_frame) {
+                assert((burst_index * burst_size) + burst_offset < transfer_size);
+                memcpy(&current_bbox, current_burst.data() + (burst_index * burst_size) + burst_offset,
+                    sizeof(current_bbox));
+            } else {
+                memcpy(&current_bbox, current_burst.data() + burst_offset, sizeof(current_bbox));
+            }
+
+            // If read delimeter - fill in information about num of bboxes found for the class (we also make sure that
+            //  It is in state NMS_BURST_STATE_WAITING_FOR_DELIMETER because in hailo8 padding is same value)
+            if ((NMS_DELIMITER == current_bbox) && (NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER == burst_state)) {
+                *class_bboxes_count_ptr = class_bboxes_count;
+                class_bboxes_count_ptr = (nms_bbox_counter_t*)(reinterpret_cast<uint8_t*>(buffer) + offset);
+                class_bboxes_count = 0;
+                offset += sizeof(nms_bbox_counter_t);
+            }
+
+            // Received delimeter can stop reading burst because rest of burst is image delimeter then padding
+            if ((NMS_DELIMITER == current_bbox) || (NMS_IMAGE_DELIMITER == current_bbox) || (NMS_H15_PADDING == current_bbox)) {
+                auto status = advance_state_machine(&burst_state, current_bbox, burst_type, stream.get_info().nms_info.number_of_classes,
+                    &delimeters_found, &can_stop_reading_burst, burst_offset, burst_size, &burst_index);
+                CHECK_SUCCESS(status);
+
+                if (can_stop_reading_burst) {
+                    break;
+                }
+                continue;
+            }
+
+            class_bboxes_count++;
+            CHECK_IN_DEBUG(class_bboxes_count <= stream.get_info().nms_info.max_bboxes_per_class, HAILO_INTERNAL_FAILURE,
+                "Data read from the device for the current class was size {}, max size is {}", class_bboxes_count,
+                stream.get_info().nms_info.max_bboxes_per_class);
+
+            // Copy bbox to correct location in buffer
+            memcpy((static_cast<uint8_t*>(buffer) + offset), &current_bbox, sizeof(current_bbox));
+            offset += bbox_size;
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status NMSStreamReader::read_nms(OutputStreamBase &stream, void *buffer, size_t offset, size_t size)
+{
+    hailo_status status = HAILO_UNINITIALIZED;
+    const auto burst_type = stream.get_layer_info().nms_info.burst_type;
+    const bool is_burst_mode = (HAILO_BURST_TYPE_H8_BBOX != burst_type) && (HAILO_BURST_TYPE_H15_BBOX != burst_type);
+    if (is_burst_mode) {
+        status = NMSStreamReader::read_nms_burst_mode(stream, buffer, offset, size);
+    } else {
+        status = NMSStreamReader::read_nms_bbox_mode(stream, buffer, offset);
+    }
+    if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
+        return status;
+    }
+    CHECK_SUCCESS(status, "Failed reading nms");
+
+    return HAILO_SUCCESS;
+}
+
+Expected<std::shared_ptr<NmsOutputStream>> NmsOutputStream::create(std::shared_ptr<OutputStreamBase> base_stream,
+    const LayerInfo &edge_layer, size_t max_queue_size, EventPtr core_op_activated_event)
+{
+    auto status = HAILO_UNINITIALIZED;
+    auto nms_stream = make_shared_nothrow<NmsOutputStream>(base_stream, edge_layer, max_queue_size,
+        std::move(core_op_activated_event), status);
+    CHECK_NOT_NULL_AS_EXPECTED(nms_stream, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    // On nms stream, we always want the underlying stream to own the buffers for the read operations.
+    status = base_stream->set_buffer_mode(StreamBufferMode::OWNING);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return nms_stream;
+}
+
+hailo_stream_interface_t NmsOutputStream::get_interface() const
+{
+    return m_base_stream->get_interface();
+}
+
+Expected<std::unique_ptr<StreamBufferPool>> NmsOutputStream::allocate_buffer_pool()
+{
+    const size_t queue_size = m_reader_thread.get_max_ongoing_transfers();
+    const BufferStorageParams heap_params{};
+    auto queued_pool = QueuedStreamBufferPool::create(queue_size, get_frame_size(), heap_params);
+    CHECK_EXPECTED(queued_pool);
+
+    return std::unique_ptr<StreamBufferPool>(queued_pool.release());
+}
+
+size_t NmsOutputStream::get_max_ongoing_transfers() const
+{
+    return m_reader_thread.get_max_ongoing_transfers();
+}
+
+hailo_status NmsOutputStream::read_async_impl(TransferRequest &&transfer_request)
+{
+    return m_reader_thread.launch_transfer(std::move(transfer_request));
+}
+
+hailo_status NmsOutputStream::activate_stream_impl()
+{
+    return m_base_stream->activate_stream();
+}
+
+hailo_status NmsOutputStream::deactivate_stream_impl()
+{
+    return m_base_stream->deactivate_stream();
+}
+
+NmsReaderThread::NmsReaderThread(std::shared_ptr<OutputStreamBase> base_stream, size_t max_queue_size) :
+    m_base_stream(base_stream),
+    m_queue_max_size(max_queue_size),
+    m_should_quit(false),
+    m_worker_thread([this] { process_transfer_requests(); })
+{}
+
+NmsReaderThread::~NmsReaderThread()
+{
+    // Deactivate base stream to make sure nms worker thread will exit.
+    auto status = m_base_stream->deactivate_stream();
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed deactivate base stream, status {}", status);
+        // continue in the destruction.
+    }
+
+    if (m_worker_thread.joinable()) {
+        signal_thread_quit();
+        m_worker_thread.join();
+    }
+}
+
+hailo_status NmsReaderThread::launch_transfer(TransferRequest &&transfer_request)
+{
+    CHECK(0 == transfer_request.buffer.offset(), HAILO_INVALID_OPERATION,
+        "NMS stream doesn't support buffer with offset");
+
+    {
+        std::lock_guard<std::mutex> lock(m_queue_mutex);
+        if (m_queue.size() >= m_queue_max_size) {
+            return HAILO_QUEUE_IS_FULL;
+        }
+
+        m_queue.emplace(std::move(transfer_request));
+    }
+    m_queue_cond.notify_one();
+    return HAILO_SUCCESS;
+}
+
+size_t NmsReaderThread::get_max_ongoing_transfers() const
+{
+    return m_queue_max_size;
+}
+
+void NmsReaderThread::signal_thread_quit()
+{
+    {
+        std::unique_lock<std::mutex> lock(m_queue_mutex);
+        m_should_quit = true;
+    }
+    m_queue_cond.notify_all();
+}
+
+void NmsReaderThread::process_transfer_requests()
+{
+    OsUtils::set_current_thread_name("ASYNC_NMS");
+
+    while (true) {
+        TransferRequest transfer_request{};
+        {
+            std::unique_lock<std::mutex> lock(m_queue_mutex);
+            m_queue_cond.wait(lock, [&]{ return m_should_quit || !m_queue.empty(); });
+            if (m_should_quit) {
+                break;
+            }
+
+            transfer_request = m_queue.front();
+            m_queue.pop();
+        }
+
+        assert(0 == transfer_request.buffer.offset());
+        auto buffer = transfer_request.buffer.base_buffer();
+        auto status = NMSStreamReader::read_nms(*m_base_stream, buffer->data(), 0, buffer->size());
+
+        if ((HAILO_STREAM_NOT_ACTIVATED == status) || (HAILO_STREAM_ABORTED_BY_USER == status)) {
+            // On both deactivation/abort, we want to send HAILO_STREAM_ABORTED_BY_USER since it is part of the callback
+            // API.
+            transfer_request.callback(HAILO_STREAM_ABORTED_BY_USER);
+        } else {
+            transfer_request.callback(status);
+        }
+    }
+}
+
+} /* namespace hailort */
\ No newline at end of file
diff --git a/hailort/libhailort/src/stream_common/nms_stream.hpp b/hailort/libhailort/src/stream_common/nms_stream.hpp
new file mode 100644 (file)
index 0000000..b6f44ef
--- /dev/null
@@ -0,0 +1,106 @@
+/**
+ * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file nms_stream.hpp
+ * @brief Wraps some stream object that reads bbox/bursts into a stream object that reads nms frames.
+ **/
+
+#ifndef _NMS_STREAM_HPP_
+#define _NMS_STREAM_HPP_
+
+
+#include "common/utils.hpp"
+#include "hailo/hailort_common.hpp"
+
+#include "stream_common/stream_internal.hpp"
+#include "stream_common/async_stream_base.hpp"
+
+namespace hailort
+{
+
+static const uint64_t NMS_DELIMITER = 0xFFFFFFFFFFFFFFFF;
+static const uint64_t NMS_IMAGE_DELIMITER = 0xFFFFFFFFFFFFFFFE;
+static const uint64_t NMS_H15_PADDING = 0xFFFFFFFFFFFFFFFD;
+
+enum class NMSBurstState {
+    NMS_BURST_STATE_WAITING_FOR_DELIMETER = 0,
+    NMS_BURST_STATE_WAITING_FOR_IMAGE_DELIMETER = 1,
+    NMS_BURST_STATE_WAITING_FOR_PADDING = 2,
+};
+
+// Static class that helps receive and read the nms output stream according to the different burst mode, type and size.
+// For explanation on the different burst modes and types and state machine and logic of the class please check out the cpp.
+class NMSStreamReader {
+public:
+    static hailo_status read_nms(OutputStreamBase &stream, void *buffer, size_t offset, size_t size);
+private:
+    static hailo_status read_nms_bbox_mode(OutputStreamBase &stream, void *buffer, size_t offset);
+    static hailo_status read_nms_burst_mode(OutputStreamBase &stream, void *buffer, size_t offset, size_t buffer_size);
+    static hailo_status advance_state_machine(NMSBurstState *burst_state, const uint64_t current_bbox,
+        const hailo_nms_burst_type_t burst_type, const uint32_t num_classes, size_t *num_delimeters_received,
+        bool *can_stop_reading_burst, const size_t burst_offset, const size_t burst_size, size_t *burst_index);
+};
+
+class NmsReaderThread final {
+public:
+
+    NmsReaderThread(std::shared_ptr<OutputStreamBase> base_stream, size_t max_queue_size);
+    ~NmsReaderThread();
+
+    NmsReaderThread(const NmsReaderThread &) = delete;
+    NmsReaderThread &operator=(const NmsReaderThread &) = delete;
+
+    hailo_status launch_transfer(TransferRequest &&transfer_request);
+
+    size_t get_max_ongoing_transfers() const;
+
+private:
+
+    void signal_thread_quit();
+    void process_transfer_requests();
+
+    std::shared_ptr<OutputStreamBase> m_base_stream;
+    const size_t m_queue_max_size;
+    std::mutex m_queue_mutex;
+    std::condition_variable m_queue_cond;
+    // TODO: use SpscQueue (HRT-10554)
+    std::queue<TransferRequest> m_queue;
+    // m_should_quit is used to quit the thread (called on destruction)
+    bool m_should_quit;
+    std::thread m_worker_thread;
+};
+
+// NMS requires multiple reads from the device + parsing the output. Hence, a background thread is needed.
+// This class opens a worker thread that processes nms transfers, signalling the user's callback upon completion.
+// read_async adds transfer requests to a producer-consumer queue
+class NmsOutputStream : public AsyncOutputStreamBase {
+public:
+    static Expected<std::shared_ptr<NmsOutputStream>> create(std::shared_ptr<OutputStreamBase> base_stream,
+        const LayerInfo &edge_layer, size_t max_queue_size, EventPtr core_op_activated_event);
+
+    virtual hailo_stream_interface_t get_interface() const override;
+
+    NmsOutputStream(std::shared_ptr<OutputStreamBase> base_stream, const LayerInfo &edge_layer, size_t max_queue_size,
+        EventPtr core_op_activated_event, hailo_status &status) :
+            AsyncOutputStreamBase(edge_layer, base_stream->get_interface(), std::move(core_op_activated_event), status),
+            m_base_stream(base_stream),
+            m_reader_thread(base_stream, max_queue_size)
+    {}
+
+protected:
+    virtual Expected<std::unique_ptr<StreamBufferPool>> allocate_buffer_pool() override;
+    virtual size_t get_max_ongoing_transfers() const override;
+    virtual hailo_status read_async_impl(TransferRequest &&transfer_request) override;
+    virtual hailo_status activate_stream_impl() override;
+    virtual hailo_status deactivate_stream_impl() override;
+
+    std::shared_ptr<OutputStreamBase> m_base_stream;
+
+    NmsReaderThread m_reader_thread;
+};
+
+} /* namespace hailort */
+
+#endif /* _NMS_STREAM_HPP_ */
\ No newline at end of file
diff --git a/hailort/libhailort/src/stream_common/nms_stream_reader.cpp b/hailort/libhailort/src/stream_common/nms_stream_reader.cpp
deleted file mode 100644 (file)
index 618be44..0000000
+++ /dev/null
@@ -1,300 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file nms_stream_reader.cpp
- * @brief static class that helps receive and read the nms ouput stream according to the different burst mode, type and size.
- * 
- * Explanation of state machine and logic:
- * This class supports the following 5 nms cases:
- *  1) Hailo-8 bbox mode (non burst mode)
- *  2) Hailo-15 bbox mode
- *  3) Hailo-8 Burst mode 
- *  4) Hailo-15 Burst per class mode
- *  5) Hailo15 Burst per frame mode
- * 
- * Lets explain each mode and the state machine of each mode:
- * 1)-2) Hailo-8 bbox mode / Hailo-15 bbox mode - both work the same - they read bbox bbox from the nms core until a delimeter comes
- *       and expect to read the amount of delimeters as the same amount of number of classes (times num chunks if more than one chunk per frame).
- * 
- * 3) Hailo8 Burst mode - Hailo 8 burst mode reads bursts in the size of burst-size and expects each burst to be made of x bboxes and
- *    then a delimeter and padding until the end of the burst - essentially what the state machine does here is read until the first delimeter
- *    and then expect padding until end of burts (in release mode we dont check that the rest of burst is padding and
- *    just go onto the next burst but in debug we validate that rest of burst is padding). NOTE: in Hailo-8 delimeter value and
- *    padding value are both 0xFFFFFFFFFFFFFFFF so essentially we read until first delimeter - and the every following delimeter
- *    in burst is padding. This mode also supports interrupt per frame - assuming burst size received from SDK is larger than max bboxes + 1 (for delimeter)
- *    we know there will be one burst per class and hence the output size will be num classes * burst size and we enable one interrupt per frame.
- * 
- * 4) Hailo15 Burst per class mode - Hailo-15 Burst per class mode reads bursts in the size of burst size and expects the following order.
- *    x bboxes , followed by a delimeter, followed by an image delimeter, followed by padding until the end of the burst. The bbboxes, delimeter
- *    and image delimeter can all be in different bursts - so essentially the way the state machine works is the following: we read burst burst,
- *    in each burst we iterate over the bboxes until we find a delimeter - once after that we know how many bboxes there were for that class,
- *    and then we expect to see a following image delimeter after the delimeter, once we read the image delimeter we expect padding until the end of the
- *    burst (which we ensure in debug but not in release). NOTE: if a burst ends on a delimeter we need to read the next burst to get the image delimeter
- *    even in the case where the amount of delimeters we read is equal to the amount of classes - otherwise there is data still in the core
- *    that was not emptied and will be read as part of the next frame. This mode also supports interrupt per frame - assuming burst size received from SDK
- *    is larger than max bboxes + 2 (for image delimeter and delimeter) we know there will be one burst per class and hence the output size will be 
- *    num classes * burst size and we enable one interrupt per frame.
- * 
- * 5) Hailo15 Burst per frame mode - Hailo-15 Burst per frame mode reads bursts in the size of burst size and expects the following order.
- *    x bboxes , followed by a delimeter, for all the classes until the last class where the last delimeter should be followed by an image delimeter
- *    and padding until the end of the burst. The state machine works in the following way - we read burst burst, and for each time we reach a delimeter
- *    we save the amount of bboxes that were read for that class and keep reading the burst. NOTE: this is the only mode where there can be multiple
- *    delimeters per burst. Once we read the last delimeter (which we know from number classes) - we ensure there is a following image delimeter (which again
- *    can be in the following burst) and then assume the rest of the burst is padding (and in debug we verify that). NOTE: currently this mode is not
- *    supported in the sdk.
- * 
- **/
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "stream_common/nms_stream_reader.hpp"
-#include "src/hef/layer_info.hpp"
-
-namespace hailort
-{
-
-static void finish_reading_burst_update_state(NMSBurstState *burst_state, bool *can_stop_reading_burst, size_t *burst_index)
-{
-    *burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER;
-    *burst_index = (*burst_index + 1);
-    *can_stop_reading_burst = true;
-}
-
-// Function that implements the state machine of the 3 different nms burst modes based on the value of the current bbox and the current state.
-hailo_status NMSStreamReader::advance_state_machine(NMSBurstState *burst_state, const uint64_t current_bbox,
-    const hailo_nms_burst_type_t burst_type, const uint32_t num_classes, size_t *num_delimeters_received,
-    bool *can_stop_reading_burst, const size_t burst_offset, const size_t burst_size, size_t *burst_index)
-{
-    switch(current_bbox) {
-        // This is also case for Hailo8 padding - seeing as they are same value
-        case NMS_DELIMITER:
-        {
-            // If we are in hailo8 per class mode - if we are in state waiting for delimeter - we received delimeter
-            // otherwise we must be in state waiting for padding - in which case we received padding.
-            if (HAILO_BURST_TYPE_H8_PER_CLASS == burst_type) {
-                CHECK_IN_DEBUG((NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER == (*burst_state)) ||
-                    (NMSBurstState::NMS_BURST_STATE_WAITING_FOR_PADDING == (*burst_state)), HAILO_NMS_BURST_INVALID_DATA,
-                    "Invalid state, H8 NMS burst cannot receive delimeter while in state {}", (*burst_state));
-                // To differentiate from H8 padding - where we should not increment amount of delimeters found
-                if ((*burst_state) == NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER) {
-                    (*num_delimeters_received)++;
-                }
-#ifdef NDEBUG
-                // In hailo8 burst mode - if is in state waiting for delimeter and got delimeter - rest will be padding and can skip
-                if ((*burst_state) == NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER) {
-                    finish_reading_burst_update_state(burst_state, can_stop_reading_burst, burst_index);
-                    break;
-                }
-#endif
-                // In hailo8 mode after delimeter we expect padding until end of burst - seeing as h8 padding is same value
-                // Weather was in state wait for delimeter or state wait for padding - will always go to wait for padding until end of burst
-                *burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_PADDING;
-                if (burst_offset == (burst_size - sizeof(current_bbox))) {
-                    finish_reading_burst_update_state(burst_state, can_stop_reading_burst, burst_index);
-                }
-                break;
-
-            } else if (HAILO_BURST_TYPE_H15_PER_CLASS == burst_type) {
-                CHECK_IN_DEBUG(NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER == (*burst_state), HAILO_NMS_BURST_INVALID_DATA,
-                    "Invalid state, H15 Per class NMS burst cannot receive delimeter while in state {}", (*burst_state));
-                (*num_delimeters_received)++;
-                *burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_IMAGE_DELIMETER;
-            } else {
-                CHECK_IN_DEBUG(NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER == (*burst_state), HAILO_NMS_BURST_INVALID_DATA,
-                    "Invalid state, H15 Per Frame NMS burst cannot receive delimeter while in state {}", (*burst_state));
-                // in hailo15 per frame - if number of delimeter is same as num classes - we expect image delimeter next 
-                // otherwise expect another delimeter
-                (*num_delimeters_received)++;
-                if (num_classes == (*num_delimeters_received)) {
-                    *burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_IMAGE_DELIMETER;
-                }
-            }
-            break;
-        }
-
-        case NMS_IMAGE_DELIMITER:
-        {
-            CHECK_IN_DEBUG(HAILO_BURST_TYPE_H8_PER_CLASS != burst_type, HAILO_NMS_BURST_INVALID_DATA,
-                "Invalid state, H8 NMS burst cannot receive image delimeter");
-
-            CHECK_IN_DEBUG(NMSBurstState::NMS_BURST_STATE_WAITING_FOR_IMAGE_DELIMETER == (*burst_state), HAILO_NMS_BURST_INVALID_DATA,
-                "Invalid state, H15 NMS burst cannot receive image delimeter in state {}", (*burst_state));
-            
-            // in both hailo15 per class and per frame - when receiving image delimeter we move to expecting padding
-            *burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_PADDING;
-    
-#ifdef NDEBUG
-            finish_reading_burst_update_state(burst_state, can_stop_reading_burst, burst_index);
-#endif // NDEBUG
-            break;
-        }
-
-        case NMS_H15_PADDING:
-        {
-            if ((HAILO_BURST_TYPE_H15_PER_CLASS == burst_type) || (HAILO_BURST_TYPE_H15_PER_FRAME == burst_type)) {
-                CHECK_IN_DEBUG(NMSBurstState::NMS_BURST_STATE_WAITING_FOR_PADDING == (*burst_state), HAILO_NMS_BURST_INVALID_DATA,
-                    "Invalid state, H15 NMS burst cannot receive padding in state {}", (*burst_state));
-            }
-            // In case of padding next state is wait for padding unless it is last padding of burst - then next state will be
-            // Wait for delimeter - will only get to this stage in debug - in release once image delimeter is read we ignore rest of
-            // burst seeing as it must be padding
-            if (burst_offset == (burst_size - sizeof(current_bbox))) {
-                finish_reading_burst_update_state(burst_state, can_stop_reading_burst, burst_index);
-            }
-            break;
-        }
-    }
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status NMSStreamReader::read_nms_bbox_mode(OutputStream &stream, void *buffer, size_t offset)
-{
-    const uint32_t num_classes = stream.get_info().nms_info.number_of_classes;
-    const uint32_t chunks_per_frame = stream.get_info().nms_info.chunks_per_frame;
-    const size_t bbox_size = stream.get_info().nms_info.bbox_size;
-    
-    for (size_t delimeters_found = 0; delimeters_found < (num_classes * chunks_per_frame); delimeters_found++) {
-        nms_bbox_counter_t class_bboxes_count = 0;
-        nms_bbox_counter_t* class_bboxes_count_ptr = (nms_bbox_counter_t*)(reinterpret_cast<uint8_t*>(buffer) + offset);
-        offset += sizeof(*class_bboxes_count_ptr);
-
-        while (true) {
-            MemoryView buffer_view(static_cast<uint8_t*>(buffer) + offset, bbox_size);
-            auto status = stream.read_impl(buffer_view);
-            if ((HAILO_STREAM_ABORTED_BY_USER == status) ||
-                ((HAILO_STREAM_NOT_ACTIVATED == status))) {
-                return status;
-            }
-            CHECK_SUCCESS(status, "Failed reading nms bbox");
-            const uint64_t current_bbox = *(uint64_t*)((uint8_t*)buffer + offset);
-
-            if (NMS_IMAGE_DELIMITER == current_bbox) {
-                continue;
-            }
-
-            if (NMS_DELIMITER == current_bbox) {
-                break;
-            }
-
-            class_bboxes_count++;
-            CHECK_IN_DEBUG(class_bboxes_count <= stream.get_info().nms_info.max_bboxes_per_class, HAILO_INTERNAL_FAILURE,
-                "Data read from the device for the current class was size {}, max size is {}", class_bboxes_count,
-                stream.get_info().nms_info.max_bboxes_per_class);
-            offset += bbox_size;
-        }
-
-        *class_bboxes_count_ptr = class_bboxes_count;
-    }
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status NMSStreamReader::read_nms_burst_mode(OutputStream &stream, void *buffer, size_t offset, size_t buffer_size)
-{
-    NMSBurstState burst_state = NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER;
-    const uint32_t bbox_size = stream.get_info().nms_info.bbox_size;
-    const size_t burst_size = stream.get_layer_info().nms_info.burst_size * bbox_size;
-    const hailo_nms_burst_type_t burst_type = stream.get_layer_info().nms_info.burst_type;
-    const auto num_expected_delimeters = stream.get_info().nms_info.chunks_per_frame * stream.get_info().nms_info.number_of_classes;
-    // Transfer size if affected from if working in interrupt per burst or interrupt per frame
-    const size_t transfer_size = LayerInfoUtils::get_nms_layer_transfer_size(stream.get_layer_info());
-    const bool is_interrupt_per_frame = (transfer_size > burst_size);
-
-    CHECK(bbox_size == sizeof(uint64_t), HAILO_INTERNAL_FAILURE,
-        "Invalid Bbox size, must be 8 bytes received {}", bbox_size);
-
-    CHECK(transfer_size <= buffer_size, HAILO_INTERNAL_FAILURE, "Invalid transfer size {}, Cannot be larger than buffer {}",
-        transfer_size, buffer_size);
-
-    // Start writing bboxes at offset sizeof(nms_bbox_counter_t) - because the first sizeof(nms_bbox_counter_t) will be
-    // used to write amount of bboxes found for class 0 etc...
-    nms_bbox_counter_t class_bboxes_count = 0;
-    nms_bbox_counter_t* class_bboxes_count_ptr = (nms_bbox_counter_t*)(reinterpret_cast<uint8_t*>(buffer) + offset);
-    offset += sizeof(nms_bbox_counter_t);
-
-    // Counter of number of delimeters found in frame
-    size_t delimeters_found = 0;
-    size_t burst_index = 0;
-    uint8_t *start_index_of_burst_in_buffer = nullptr;
-    while ((delimeters_found < num_expected_delimeters) || (NMSBurstState::NMS_BURST_STATE_WAITING_FOR_IMAGE_DELIMETER == burst_state)) {
-        // In interrupt per frame we read whole frame once (in first iteration) - then don't read in following loop iterations
-        // delimeters_found will always be 0 in first iteration - and in interrupt_per_frame will always be larger in following iterations
-        if (!is_interrupt_per_frame || (0 == delimeters_found)) {
-            assert(offset + transfer_size <= buffer_size);
-            start_index_of_burst_in_buffer = static_cast<uint8_t*>(buffer) + offset;
-            MemoryView buffer_view(start_index_of_burst_in_buffer, transfer_size);
-            auto status = stream.read_impl(buffer_view);
-            if ((HAILO_STREAM_ABORTED_BY_USER == status) || ((HAILO_STREAM_NOT_ACTIVATED == status))) {
-                return status;
-            }
-            CHECK_SUCCESS(status, "Failed reading nms burst");
-        }
-
-        // Flag that marks if we can stop reading burst and continue to next burst
-        bool can_stop_reading_burst = false;
-        // Iterate through burst and copy relevant data to user buffer
-        for (size_t burst_offset = 0; burst_offset < burst_size; burst_offset += bbox_size) {
-            uint64_t current_bbox = 0;
-            if (is_interrupt_per_frame) {
-                assert((burst_index * burst_size) + burst_offset < transfer_size);
-                current_bbox = *(uint64_t*)((uint8_t*)start_index_of_burst_in_buffer + (burst_index * burst_size) + burst_offset);
-            } else {
-                current_bbox = *(uint64_t*)((uint8_t*)start_index_of_burst_in_buffer + burst_offset);
-            }
-
-            // If read delimeter - fill in information about num of bboxes found for the class (we also make sure that
-            //  It is in state NMS_BURST_STATE_WAITING_FOR_DELIMETER because in hailo8 padding is same value)
-            if ((NMS_DELIMITER == current_bbox) && (NMSBurstState::NMS_BURST_STATE_WAITING_FOR_DELIMETER == burst_state)) {
-                *class_bboxes_count_ptr = class_bboxes_count;
-                class_bboxes_count_ptr = (nms_bbox_counter_t*)(reinterpret_cast<uint8_t*>(buffer) + offset);
-                class_bboxes_count = 0;
-                offset += sizeof(nms_bbox_counter_t);
-            }
-
-            // Received delimeter can stop reading burst because rest of burst is image delimeter then padding
-            if ((NMS_DELIMITER == current_bbox) || (NMS_IMAGE_DELIMITER == current_bbox) || (NMS_H15_PADDING == current_bbox)) {
-                auto status = advance_state_machine(&burst_state, current_bbox, burst_type, stream.get_info().nms_info.number_of_classes,
-                    &delimeters_found, &can_stop_reading_burst, burst_offset, burst_size, &burst_index);
-                CHECK_SUCCESS(status);
-
-                if (can_stop_reading_burst) {
-                    break;
-                }
-                continue;
-            }
-
-            class_bboxes_count++;
-            CHECK_IN_DEBUG(class_bboxes_count <= stream.get_info().nms_info.max_bboxes_per_class, HAILO_INTERNAL_FAILURE,
-                "Data read from the device for the current class was size {}, max size is {}", class_bboxes_count,
-                stream.get_info().nms_info.max_bboxes_per_class);
-            
-            // Copy bbox to correct location in buffer
-            memcpy((static_cast<uint8_t*>(buffer) + offset), &current_bbox, sizeof(current_bbox));
-            offset += bbox_size;
-        }
-    }
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status NMSStreamReader::read_nms(OutputStream &stream, void *buffer, size_t offset, size_t size)
-{
-    hailo_status status = HAILO_UNINITIALIZED;
-    const bool burst_mode = (HAILO_BURST_TYPE_NO_BURST != stream.get_layer_info().nms_info.burst_type);
-    if (burst_mode) {
-        status = NMSStreamReader::read_nms_burst_mode(stream, buffer, offset, size);
-    } else {
-        status = NMSStreamReader::read_nms_bbox_mode(stream, buffer, offset);
-    }
-    if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
-        return status;
-    }
-    CHECK_SUCCESS(status, "Failed reading nms");
-
-    return HAILO_SUCCESS;
-}
-
-} /* namespace hailort */
\ No newline at end of file
diff --git a/hailort/libhailort/src/stream_common/nms_stream_reader.hpp b/hailort/libhailort/src/stream_common/nms_stream_reader.hpp
deleted file mode 100644 (file)
index db5139c..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file nms_stream_reader.hpp
- * @brief static class that helps receives and reads the nms ouput stream according to the differnet burst mode, type and size.
- * 
- * For explanation on the different burst modes and types and state machine and logic of the class please check out the cpp.
- * 
- **/
-
-#ifndef _NMS_STREAM_READER_HPP_
-#define _NMS_STREAM_READER_HPP_
-
-#include "hailo/stream.hpp"
-#include "common/utils.hpp"
-#include "hailo/hailort_common.hpp"
-
-namespace hailort
-{
-
-static constexpr uint32_t MAX_NMS_BURST_SIZE = 65536;
-static const uint64_t NMS_DELIMITER = 0xFFFFFFFFFFFFFFFF;
-static const uint64_t NMS_IMAGE_DELIMITER = 0xFFFFFFFFFFFFFFFE;
-static const uint64_t NMS_H15_PADDING = 0xFFFFFFFFFFFFFFFD;
-
-enum class NMSBurstState {
-    NMS_BURST_STATE_WAITING_FOR_DELIMETER = 0,
-    NMS_BURST_STATE_WAITING_FOR_IMAGE_DELIMETER = 1,
-    NMS_BURST_STATE_WAITING_FOR_PADDING = 2,
-};
-
-class NMSStreamReader {
-public:
-    static hailo_status read_nms(OutputStream &stream, void *buffer, size_t offset, size_t size);
-private:
-    static hailo_status read_nms_bbox_mode(OutputStream &stream, void *buffer, size_t offset);
-    static hailo_status read_nms_burst_mode(OutputStream &stream, void *buffer, size_t offset, size_t buffer_size);
-    static hailo_status advance_state_machine(NMSBurstState *burst_state, const uint64_t current_bbox,
-        const hailo_nms_burst_type_t burst_type, const uint32_t num_classes, size_t *num_delimeters_received,
-        bool *can_stop_reading_burst, const size_t burst_offset, const size_t burst_size, size_t *burst_index);
-};
-
-} /* namespace hailort */
-
-#endif /* _STREAM_INTERNAL_HPP_ */
\ No newline at end of file
diff --git a/hailort/libhailort/src/stream_common/queued_stream_buffer_pool.cpp b/hailort/libhailort/src/stream_common/queued_stream_buffer_pool.cpp
new file mode 100644 (file)
index 0000000..e93de5a
--- /dev/null
@@ -0,0 +1,78 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file queued_stream_buffer_pool.cpp
+ **/
+
+#include "queued_stream_buffer_pool.hpp"
+
+namespace hailort
+{
+
+Expected<std::unique_ptr<QueuedStreamBufferPool>> QueuedStreamBufferPool::create(size_t max_queue_size, size_t buffer_size,
+    BufferStorageParams buffer_params)
+{
+    std::vector<BufferPtr> storage;
+    storage.reserve(max_queue_size);
+    for (size_t i = 0; i < max_queue_size; i++) {
+        auto buffer = Buffer::create_shared(buffer_size, 0, buffer_params);
+        CHECK_EXPECTED(buffer);
+        storage.emplace_back(buffer.release());
+    }
+
+    auto pool = make_unique_nothrow<QueuedStreamBufferPool>(std::move(storage));
+    CHECK_NOT_NULL_AS_EXPECTED(pool, HAILO_OUT_OF_HOST_MEMORY);
+    return pool;
+}
+
+QueuedStreamBufferPool::QueuedStreamBufferPool(std::vector<BufferPtr> &&storage) :
+    m_storage(std::move(storage))
+{
+    for (auto buffer : m_storage) {
+        m_queue.push(buffer);
+    }
+}
+
+size_t QueuedStreamBufferPool::max_queue_size() const
+{
+    return m_storage.size();
+}
+
+Expected<TransferBuffer> QueuedStreamBufferPool::dequeue()
+{
+    CHECK_AS_EXPECTED(!m_queue.empty(), HAILO_INTERNAL_FAILURE, "QueuedStreamBufferPool is empty");
+
+    auto buffer = m_queue.front();
+    m_queue.pop();
+    return TransferBuffer(buffer);
+}
+
+hailo_status QueuedStreamBufferPool::enqueue(TransferBuffer &&buffer_info)
+{
+    CHECK(buffer_info.offset() == 0, HAILO_INTERNAL_FAILURE, "Cant use offset on queued buffer pool");
+    CHECK(buffer_info.size() == m_storage[0]->size(), HAILO_INTERNAL_FAILURE, "Invalid enqueue buffer size");
+    CHECK(buffer_info.base_buffer()->data() == m_storage[m_next_enqueue_buffer_index]->data(), HAILO_INTERNAL_FAILURE,
+        "Out of order enqueue for queued stream buffer pool");
+
+    m_queue.push(buffer_info.base_buffer());
+    m_next_enqueue_buffer_index = (m_next_enqueue_buffer_index + 1) % (m_storage.size());
+    return HAILO_SUCCESS;
+}
+
+void QueuedStreamBufferPool::reset_pointers()
+{
+    // First, clear all queued buffers (data may be lost, which reset_pointers() permits).
+    while (!m_queue.empty()) {
+        m_queue.pop();
+    }
+
+    // Now fill the buffers from the storage in the right order
+    for (auto buffer : m_storage) {
+        m_queue.push(buffer);
+    }
+    m_next_enqueue_buffer_index = 0;
+}
+
+} /* namespace hailort */
diff --git a/hailort/libhailort/src/stream_common/queued_stream_buffer_pool.hpp b/hailort/libhailort/src/stream_common/queued_stream_buffer_pool.hpp
new file mode 100644 (file)
index 0000000..373206d
--- /dev/null
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file queued_stream_buffer_pool.hpp
+ * @brief Simplest stream buffer pool, just using std::queue with max size for the buffers.
+ **/
+
+#ifndef _HAILO_QUEUED_STREAM_BUFFER_POOL_HPP_
+#define _HAILO_QUEUED_STREAM_BUFFER_POOL_HPP_
+
+#include "stream_common/stream_buffer_pool.hpp"
+
+#include <queue>
+
+namespace hailort
+{
+
+class QueuedStreamBufferPool : public StreamBufferPool {
+public:
+    static Expected<std::unique_ptr<QueuedStreamBufferPool>> create(size_t max_queue_size, size_t buffer_size,
+        BufferStorageParams buffer_params);
+
+    explicit QueuedStreamBufferPool(std::vector<BufferPtr> &&storage);
+
+    virtual size_t max_queue_size() const override;
+    virtual Expected<TransferBuffer> dequeue() override;
+    virtual hailo_status enqueue(TransferBuffer &&buffer_info) override;
+    virtual void reset_pointers() override;
+
+private:
+    // Hold the buffer storage, keeps all buffers alive.
+    std::vector<BufferPtr> m_storage;
+
+    std::queue<BufferPtr> m_queue;
+
+    // Used for buffer enqueue order validation.
+    size_t m_next_enqueue_buffer_index = 0;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_QUEUED_STREAM_BUFFER_POOL_HPP_ */
diff --git a/hailort/libhailort/src/stream_common/remote_process_stream.cpp b/hailort/libhailort/src/stream_common/remote_process_stream.cpp
new file mode 100644 (file)
index 0000000..5fae0a1
--- /dev/null
@@ -0,0 +1,595 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file remote_process_stream.cpp
+ **/
+
+#include "remote_process_stream.hpp"
+
+#include "common/os_utils.hpp"
+
+namespace hailort
+{
+
+constexpr size_t MIN_QUEUE_SIZE = 2;
+constexpr size_t DEFAULT_QUEUE_SIZE = 4;
+
+Expected<std::unique_ptr<RemoteProcessBufferPool>> RemoteProcessBufferPool::create(
+    hailo_stream_direction_t stream_direction, size_t frame_size, size_t queue_size)
+{
+    // queue_size must be some (power-of-2 minus 1) in order to fit CircularArray.
+    queue_size = get_nearest_powerof_2(static_cast<uint32_t>(queue_size + 1), MIN_QUEUE_SIZE) - 1;
+    queue_size = std::min(queue_size, ONGOING_TRANSFERS_SIZE);
+
+    hailo_status status = HAILO_UNINITIALIZED;
+    auto buffer_pool = make_unique_nothrow<RemoteProcessBufferPool>(stream_direction, frame_size, queue_size,
+        status);
+    CHECK_NOT_NULL_AS_EXPECTED(buffer_pool, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status, "Failed creating remote process buffer pool");
+    return buffer_pool;
+}
+
+RemoteProcessBufferPool::RemoteProcessBufferPool(hailo_stream_direction_t stream_direction, size_t frame_size,
+    size_t queue_size, hailo_status &status) :
+        m_hw_buffers_queue(queue_size + 1),
+        m_host_buffers_queue(queue_size + 1)
+{
+    // On H2D, the user will dequeue from the host_buffers_queue, fill the buffer and send it to the hw_buffers_queue.
+    // On D2H, the read thread will dequeue from the hw_buffers_queue, read into it and send it to the host_buffers_queue.
+    auto &queue_to_fill = (HAILO_H2D_STREAM == stream_direction) ?
+        m_host_buffers_queue :
+        m_hw_buffers_queue;
+
+    for (size_t i = 0; i < queue_size; i++) {
+        // We create the buffers here as dma-able since that forces them to be shared between processes.
+        // In the future, we may have some new buffer storage params for shared memory.
+        auto buffer = Buffer::create_shared(frame_size, BufferStorageParams::create_dma());
+        if (!buffer) {
+            LOGGER__ERROR("Failed allocating buffer");
+            status = buffer.status();
+            return;
+        }
+
+        m_buffers_guard.emplace_back(buffer.release());
+
+        auto buffer_view = MemoryView(*m_buffers_guard.back());
+        queue_to_fill.push_back(SharedBuffer{buffer_view, SharedBuffer::Type::DATA});
+    }
+
+    status = HAILO_SUCCESS;
+}
+
+void RemoteProcessBufferPool::abort()
+{
+    {
+        std::unique_lock<RecursiveSharedMutex> lock(m_mutex);
+        m_is_aborted = true;
+    }
+    m_cv.notify_all();
+}
+
+void RemoteProcessBufferPool::clear_abort()
+{
+    std::unique_lock<RecursiveSharedMutex> lock(m_mutex);
+    m_is_aborted = false;
+}
+
+Expected<RemoteProcessBufferPool::SharedBuffer> RemoteProcessBufferPool::dequeue_hw_buffer(
+    std::chrono::milliseconds timeout)
+{
+    std::unique_lock<RecursiveSharedMutex> lock(m_mutex);
+    auto status = cv_wait_for(lock, timeout, [this]() {
+        return !m_hw_buffers_queue.empty();
+    });
+    if (HAILO_SUCCESS != status) {
+        return make_unexpected(status);
+    }
+
+    auto result = m_hw_buffers_queue.front();
+    m_hw_buffers_queue.pop_front();
+    return result;
+}
+
+hailo_status RemoteProcessBufferPool::enqueue_hw_buffer(SharedBuffer buffer)
+{
+    {
+        std::unique_lock<RecursiveSharedMutex> lock(m_mutex);
+        CHECK(!m_hw_buffers_queue.full(), HAILO_INTERNAL_FAILURE, "HW buffer is full");
+        m_hw_buffers_queue.push_back(buffer);
+    }
+    m_cv.notify_one();
+    return HAILO_SUCCESS;
+}
+
+Expected<RemoteProcessBufferPool::SharedBuffer> RemoteProcessBufferPool::dequeue_host_buffer(
+    std::chrono::milliseconds timeout)
+{
+    std::unique_lock<RecursiveSharedMutex> lock(m_mutex);
+    auto status = cv_wait_for(lock, timeout, [this]() {
+        return !m_host_buffers_queue.empty();
+    });
+    if (HAILO_SUCCESS != status) {
+        return make_unexpected(status);
+    }
+
+    auto result = m_host_buffers_queue.front();
+    m_host_buffers_queue.pop_front();
+    return result;
+}
+
+hailo_status RemoteProcessBufferPool::enqueue_host_buffer(SharedBuffer buffer)
+{
+    {
+        std::unique_lock<RecursiveSharedMutex> lock(m_mutex);
+        CHECK(!m_host_buffers_queue.full(), HAILO_INTERNAL_FAILURE, "Host buffer is full");
+        m_host_buffers_queue.push_back(buffer);
+    }
+    m_cv.notify_one();
+    return HAILO_SUCCESS;
+}
+
+hailo_status RemoteProcessBufferPool::wait_until_host_queue_full(std::chrono::milliseconds timeout)
+{
+    std::unique_lock<RecursiveSharedMutex> lock(m_mutex);
+    return cv_wait_for(lock, timeout, [this]() {
+        return m_host_buffers_queue.full();
+    });
+}
+
+/** Input stream **/
+Expected<std::shared_ptr<RemoteProcessInputStream>> RemoteProcessInputStream::create(
+    std::shared_ptr<InputStreamBase> base_stream)
+{
+    // Set when the thread needs to be stopped.
+    auto thread_stop_event = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED(thread_stop_event);
+
+    auto status = HAILO_UNINITIALIZED;
+    auto stream = make_shared_nothrow<RemoteProcessInputStream>(std::move(base_stream),
+        thread_stop_event.release(), status);
+    CHECK_NOT_NULL_AS_EXPECTED(stream, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return stream;
+}
+
+RemoteProcessInputStream::~RemoteProcessInputStream()
+{
+    if (m_write_thread.joinable()) {
+        auto status = m_wait_for_activation.shutdown();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Shutdown thread failed with {}", status);
+            // continue
+        }
+
+        status = deactivate_stream();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed to deactivate stream with {}", status);
+            // continue
+        }
+
+        // Calling abort() to make sure the thread will exit
+        status = abort();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed to abort stream with {}", status);
+            // continue
+        }
+
+        m_write_thread.join();
+    }
+}
+
+// Forwarded to the wrapped base stream.
+hailo_stream_interface_t RemoteProcessInputStream::get_interface() const
+{
+    return m_base_stream->get_interface();
+}
+
+// Returns this wrapper's own timeout (the base stream itself runs with an infinite timeout,
+// set in the constructor).
+std::chrono::milliseconds RemoteProcessInputStream::get_timeout() const
+{
+    return m_timeout;
+}
+
+hailo_status RemoteProcessInputStream::set_timeout(std::chrono::milliseconds timeout)
+{
+    // Should affect only m_timeout, and not base stream.
+    m_timeout = timeout;
+    return HAILO_SUCCESS;
+}
+
+// Aborts pending buffer-pool operations; blocked callers get HAILO_STREAM_ABORTED_BY_USER.
+hailo_status RemoteProcessInputStream::abort()
+{
+    m_buffer_pool->abort();
+    return HAILO_SUCCESS;
+}
+
+// Re-arms the buffer pool after an abort().
+hailo_status RemoteProcessInputStream::clear_abort()
+{
+    m_buffer_pool->clear_abort();
+    return HAILO_SUCCESS;
+}
+
+Expected<size_t> RemoteProcessInputStream::get_buffer_frames_size() const
+{
+    // Must be called on main process
+    return make_unexpected(HAILO_INTERNAL_FAILURE);
+}
+
+// Forwarded to the wrapped base stream.
+bool RemoteProcessInputStream::is_scheduled()
+{
+    return m_base_stream->is_scheduled();
+}
+
+// Flushes all pending frames. A FLUSH-typed buffer is pushed through the hw queue
+// (the write thread translates it into base_stream->flush()), then we wait until every
+// buffer has returned to the host queue, i.e. all previously queued writes were consumed.
+// The timeout scales with pool capacity since up to `capacity` frames may be pending.
+hailo_status RemoteProcessInputStream::flush()
+{
+    const auto flush_timeout = m_timeout * m_buffer_pool->capacity();
+
+    // Get available buffer. We don't use the buffer, just use it to send flush request
+    auto write_buffer = m_buffer_pool->dequeue_host_buffer(flush_timeout);
+    if (HAILO_STREAM_ABORTED_BY_USER == write_buffer.status()) {
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+    CHECK_EXPECTED_AS_STATUS(write_buffer);
+
+    // Set flush property. Will be cleared by writer.
+    write_buffer->type = RemoteProcessBufferPool::SharedBuffer::Type::FLUSH;
+
+    // Send flush request
+    auto status = m_buffer_pool->enqueue_hw_buffer(*write_buffer);
+    CHECK_SUCCESS(status);
+
+    // Now wait until available buffers is full
+    status = m_buffer_pool->wait_until_host_queue_full(flush_timeout);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Activation is forwarded to the base stream; the write thread observes it via
+// the core-op activated event (m_wait_for_activation).
+hailo_status RemoteProcessInputStream::activate_stream()
+{
+    return m_base_stream->activate_stream();
+}
+
+hailo_status RemoteProcessInputStream::deactivate_stream()
+{
+    return m_base_stream->deactivate_stream();
+}
+
+// Copies the user frame into a shared-memory buffer and hands it to the write thread
+// (which performs the actual base-stream write on the creating process).
+// Returns HAILO_STREAM_ABORTED_BY_USER if abort() was called, HAILO_TIMEOUT-like
+// failures propagate from the pool dequeue.
+hailo_status RemoteProcessInputStream::write_impl(const MemoryView &buffer)
+{
+    // Get available buffer
+    auto write_buffer = m_buffer_pool->dequeue_host_buffer(m_timeout);
+    if (HAILO_STREAM_ABORTED_BY_USER == write_buffer.status()) {
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+    CHECK_EXPECTED_AS_STATUS(write_buffer);
+
+    // memcpy to write buffer
+    CHECK(write_buffer->buffer.size() == buffer.size(), HAILO_INTERNAL_FAILURE, "Invalid buffer size");
+    memcpy(write_buffer->buffer.data(), buffer.data(), buffer.size());
+
+    // Send to write thread
+    auto status = m_buffer_pool->enqueue_hw_buffer(*write_buffer);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Wraps base_stream with a fork-shared buffer pool and a writer thread so the actual
+// hw writes always run on the process that created this object.
+// On any failure `status` is set to the error and the object must not be used.
+RemoteProcessInputStream::RemoteProcessInputStream(std::shared_ptr<InputStreamBase> base_stream,
+    EventPtr thread_stop_event, hailo_status &status) :
+        InputStreamBase(base_stream->get_layer_info(), base_stream->get_interface(),
+                        base_stream->get_core_op_activated_event(), status),
+        m_base_stream(base_stream),
+        m_timeout(m_base_stream->get_timeout()),
+        m_wait_for_activation(m_base_stream->get_core_op_activated_event(), thread_stop_event)
+{
+    if (HAILO_SUCCESS != status) {
+        // Failure on base class
+        return;
+    }
+
+    // Set infinite timeout on the base stream - the write will exit only on abort/deactivate.
+    // It doesn't affect timeout for this class write function (m_timeout).
+    auto set_timeout_status = m_base_stream->set_timeout(HAILO_INFINITE_TIMEOUT);
+    if (HAILO_SUCCESS != set_timeout_status) {
+        LOGGER__ERROR("Failed setting base stream timeout {}", set_timeout_status);
+        status = set_timeout_status;
+        return;
+    }
+
+    // Not all streams supports get_buffer_frames_size, fallback to default.
+    auto queue_size_exp = m_base_stream->get_buffer_frames_size();
+    const auto queue_size = queue_size_exp ? *queue_size_exp : DEFAULT_QUEUE_SIZE;
+
+    auto buffer_pool = RemoteProcessBufferPool::create(HAILO_H2D_STREAM, base_stream->get_frame_size(), queue_size);
+    if (!buffer_pool) {
+        LOGGER__ERROR("Failed creating buffer pool {}", buffer_pool.status());
+        status = buffer_pool.status();
+        return;
+    }
+    m_buffer_pool = buffer_pool.release();
+
+    // Launch the thread
+    m_write_thread = std::thread([this]() { run_write_thread(); });
+    status = HAILO_SUCCESS;
+}
+
+void RemoteProcessInputStream::run_write_thread()
+{
+    OsUtils::set_current_thread_name("STREAM_WRITE");
+
+    while (true) {
+        auto status = m_wait_for_activation.wait(HAILO_INFINITE_TIMEOUT);
+        if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+            // Shutdown the thread
+            return;
+        }
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed wait for activation {}", status);
+            return;
+        }
+
+        status = write_single_buffer();
+        if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
+            continue;
+        } else if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failure on read thread {}", status);
+            break;
+        }
+    }
+}
+
+hailo_status RemoteProcessInputStream::write_single_buffer()
+{
+    auto ready_buffer = m_buffer_pool->dequeue_hw_buffer(HAILO_INFINITE_TIMEOUT);
+    if (!ready_buffer) {
+        // Log on caller (if unexpected status)
+        return ready_buffer.status();
+    }
+
+    hailo_status status = HAILO_UNINITIALIZED;
+    if (RemoteProcessBufferPool::SharedBuffer::Type::DATA == ready_buffer->type) {
+        status = m_base_stream->write(ready_buffer->buffer);
+    } else if (RemoteProcessBufferPool::SharedBuffer::Type::FLUSH == ready_buffer->type) {
+        ready_buffer->type = RemoteProcessBufferPool::SharedBuffer::Type::DATA; // clear flush mark.
+        status = m_base_stream->flush();
+    } else {
+        LOGGER__ERROR("Got invalid buffer type");
+        status = HAILO_INTERNAL_FAILURE;
+    }
+
+    if (HAILO_SUCCESS != status) {
+        // If the read fails, we need to return the buffer to the host queue for later writes.
+        auto enqueue_status = m_buffer_pool->enqueue_host_buffer(*ready_buffer);
+        if (HAILO_SUCCESS != enqueue_status) {
+            LOGGER__ERROR("Fail to enqueue buffer back after read was fail {}", enqueue_status);
+            // continue
+        }
+
+        return status;
+    }
+
+    // buffer is now available
+    status = m_buffer_pool->enqueue_host_buffer(*ready_buffer);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+/** Output stream **/
+// Creates a RemoteProcessOutputStream wrapping base_stream. The stop event is used by
+// the destructor to break the reader thread out of its activation wait.
+Expected<std::shared_ptr<RemoteProcessOutputStream>> RemoteProcessOutputStream::create(
+    std::shared_ptr<OutputStreamBase> base_stream)
+{
+    // Set when the thread needs to be stopped.
+    auto thread_stop_event = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED(thread_stop_event);
+
+    auto status = HAILO_UNINITIALIZED;
+    auto stream = make_shared_nothrow<RemoteProcessOutputStream>(std::move(base_stream),
+        thread_stop_event.release(), status);
+    CHECK_NOT_NULL_AS_EXPECTED(stream, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return stream;
+}
+
+// Destructor - gracefully stops the background read thread (same sequence as the
+// input-stream destructor): shutdown the activation wait, deactivate the base stream,
+// abort the buffer pool, then join. Errors are logged but ignored so join() is reached.
+RemoteProcessOutputStream::~RemoteProcessOutputStream()
+{
+    if (m_read_thread.joinable()) {
+        auto status = m_wait_for_activation.shutdown();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Shutdown thread failed with {}", status);
+            // continue
+        }
+
+        status = deactivate_stream();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed to deactivate stream with {}", status);
+            // continue
+        }
+
+        // Calling abort() to make sure the thread will exit
+        status = abort();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed to abort stream with {}", status);
+            // continue
+        }
+
+        m_read_thread.join();
+    }
+}
+
+// Forwarded to the wrapped base stream.
+hailo_stream_interface_t RemoteProcessOutputStream::get_interface() const
+{
+    return m_base_stream->get_interface();
+}
+
+// Returns this wrapper's own timeout (the base stream itself runs with an infinite timeout).
+std::chrono::milliseconds RemoteProcessOutputStream::get_timeout() const
+{
+    return m_timeout;
+}
+
+hailo_status RemoteProcessOutputStream::set_timeout(std::chrono::milliseconds timeout)
+{
+    // Should affect only m_timeout, and not base stream.
+    m_timeout = timeout;
+    return HAILO_SUCCESS;
+}
+
+// Aborts pending buffer-pool operations; blocked callers get HAILO_STREAM_ABORTED_BY_USER.
+hailo_status RemoteProcessOutputStream::abort()
+{
+    m_buffer_pool->abort();
+    return HAILO_SUCCESS;
+}
+
+// Re-arms the buffer pool after an abort().
+hailo_status RemoteProcessOutputStream::clear_abort()
+{
+    m_buffer_pool->clear_abort();
+    return HAILO_SUCCESS;
+}
+
+Expected<size_t> RemoteProcessOutputStream::get_buffer_frames_size() const
+{
+    // Must be called on main process
+    return make_unexpected(HAILO_INTERNAL_FAILURE);
+}
+
+// Forwarded to the wrapped base stream.
+bool RemoteProcessOutputStream::is_scheduled()
+{
+    return m_base_stream->is_scheduled();
+}
+
+// Activation is forwarded to the base stream; the read thread observes it via
+// the core-op activated event (m_wait_for_activation).
+hailo_status RemoteProcessOutputStream::activate_stream()
+{
+    return m_base_stream->activate_stream();
+}
+
+hailo_status RemoteProcessOutputStream::deactivate_stream()
+{
+    return m_base_stream->deactivate_stream();
+}
+
+hailo_status RemoteProcessOutputStream::register_interrupt_callback(const ProcessingCompleteCallback &)
+{
+    // register_interrupt_callback is an internal function (used by the scheduler)
+    // and it shouldn't be called from here.
+    return HAILO_NOT_SUPPORTED;
+}
+
+// Copies the next ready frame from shared memory into the user buffer and returns
+// the shared buffer to the hw queue for reuse by the read thread.
+// Returns HAILO_STREAM_ABORTED_BY_USER if abort() was called.
+hailo_status RemoteProcessOutputStream::read_impl(MemoryView buffer)
+{
+    auto read_buffer = m_buffer_pool->dequeue_host_buffer(m_timeout);
+    if (HAILO_STREAM_ABORTED_BY_USER == read_buffer.status()) {
+        return HAILO_STREAM_ABORTED_BY_USER;
+    }
+    CHECK_EXPECTED_AS_STATUS(read_buffer);
+
+    // memcpy to user
+    CHECK(read_buffer->buffer.size() == buffer.size(), HAILO_INTERNAL_FAILURE, "Invalid buffer size");
+    memcpy(buffer.data(), read_buffer->buffer.data(), buffer.size());
+
+    auto status = m_buffer_pool->enqueue_hw_buffer(*read_buffer);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+// Wraps base_stream with a fork-shared buffer pool and a reader thread so the actual
+// hw reads always run on the process that created this object.
+// On any failure `status` is set to the error and the object must not be used.
+RemoteProcessOutputStream::RemoteProcessOutputStream(std::shared_ptr<OutputStreamBase> base_stream,
+    EventPtr thread_stop_event, hailo_status &status) :
+        OutputStreamBase(base_stream->get_layer_info(), base_stream->get_interface(),
+                         base_stream->get_core_op_activated_event(), status),
+        m_base_stream(base_stream),
+        m_timeout(m_base_stream->get_timeout()),
+        m_wait_for_activation(m_base_stream->get_core_op_activated_event(), thread_stop_event)
+{
+    if (HAILO_SUCCESS != status) {
+        // Failure on base class
+        return;
+    }
+
+    // Set infinite timeout on the base stream - the read will exit only on abort/deactivate.
+    // It doesn't affect timeout for this class read function (m_timeout).
+    auto set_timeout_status = m_base_stream->set_timeout(HAILO_INFINITE_TIMEOUT);
+    if (HAILO_SUCCESS != set_timeout_status) {
+        LOGGER__ERROR("Failed setting base stream timeout {}", set_timeout_status);
+        status = set_timeout_status;
+        return;
+    }
+
+    // Not all streams supports get_buffer_frames_size, fallback to default.
+    auto queue_size_exp = m_base_stream->get_buffer_frames_size();
+    auto queue_size = queue_size_exp ? *queue_size_exp : DEFAULT_QUEUE_SIZE;
+
+    auto buffer_pool = RemoteProcessBufferPool::create(HAILO_D2H_STREAM, base_stream->get_frame_size(), queue_size);
+    if (!buffer_pool) {
+        LOGGER__ERROR("Failed creating buffer pool {}", buffer_pool.status());
+        status = buffer_pool.status();
+        return;
+    }
+    m_buffer_pool = buffer_pool.release();
+
+
+    // Launch the thread
+    m_read_thread = std::thread([this]() { run_read_thread(); });
+
+    status = HAILO_SUCCESS;
+}
+
+// Background thread body: waits for core-op activation (or shutdown), then pumps
+// frames from the base stream into the host queue until shutdown is signaled.
+void RemoteProcessOutputStream::run_read_thread()
+{
+    OsUtils::set_current_thread_name("STREAM_READ");
+
+    // Breaks when the thread shutdown event is signaled.
+    while (true) {
+        auto status = m_wait_for_activation.wait(HAILO_INFINITE_TIMEOUT);
+        if (HAILO_SHUTDOWN_EVENT_SIGNALED == status) {
+            // Shutdown the thread
+            return;
+        }
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed wait for activation {}", status);
+            return;
+        }
+
+        status = read_single_buffer();
+        if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
+            // Abort/deactivation are part of the normal flow - go wait for the next activation.
+            continue;
+        } else if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failure on read thread {}", status);
+            break;
+        }
+    }
+}
+
+// Reads one frame from the base stream into a free shared buffer taken from the
+// hw queue, then publishes it on the host queue for read_impl() to consume.
+// On read failure the buffer is returned to the hw queue so it is not leaked.
+hailo_status RemoteProcessOutputStream::read_single_buffer()
+{
+    auto ready_buffer = m_buffer_pool->dequeue_hw_buffer(HAILO_INFINITE_TIMEOUT);
+    if (!ready_buffer) {
+        // Log on caller (if unexpected status)
+        return ready_buffer.status();
+    }
+
+    assert(RemoteProcessBufferPool::SharedBuffer::Type::DATA == ready_buffer->type);
+    auto status = m_base_stream->read(ready_buffer->buffer);
+    if (HAILO_SUCCESS != status) {
+        // If the read fails, we need to return the buffer to the hw queue for later reads.
+        auto enqueue_status = m_buffer_pool->enqueue_hw_buffer(*ready_buffer);
+        if (HAILO_SUCCESS != enqueue_status) {
+            LOGGER__ERROR("Fail to enqueue buffer back after read was fail {}", enqueue_status);
+            // continue
+        }
+
+        return status;
+    }
+
+    // buffer is now available
+    status = m_buffer_pool->enqueue_host_buffer(*ready_buffer);
+    CHECK_SUCCESS(status);
+
+    return HAILO_SUCCESS;
+}
+
+} /* namespace hailort */
diff --git a/hailort/libhailort/src/stream_common/remote_process_stream.hpp b/hailort/libhailort/src/stream_common/remote_process_stream.hpp
new file mode 100644 (file)
index 0000000..14e87e7
--- /dev/null
@@ -0,0 +1,203 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file remote_process_stream.hpp
+ * @brief Stream wrapper for multi process environment (i.e python api).
+ *        Using shared queue to make sure all low level operations are executed
+ *        on one process.
+ **/
+
+#ifndef _HAILO_REMOTE_PROCESS_STREAM_HPP_
+#define _HAILO_REMOTE_PROCESS_STREAM_HPP_
+
+#include "common/fork_support.hpp"
+
+#include "stream_common/stream_internal.hpp"
+
+#include "common/utils.hpp"
+#include "common/circular_buffer.hpp"
+
+#include "hailo/buffer.hpp"
+
+namespace hailort
+{
+
+// A pair of fixed-size buffer queues (hw-side / host-side) intended to live in
+// fork-shared memory (inherits SharedAllocatedObject), synchronized with a shared
+// mutex + condition variable so parent and forked child processes can exchange
+// frame buffers.
+class RemoteProcessBufferPool final : public SharedAllocatedObject {
+public:
+   struct SharedBuffer {
+
+        enum class Type {
+            DATA,
+            FLUSH, // For input streams, don't use the buffer content, just flush the stream.
+        };
+
+        MemoryView buffer;
+        Type type;
+    };
+
+    // We always use unique_ptr to make sure the buffer is allocated on shared memory.
+    // queue_size must be some (power-of-2 minus 1) in order to fit CircularArray.
+    static Expected<std::unique_ptr<RemoteProcessBufferPool>> create(hailo_stream_direction_t stream_direction,
+        size_t frame_size, size_t queue_size);
+
+    RemoteProcessBufferPool(hailo_stream_direction_t stream_direction, size_t frame_size, size_t queue_size,
+        hailo_status &status);
+
+    Expected<SharedBuffer> dequeue_hw_buffer(std::chrono::milliseconds timeout);
+    hailo_status enqueue_hw_buffer(SharedBuffer buffer);
+
+    Expected<SharedBuffer> dequeue_host_buffer(std::chrono::milliseconds timeout);
+    hailo_status enqueue_host_buffer(SharedBuffer buffer);
+    hailo_status wait_until_host_queue_full(std::chrono::milliseconds timeout);
+
+    // Wakes all waiters with HAILO_STREAM_ABORTED_BY_USER; clear_abort() re-arms the pool.
+    void abort();
+    void clear_abort();
+
+    // Queue capacity; both queues are created with the same capacity.
+    size_t capacity() const
+    {
+        assert(m_hw_buffers_queue.capacity() == m_host_buffers_queue.capacity());
+        return m_hw_buffers_queue.capacity();
+    }
+
+private:
+
+    // Waits on m_cv (with `lock` held) until cond() holds, abort is raised, or timeout
+    // expires. Returns HAILO_TIMEOUT / HAILO_STREAM_ABORTED_BY_USER / HAILO_SUCCESS.
+    template<typename CondFunc>
+    hailo_status cv_wait_for(std::unique_lock<RecursiveSharedMutex> &lock,
+        std::chrono::milliseconds timeout, CondFunc &&cond)
+    {
+        assert(lock.owns_lock());
+        bool done = m_cv.wait_for(lock, timeout, [this, cond]() {
+            if (m_is_aborted) {
+                return true;
+            }
+
+            return cond();
+        });
+        CHECK(done, HAILO_TIMEOUT, "Timeout waiting on cond variable");
+        if (m_is_aborted) {
+            return HAILO_STREAM_ABORTED_BY_USER;
+        }
+        return HAILO_SUCCESS;
+    }
+
+    // Guards memory allocation.
+    std::vector<BufferPtr> m_buffers_guard;
+
+    using BufferQueue = CircularArray<SharedBuffer, std::array<SharedBuffer, ONGOING_TRANSFERS_SIZE>>;
+
+    // On input streams - buffers with user data, ready to be sent to the hw.
+    // On output streams - buffers that are ready, the stream can receive into them.
+    BufferQueue m_hw_buffers_queue;
+
+    // On input streams - buffers that are ready, the user can write into them.
+    // On output streams - buffers with data from the hw, ready to be read by the user
+    BufferQueue m_host_buffers_queue;
+
+    RecursiveSharedMutex m_mutex;
+    SharedConditionVariable m_cv;
+
+    // Set by abort(), cleared by clear_abort(); checked inside cv_wait_for().
+    bool m_is_aborted;
+};
+
+
+// InputStream wrapper for multi-process environments (fork, e.g. pyhailort):
+// user-facing writes copy data through a fork-shared RemoteProcessBufferPool,
+// while a thread on the creating process performs the actual base-stream writes.
+class RemoteProcessInputStream : public InputStreamBase {
+public:
+    static Expected<std::shared_ptr<RemoteProcessInputStream>> create(std::shared_ptr<InputStreamBase> base_stream);
+    virtual ~RemoteProcessInputStream();
+
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override
+    {
+        // Buffer mode needs to be set by the parent process (since buffers can be allocated only there) either manually
+        // or automatically. On this class, the mode will be set to OWNING automatically on the first write.
+        CHECK(buffer_mode == StreamBufferMode::OWNING, HAILO_INVALID_ARGUMENT,
+            "RemoteProcessInputStream streams supports only sync api");
+        return HAILO_SUCCESS;
+    }
+
+    virtual hailo_stream_interface_t get_interface() const override;
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+    virtual Expected<size_t> get_buffer_frames_size() const override;
+    virtual bool is_scheduled() override;
+    virtual hailo_status flush() override;
+
+    virtual hailo_status activate_stream() override;
+    virtual hailo_status deactivate_stream() override;
+
+
+    RemoteProcessInputStream(std::shared_ptr<InputStreamBase> base_stream, EventPtr thread_stop_event,
+        hailo_status &status);
+
+    virtual hailo_status write_impl(const MemoryView &buffer) override;
+protected:
+
+    void run_write_thread();
+    hailo_status write_single_buffer();
+
+    std::shared_ptr<InputStreamBase> m_base_stream;
+    std::chrono::milliseconds m_timeout;
+
+    // Runs on parent, execute writes
+    std::thread m_write_thread;
+
+    // Store as unique_ptr to allow shared memory
+    std::unique_ptr<RemoteProcessBufferPool> m_buffer_pool;
+
+    // Waits for core-op activation or the thread-stop event (used by the write thread).
+    WaitOrShutdown m_wait_for_activation;
+};
+
+class RemoteProcessOutputStream : public OutputStreamBase {
+public:
+    static Expected<std::shared_ptr<RemoteProcessOutputStream>> create(std::shared_ptr<OutputStreamBase> base_stream);
+
+    virtual ~RemoteProcessOutputStream();
+
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override
+    {
+        // Buffer mode needs to be set by the parent process (since buffers can be allocated only there) either manually
+        // or automatically. On this class, the mode will be set to OWNING automatically on the first write.
+        CHECK(buffer_mode == StreamBufferMode::OWNING, HAILO_INVALID_ARGUMENT,
+            "RemoteProcessInputStream streams supports only sync api");
+        return HAILO_SUCCESS;
+    }
+
+    virtual hailo_stream_interface_t get_interface() const override;
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+    virtual Expected<size_t> get_buffer_frames_size() const override;
+    virtual bool is_scheduled() override;
+
+    virtual hailo_status activate_stream() override;
+    virtual hailo_status deactivate_stream() override;
+
+    virtual hailo_status register_interrupt_callback(const ProcessingCompleteCallback &);
+
+    RemoteProcessOutputStream(std::shared_ptr<OutputStreamBase> base_stream, EventPtr thread_stop_event,
+        hailo_status &status);
+
+    virtual hailo_status read_impl(MemoryView buffer) override;
+protected:
+
+    void run_read_thread();
+    hailo_status read_single_buffer();
+
+    std::shared_ptr<OutputStreamBase> m_base_stream;
+    std::chrono::milliseconds m_timeout;
+
+    // Runs on parent, execute reads
+    std::thread m_read_thread;
+
+    std::unique_ptr<RemoteProcessBufferPool> m_buffer_pool;
+
+    WaitOrShutdown m_wait_for_activation;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_REMOTE_PROCESS_STREAM_HPP_ */
index 909fbbf58c80fe965249b3a1326b7a6a0152737a..c2fa06a8a974216b4645941d3fbaa4f2f116e823 100644 (file)
@@ -12,7 +12,6 @@
 #include "hailo/hailort_common.hpp"
 #include "hailo/transform.hpp"
 #include "common/utils.hpp"
-#include "stream_common/nms_stream_reader.hpp"
 
 #include <sstream>
 
@@ -24,22 +23,6 @@ hailo_status InputStream::flush()
     return HAILO_SUCCESS;
 }
 
-hailo_status InputStream::write(const MemoryView &buffer)
-{
-    CHECK(buffer.size() == get_frame_size(), HAILO_INVALID_ARGUMENT,
-        "write size {} must be {}", buffer.size(), get_frame_size());
-
-    CHECK(((buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT) == 0), HAILO_INVALID_ARGUMENT,
-        "Input must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
-
-    return write_impl(buffer);
-}
-
-hailo_status InputStream::write(const void *buffer, size_t size)
-{
-    return write(MemoryView::create_const(buffer, size));
-}
-
 hailo_status InputStream::wait_for_async_ready(size_t /* transfer_size */, std::chrono::milliseconds /* timeout */)
 {
     LOGGER__ERROR("wait_for_async_ready not implemented for sync API");
@@ -66,31 +49,6 @@ EventPtr &InputStream::get_network_group_activated_event()
     return get_core_op_activated_event();
 }
 
-hailo_status OutputStream::read_nms(void *buffer, size_t offset, size_t size)
-{
-    CHECK(size == get_info().hw_frame_size, HAILO_INSUFFICIENT_BUFFER,
-        "On nms stream buffer size should be {} (given size {})", get_info().hw_frame_size, size);
-
-    return NMSStreamReader::read_nms((*this), buffer, offset, size);
-}
-
-hailo_status OutputStream::read(MemoryView buffer)
-{
-    CHECK(buffer.size() == get_frame_size(), HAILO_INVALID_ARGUMENT, "Read size {} must be {}", buffer.size(),
-        get_frame_size());
-
-    if (get_info().format.order == HAILO_FORMAT_ORDER_HAILO_NMS){
-        return read_nms(buffer.data(), 0, buffer.size());
-    } else {
-        return read_impl(buffer);
-    }
-}
-
-hailo_status OutputStream::read(void *buffer, size_t size)
-{
-    return read(MemoryView(buffer, size));
-}
-
 hailo_status OutputStream::wait_for_async_ready(size_t /* transfer_size */, std::chrono::milliseconds /* timeout */)
 {
     LOGGER__ERROR("wait_for_async_ready not implemented for sync API");
diff --git a/hailort/libhailort/src/stream_common/stream_buffer_pool.hpp b/hailort/libhailort/src/stream_common/stream_buffer_pool.hpp
new file mode 100644 (file)
index 0000000..71c830e
--- /dev/null
@@ -0,0 +1,38 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file stream_buffer_pool.hpp
+ * @brief Base class for buffer pools used for stream objects.
+ **/
+
+#ifndef _HAILO_STREAM_BUFFER_POOL_HPP_
+#define _HAILO_STREAM_BUFFER_POOL_HPP_
+
+#include "hailo/expected.hpp"
+#include "stream_common/transfer_common.hpp"
+
+namespace hailort
+{
+
+// This class is NOT thread safe. All function calls must be synchronized.
+// Pure-virtual interface for buffer pools backing stream objects; concrete pools
+// decide allocation strategy, this interface only fixes the dequeue/enqueue contract.
+class StreamBufferPool {
+public:
+    virtual ~StreamBufferPool() = default;
+
+    // Upper bound on the number of buffers the pool can hold/queue.
+    virtual size_t max_queue_size() const = 0;
+
+    // Dequeues buffer from the pool, fails if there is no buffer ready.
+    virtual Expected<TransferBuffer> dequeue() = 0;
+
+    // Enqueues buffer into the pool. The enqueue order must be the same as the dequeue order.
+    virtual hailo_status enqueue(TransferBuffer &&buffer_info) = 0;
+
+    // Resets the pointers to its initial state. Any dequeued buffer is lost (and the data will be overriden).
+    virtual void reset_pointers() = 0;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_STREAM_BUFFER_POOL_HPP_ */
index 76a642191e554a4e348b199799d70666c3fd441b..2c34128f8f1bc74ed4f545404e94e72e3e3f5184 100644 (file)
@@ -13,6 +13,7 @@
 
 #include "common/utils.hpp"
 #include "common/logger_macros.hpp"
+#include "common/os_utils.hpp"
 
 #include "stream_common/stream_internal.hpp"
 
 namespace hailort
 {
 
-InputStreamBase::InputStreamBase(const hailo_stream_info_t &stream_info,
-        const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &core_op_activated_event) :
-    m_nn_stream_config(nn_stream_config), m_core_op_activated_event(core_op_activated_event)
+static Expected<BufferPtr> create_dma_able_buffer_from_user_size(void *addr, size_t size)
 {
-    m_stream_info = stream_info;
+    auto storage = DmaStorage::create_from_user_address(addr, size);
+    CHECK_EXPECTED(storage);
+
+    auto buffer = make_shared_nothrow<Buffer>(storage.release());
+    CHECK_NOT_NULL_AS_EXPECTED(buffer, HAILO_OUT_OF_HOST_MEMORY);
+
+    return buffer;
+}
+
+hailo_status InputStreamBase::write(const MemoryView &buffer)
+{
+    CHECK(buffer.size() == get_frame_size(), HAILO_INVALID_ARGUMENT,
+        "write size {} must be {}", buffer.size(), get_frame_size());
+
+    CHECK(((buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT) == 0), HAILO_INVALID_ARGUMENT,
+        "Input must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
+
+    return write_impl(buffer);
+}
+
+hailo_status InputStreamBase::write(const void *buffer, size_t size)
+{
+    return write(MemoryView::create_const(buffer, size));
 }
 
 hailo_status InputStreamBase::write_async(BufferPtr buffer, const TransferDoneCallback &user_callback)
@@ -37,19 +58,15 @@ hailo_status InputStreamBase::write_async(BufferPtr buffer, const TransferDoneCa
     auto wrapped_callback = [buffer, user_callback](hailo_status status) {
         user_callback(CompletionInfo{status, buffer->data(), buffer->size()});
     };
-    return write_async(TransferRequest{MemoryView(*buffer), wrapped_callback, buffer});
+    return write_async(TransferRequest{ buffer, wrapped_callback});
 }
 
 hailo_status InputStreamBase::write_async(const MemoryView &buffer, const TransferDoneCallback &user_callback)
 {
-    CHECK_ARG_NOT_NULL(buffer.data());
-    CHECK(buffer.size() == get_frame_size(), HAILO_INVALID_ARGUMENT, "Write size {} must be frame size {}", buffer.size(),
-        get_frame_size());
+    auto dma_able_buffer = create_dma_able_buffer_from_user_size(const_cast<uint8_t*>(buffer.data()), buffer.size());
+    CHECK_EXPECTED_AS_STATUS(dma_able_buffer);
 
-    auto wrapped_callback = [buffer, user_callback](hailo_status status) {
-        user_callback(CompletionInfo{status, const_cast<uint8_t*>(buffer.data()), buffer.size()});
-    };
-    return write_async(TransferRequest{buffer, wrapped_callback});
+    return write_async(dma_able_buffer.release(), user_callback);
 }
 
 hailo_status InputStreamBase::write_async(const void *buffer, size_t size, const TransferDoneCallback &user_callback)
@@ -73,11 +90,37 @@ bool InputStreamBase::is_scheduled()
     return false;
 }
 
+// TODO - HRT-11739 - remove vdevice related members/functions (get/set_vdevice_core_op_handle)
+vdevice_core_op_handle_t InputStreamBase::get_vdevice_core_op_handle()
+{
+    LOGGER__WARNING("VDevice InputStream::get_vedvice_core_op_handle is not implemented for this class.");
+    return INVALID_CORE_OP_HANDLE;
+}
+
+void InputStreamBase::set_vdevice_core_op_handle(vdevice_core_op_handle_t /*core_op_handle*/)
+{
+    LOGGER__WARNING("VDevice InputStream::set_vedvice_core_op_handle is not implemented for this class.");
+}
+
 OutputStreamBase::OutputStreamBase(const LayerInfo &layer_info, const hailo_stream_info_t &stream_info,
         const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &core_op_activated_event) :
     m_nn_stream_config(nn_stream_config), m_layer_info(layer_info), m_core_op_activated_event(core_op_activated_event)
 {
     m_stream_info = stream_info;
+    m_quant_infos = m_layer_info.quant_infos;
+}
+
+hailo_status OutputStreamBase::read(MemoryView buffer)
+{
+    CHECK(buffer.size() == get_frame_size(), HAILO_INVALID_ARGUMENT, "Read size {} must be {}", buffer.size(),
+        get_frame_size());
+
+    return read_impl(buffer);
+}
+
+hailo_status OutputStreamBase::read(void *buffer, size_t size)
+{
+    return read(MemoryView(buffer, size));
 }
 
 hailo_status OutputStreamBase::read_async(BufferPtr buffer, const TransferDoneCallback &user_callback)
@@ -90,7 +133,7 @@ hailo_status OutputStreamBase::read_async(BufferPtr buffer, const TransferDoneCa
     auto wrapped_callback = [buffer, user_callback](hailo_status status) {
         user_callback(CompletionInfo{status, const_cast<uint8_t*>(buffer->data()), buffer->size()});
     };
-    return read_async(TransferRequest{MemoryView(*buffer), wrapped_callback, buffer});
+    return read_async(TransferRequest{buffer, wrapped_callback});
 }
 
 hailo_status OutputStreamBase::read_async(MemoryView buffer, const TransferDoneCallback &user_callback)
@@ -102,7 +145,11 @@ hailo_status OutputStreamBase::read_async(MemoryView buffer, const TransferDoneC
     auto wrapped_callback = [buffer, user_callback](hailo_status status) {
         user_callback(CompletionInfo{status, const_cast<uint8_t*>(buffer.data()), buffer.size()});
     };
-    return read_async(TransferRequest{buffer, wrapped_callback});
+
+    auto dma_able_buffer = create_dma_able_buffer_from_user_size(buffer.data(), buffer.size());
+    CHECK_EXPECTED_AS_STATUS(dma_able_buffer);
+
+    return read_async(dma_able_buffer.release(), user_callback);
 }
 
 hailo_status OutputStreamBase::read_async(void *buffer, size_t size, const TransferDoneCallback &user_callback)
index b09340cfdce8544cc637eb821f0a6674c2283c31..c8d366355b03707caf711015b0b39007e5dd3bf0 100644 (file)
@@ -8,26 +8,28 @@
  *        "interface" (not technically an interface, but good enough). All internal input/output streams
  *        should inherit from the InputStreamBase/OutputStreamBase classes.
  *        Hence, the hierarchy is as follows:
- * 
+ *
  * InputStream                      (External "interface")
  * |-- InputStreamBase              (Base class)
- *     |-- VdmaInputStreamBase
+ *     |-- AsyncInputStreamBase
  *          |-- VdmaInputStream
- *          |-- VdmaAsyncInputStream
+ *          |-- ScheduledInputStream
  *     |-- EthernetInputStream
  *     |-- MipiInputStream
- *     |-- VDeviceInputStreamBase
- *          |-- See vdevice_stream.hpp for subclasses
- * 
+ *     |-- RemoteProcessInputStream (used for pyhailort to support fork)
+ *     |-- VDeviceNativeInputStream
+ *     |-- VDeviceInputStreamMultiplexerWrapper
  *
  * OutputStream                      (External "interface")
  * |-- OutputStreamBase              (Base class)
- *     |-- VdmaOutputStreamBase
+ *     |-- AsyncOutputStreamBase
  *          |-- VdmaOutputStream
- *          |-- VdmaAsyncOutputStream
+ *          |-- NmsOutputStream (wraps other OutputStreamBase, accumulate bbox/burst reads into frame reads).
+ *          |-- ScheduledOutputStream
  *     |-- EthernetOutputStream
- *     |-- VDeviceOutputStreamBase
- *          |-- See vdevice_stream.hpp for subclasses
+ *     |-- RemoteProcessOutputStream (used for pyhailort to support fork)
+ *     |-- VDeviceNativeOutputStream
+ *     |-- VDeviceOutputStreamMultiplexerWrapper
  **/
 
 #ifndef _STREAM_INTERNAL_HPP_
 #include "hailo/event.hpp"
 #include "hailo/hailort_common.hpp"
 
-#include "stream_common/async_common.hpp"
+#include "stream_common/transfer_common.hpp"
 #include "hef/hef_internal.hpp"
 #include "device_common/control_protocol.hpp"
 #include "hef/layer_info.hpp"
-#include "vdma/channel/boundary_channel.hpp"
-
-using device_id_t = std::string;
 
 
 namespace hailort
@@ -61,20 +60,42 @@ typedef struct hailo_mux_info_t{
     void* buffer;
 } hailo_mux_info_t;
 
-class InputStreamWrapper;
-class OutputStreamWrapper;
+
+enum class StreamBufferMode {
+    // The buffer mode is not determined yet.
+    // It will be set automatically based on the function calls (for example, calling write_async on an input stream
+    // forces usage of NOT_OWNING mode) or manually by calling set_buffer_mode()
+    NOT_SET,
+
+    // The buffer is owned by the stream. On each write/read call we copy the buffer into/from the stream buffer.
+    OWNING,
+
+    // The buffer is owned by the user. On each write_async/read_async call, we launch the transfer directly on the
+    // user buffer.
+    NOT_OWNING
+};
 
 class InputStreamBase : public InputStream
 {
 public:
     virtual ~InputStreamBase() = default;
 
+    // Manually set the buffer mode, fails if the mode was already set (and different from buffer_mode)
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) = 0;
+
     virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config()
     {
         return m_nn_stream_config;
     };
 
-    virtual hailo_status send_pending_buffer(const device_id_t &device_id)
+    const LayerInfo& get_layer_info()
+    {
+        return m_layer_info;
+    };
+
+    // Used by the scheduler to launch the transfer on the given activated device.
+    // TODO HRT-11679: remove this.
+    virtual hailo_status launch_transfer(const device_id_t &device_id)
     {
         (void)device_id;
         return HAILO_INVALID_OPERATION;
@@ -85,25 +106,55 @@ public:
         return make_unexpected(HAILO_INVALID_OPERATION);
     }
 
-    virtual Expected<size_t> get_pending_frames_count() const
+    const std::vector<hailo_quant_info_t> &get_quant_infos() const
     {
-        return make_unexpected(HAILO_INVALID_OPERATION);
+        return m_quant_infos;
     }
 
+    virtual hailo_status write(const MemoryView &buffer) override final;
+    virtual hailo_status write(const void *buffer, size_t size) override final;
+
+    virtual hailo_status write_impl(const MemoryView &buffer) = 0;
+
     virtual hailo_status write_async(BufferPtr buffer, const TransferDoneCallback &user_callback) override final;
     virtual hailo_status write_async(const MemoryView &buffer, const TransferDoneCallback &user_callback) override final;
     virtual hailo_status write_async(const void *buffer, size_t size, const TransferDoneCallback &user_callback) override final;
 
     virtual hailo_status write_async(TransferRequest &&transfer_request);
 
+    virtual EventPtr &get_core_op_activated_event() override;
+    virtual bool is_scheduled() override;
+
+    virtual hailo_status activate_stream() = 0;
+    virtual hailo_status deactivate_stream() = 0;
+
+    using ProcessingCompleteCallback = std::function<void()>;
+    virtual hailo_status register_interrupt_callback(const ProcessingCompleteCallback &)
+    {
+        return HAILO_INVALID_OPERATION;
+    }
+
+    virtual void notify_all()
+    {
+        // Do nothing, override on subclass if notify is needed.
+    }
+
+    virtual vdevice_core_op_handle_t get_vdevice_core_op_handle();
+
+    virtual void set_vdevice_core_op_handle(vdevice_core_op_handle_t core_op_handle);
+
     CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
 
 protected:
     explicit InputStreamBase(const LayerInfo &layer_info, hailo_stream_interface_t stream_interface,
-        EventPtr &&core_op_activated_event, hailo_status &status) :
+        EventPtr core_op_activated_event, hailo_status &status) :
+        m_layer_info(layer_info),
         m_core_op_activated_event(std::move(core_op_activated_event))
     {
-        m_stream_info = LayerInfoUtils::get_stream_info_from_layer_info(layer_info);
+        const auto &stream_infos = LayerInfoUtils::get_stream_infos_from_layer_info(layer_info);
+        assert(1 == stream_infos.size());
+        m_stream_info = stream_infos[0];
+        m_quant_infos = layer_info.quant_infos;
 
         auto max_periph_bytes_from_hef = HefConfigurator::max_periph_bytes_value(stream_interface);
         if (HAILO_SUCCESS != max_periph_bytes_from_hef.status()) {
@@ -124,30 +175,27 @@ protected:
         status = HAILO_SUCCESS;
     }
 
-    InputStreamBase(const hailo_stream_info_t &stream_info,
-        const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &core_op_activated_event);
-
-    virtual EventPtr &get_core_op_activated_event() override;
-    virtual bool is_scheduled() override;
+    LayerInfo m_layer_info;
 
 private:
-    friend class InputStreamWrapper;
 
     EventPtr m_core_op_activated_event;
 };
 
-
 class OutputStreamBase : public OutputStream
 {
 public:
     virtual ~OutputStreamBase() = default;
 
+    // Manually set the buffer mode, fails if the mode was already set (and different from buffer_mode)
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) = 0;
+
     virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config()
     {
         return m_nn_stream_config;
     };
 
-    virtual const LayerInfo& get_layer_info() override
+    const LayerInfo& get_layer_info()
     {
         return m_layer_info;
     };
@@ -157,31 +205,53 @@ public:
         return make_unexpected(HAILO_INVALID_OPERATION);
     }
 
-    virtual Expected<size_t> get_pending_frames_count() const
+    const std::vector<hailo_quant_info_t> &get_quant_infos() const override
     {
-        return make_unexpected(HAILO_INVALID_OPERATION);
+        return m_quant_infos;
     }
 
-    virtual hailo_status set_next_device_to_read(const device_id_t &device_id)
+    // Used by the scheduler to launch the transfer on the given activated device.
+    // TODO HRT-11679: remove this.
+    virtual hailo_status launch_transfer(const device_id_t &device_id)
     {
         (void)device_id;
         return HAILO_INVALID_OPERATION;
     }
 
+    virtual hailo_status read(MemoryView buffer) override;
+    virtual hailo_status read(void *buffer, size_t size) override;
+
+    virtual hailo_status read_impl(MemoryView buffer) = 0;
+
     virtual hailo_status read_async(BufferPtr buffer, const TransferDoneCallback &user_callback) override final;
     virtual hailo_status read_async(MemoryView buffer, const TransferDoneCallback &user_callback) override final;
     virtual hailo_status read_async(void *buffer, size_t size, const TransferDoneCallback &user_callback) override final;
 
     virtual hailo_status read_async(TransferRequest &&transfer_request);
 
+    virtual EventPtr &get_core_op_activated_event() override;
+    virtual bool is_scheduled() override;
+
+    virtual hailo_status activate_stream() = 0;
+    virtual hailo_status deactivate_stream() = 0;
+
+    using ProcessingCompleteCallback = std::function<void()>;
+    virtual hailo_status register_interrupt_callback(const ProcessingCompleteCallback &)
+    {
+        return HAILO_INVALID_OPERATION;
+    }
+
     CONTROL_PROTOCOL__nn_stream_config_t m_nn_stream_config;
 
 protected:
     explicit OutputStreamBase(const LayerInfo &layer_info, hailo_stream_interface_t stream_interface,
-        EventPtr &&core_op_activated_event, hailo_status &status) :
+        EventPtr core_op_activated_event, hailo_status &status) :
         m_layer_info(layer_info), m_core_op_activated_event(std::move(core_op_activated_event))
     {
-        m_stream_info = LayerInfoUtils::get_stream_info_from_layer_info(m_layer_info);
+        const auto &stream_infos = LayerInfoUtils::get_stream_infos_from_layer_info(layer_info);
+        assert(1 == stream_infos.size());
+        m_stream_info = stream_infos[0];
+        m_quant_infos = m_layer_info.quant_infos;
 
         auto max_periph_bytes_from_hef = HefConfigurator::max_periph_bytes_value(stream_interface);
         if (HAILO_SUCCESS != max_periph_bytes_from_hef.status()) {
@@ -204,14 +274,9 @@ protected:
     OutputStreamBase(const LayerInfo &layer_info, const hailo_stream_info_t &stream_info,
         const CONTROL_PROTOCOL__nn_stream_config_t &nn_stream_config, const EventPtr &core_op_activated_event);
 
-    virtual EventPtr &get_core_op_activated_event() override;
-    virtual bool is_scheduled() override;
-
     LayerInfo m_layer_info;
 
 private:
-    friend class OutputStreamWrapper;
-
     EventPtr m_core_op_activated_event;
 };
 
diff --git a/hailort/libhailort/src/stream_common/transfer_common.cpp b/hailort/libhailort/src/stream_common/transfer_common.cpp
new file mode 100644 (file)
index 0000000..06abb10
--- /dev/null
@@ -0,0 +1,125 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file transfer_common.cpp
+ **/
+
+#include "transfer_common.hpp"
+#include "vdma/memory/mapped_buffer.hpp"
+
+namespace hailort
+{
+
+
+TransferBuffer::TransferBuffer() :
+    m_base_buffer(nullptr),
+    m_size(0),
+    m_offset(0)
+{}
+
+TransferBuffer::TransferBuffer(BufferPtr base_buffer, size_t size, size_t offset) :
+    m_base_buffer(std::move(base_buffer)),
+    m_size(size),
+    m_offset(offset)
+{
+    assert(m_size <= m_base_buffer->size());
+    assert(m_offset < m_base_buffer->size());
+}
+
+TransferBuffer::TransferBuffer(BufferPtr base_buffer)
+    : TransferBuffer(base_buffer, base_buffer->size(), 0)
+{}
+
+Expected<vdma::MappedBufferPtr> TransferBuffer::map_buffer(HailoRTDriver &driver, HailoRTDriver::DmaDirection direction)
+{
+    CHECK_AS_EXPECTED(m_base_buffer->storage().type() == BufferStorage::Type::DMA, HAILO_INVALID_ARGUMENT,
+        "Buffer must be dma-able (provided buffer type {})", static_cast<int>(m_base_buffer->storage().type()));
+
+    // Map if not already mapped
+    auto is_new_mapping_exp = m_base_buffer->storage().dma_map(driver, to_hailo_dma_direction(direction));
+    CHECK_EXPECTED(is_new_mapping_exp);
+
+    return m_base_buffer->storage().get_dma_mapped_buffer(driver.device_id());
+}
+
+hailo_status TransferBuffer::copy_to(MemoryView buffer)
+{
+    CHECK(buffer.size() == m_size, HAILO_INTERNAL_FAILURE, "buffer size {} must be {}", buffer.size(), m_size);
+
+    auto continuous_parts = get_continuous_parts();
+    memcpy(buffer.data(), continuous_parts.first.data(), continuous_parts.first.size());
+    if (!continuous_parts.second.empty()) {
+        const size_t dest_offset = continuous_parts.first.size();
+        memcpy(buffer.data() + dest_offset, continuous_parts.second.data(), continuous_parts.second.size());
+    }
+    return HAILO_SUCCESS;
+}
+
+hailo_status TransferBuffer::copy_from(const MemoryView buffer)
+{
+    CHECK(buffer.size() == m_size, HAILO_INTERNAL_FAILURE, "buffer size {} must be {}", buffer.size(), m_size);
+
+    auto continuous_parts = get_continuous_parts();
+    memcpy(continuous_parts.first.data(), buffer.data(), continuous_parts.first.size());
+    if (!continuous_parts.second.empty()) {
+        const size_t src_offset = continuous_parts.first.size();
+        memcpy(continuous_parts.second.data(), buffer.data() + src_offset, continuous_parts.second.size());
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status TransferBuffer::synchronize(HailoRTDriver &driver, HailoRTDriver::DmaSyncDirection sync_direction)
+{
+    auto mapped_buffer = m_base_buffer->storage().get_dma_mapped_buffer(driver.device_id());
+    CHECK_EXPECTED_AS_STATUS(mapped_buffer);
+
+    auto continuous_parts = get_continuous_parts();
+
+    auto status = synchronize_part(*mapped_buffer, continuous_parts.first, sync_direction);
+    CHECK_SUCCESS(status);
+
+    if (!continuous_parts.second.empty()) {
+        status = synchronize_part(*mapped_buffer, continuous_parts.second, sync_direction);
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status TransferBuffer::synchronize_part(vdma::MappedBufferPtr &mapped_buffer, MemoryView continuous_part,
+    HailoRTDriver::DmaSyncDirection sync_direction)
+{
+    assert(!continuous_part.empty());
+    assert(continuous_part.data() >= m_base_buffer->data());
+
+    return mapped_buffer->synchronize(continuous_part.data() - m_base_buffer->data(), continuous_part.size(),
+        sync_direction);
+}
+
+bool TransferBuffer::is_wrap_around() const
+{
+    return (m_offset + m_size) > m_base_buffer->size();
+}
+
+std::pair<MemoryView, MemoryView> TransferBuffer::get_continuous_parts()
+{
+    if (is_wrap_around()) {
+        const auto size_to_end = m_base_buffer->size() - m_offset;
+        assert(size_to_end < m_size);
+        return std::make_pair(
+            MemoryView(m_base_buffer->data() + m_offset, size_to_end),
+            MemoryView(m_base_buffer->data(), m_size - size_to_end)
+        );
+
+    } else {
+        return std::make_pair(
+            MemoryView(m_base_buffer->data() + m_offset, m_size),
+            MemoryView()
+        );
+    }
+}
+
+} /* namespace hailort */
diff --git a/hailort/libhailort/src/stream_common/transfer_common.hpp b/hailort/libhailort/src/stream_common/transfer_common.hpp
new file mode 100644 (file)
index 0000000..77e7366
--- /dev/null
@@ -0,0 +1,73 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file transfer_common.hpp
+ * @brief Common types/functions for async api
+ **/
+
+#ifndef _HAILO_TRANSFER_COMMON_HPP_
+#define _HAILO_TRANSFER_COMMON_HPP_
+
+#include "hailo/stream.hpp"
+#include "hailo/buffer.hpp"
+
+#include "os/hailort_driver.hpp"
+
+namespace hailort
+{
+
+// Contains buffer that can be transferred. The buffer can be circular -
+// It lies at [m_offset, m_base_buffer.size()) and [0, m_size - (m_base_buffer.size() - m_offset)).
+class TransferBuffer final {
+public:
+
+    TransferBuffer();
+    TransferBuffer(BufferPtr base_buffer);
+    TransferBuffer(BufferPtr base_buffer, size_t size, size_t offset);
+
+    BufferPtr base_buffer() { return m_base_buffer; }
+    size_t offset() const { return m_offset; }
+    size_t size() const { return m_size; }
+
+    Expected<vdma::MappedBufferPtr> map_buffer(HailoRTDriver &driver, HailoRTDriver::DmaDirection direction);
+
+    hailo_status copy_to(MemoryView buffer);
+    hailo_status copy_from(const MemoryView buffer);
+
+    // Sync the buffer to the given direction, fails if the buffer is not mapped.
+    hailo_status synchronize(HailoRTDriver &driver, HailoRTDriver::DmaSyncDirection sync_direction);
+
+private:
+
+    // Sync a single continuous part
+    hailo_status synchronize_part(vdma::MappedBufferPtr &mapped_buffer, MemoryView continuous_part,
+        HailoRTDriver::DmaSyncDirection sync_direction);
+
+    bool is_wrap_around() const;
+
+    // Returns the continuous parts of the buffer.
+    // There are 2 cases:
+    //      1. If the buffer is_wrap_around(), both parts are valid, the first one starts at m_offset until
+    //         m_base_buffer end
+    //         The second part is the residue, starting from offset 0.
+    //      2. If the buffer is not circular, the first part will contain the buffer, the second will point to nullptr.
+    std::pair<MemoryView, MemoryView> get_continuous_parts();
+
+    BufferPtr m_base_buffer;
+    size_t m_size;
+    size_t m_offset;
+};
+
+// Internal function: a wrapper around the user callback that accepts the transfer status as an argument.
+using InternalTransferDoneCallback = std::function<void(hailo_status)>;
+
+struct TransferRequest {
+    TransferBuffer buffer;
+    InternalTransferDoneCallback callback;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_TRANSFER_COMMON_HPP_ */
index 54e520850572775bdfc46fc23327a8f17cfd6664..ed12efb571b6810b9e6d6c0b8a2faa01c64f5622 100644 (file)
@@ -13,6 +13,7 @@
 #include "hailo/hailort_common.hpp"
 #include "hailo/quantization.hpp"
 #include "hailo/hailort_defaults.hpp"
+#include "net_flow/ops/nms_post_process.hpp"
 
 #include "common/compiler_extensions_compat.hpp"
 #include "common/logger_macros.hpp"
 namespace hailort
 {
 
-#define HW_DATA_ALIGNMENT (8)
 #define RGB_FEATURES (3)
 
 
-bool TransformContextUtils::should_quantize(const hailo_stream_direction_t stream_direction, 
-    const hailo_format_t &src_format, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info)
+bool TransformContextUtils::should_quantize_by_flags(const hailo_stream_direction_t stream_direction,
+    const hailo_format_flags_t &src_format_flags, const hailo_format_flags_t &dst_format_flags)
+{
+    return (HAILO_H2D_STREAM == stream_direction) ?
+        (!(HAILO_FORMAT_FLAGS_QUANTIZED & src_format_flags) && (HAILO_FORMAT_FLAGS_QUANTIZED & dst_format_flags)) :
+        ((HAILO_FORMAT_FLAGS_QUANTIZED & src_format_flags) && !(HAILO_FORMAT_FLAGS_QUANTIZED & dst_format_flags));
+}
+
+Expected<bool> TransformContextUtils::should_quantize_by_type(const hailo_stream_direction_t stream_direction,
+    const hailo_format_type_t &src_format_type, const hailo_format_type_t &dst_format_type)
 {
     if (HAILO_H2D_STREAM == stream_direction) {
-        return (!(HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) &&
-            (HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags) &&
-            !((Quantization::is_identity_qp(quant_info)) && (src_format.type == dst_format.type)));
+        CHECK_AS_EXPECTED(HAILO_FORMAT_TYPE_FLOAT32 != dst_format_type, HAILO_INVALID_ARGUMENT,
+            "dst type cant be {} on input quantization", HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_FLOAT32));
+        CHECK_AS_EXPECTED(!((HAILO_FORMAT_TYPE_UINT8 == dst_format_type) && (HAILO_FORMAT_TYPE_UINT16 == src_format_type)),
+            HAILO_INVALID_ARGUMENT, "src type is {}, while the model compiled for type {}. Input quantization is impossible with this src type.",
+            HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_UINT16), HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_UINT8));
+        if ((src_format_type == HAILO_FORMAT_TYPE_UINT8) && (dst_format_type == HAILO_FORMAT_TYPE_UINT16)) {
+            LOGGER__WARNING("src type is {}, while the model compiled for type {}. libhailort will type-cast every value which might reduce performance. Consider recompiling the model.",
+                HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_UINT8), HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_UINT16));
+            return true;
+        }
+        return ((src_format_type != HAILO_FORMAT_TYPE_AUTO) && (dst_format_type != src_format_type));
     } else {
-        return (HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) && 
-            !(HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags);
+        CHECK_AS_EXPECTED(HAILO_FORMAT_TYPE_FLOAT32 != src_format_type, HAILO_INVALID_ARGUMENT,
+            "src type cant be {} on output de-quantization", HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_FLOAT32));
+        CHECK_AS_EXPECTED(!((HAILO_FORMAT_TYPE_UINT8 == dst_format_type) && (HAILO_FORMAT_TYPE_UINT16 == src_format_type)),
+            HAILO_INVALID_ARGUMENT, "The model compiled for type {}, while the dst type is {}. Output de-quantization is impossible to this dst type",
+            HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_UINT16), HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_UINT8));
+        if ((src_format_type == HAILO_FORMAT_TYPE_UINT8) && (dst_format_type == HAILO_FORMAT_TYPE_UINT16)) {
+            LOGGER__WARNING("The model compiled for type {}, while the dst type is {}. libhailort will type-cast every value which might reduce performance. Consider recompiling the model.",
+                HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_UINT8), HailoRTCommon::get_format_type_str(HAILO_FORMAT_TYPE_UINT16));
+            return true;
+        }
+        return ((dst_format_type != HAILO_FORMAT_TYPE_AUTO) && (dst_format_type != src_format_type));
+    }
+}
+
+Expected<bool> TransformContextUtils::should_quantize(const hailo_stream_direction_t stream_direction, 
+    const hailo_format_t &src_format, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &quant_infos)
+{
+    auto should_quantize_by_flags = TransformContextUtils::should_quantize_by_flags(stream_direction, src_format.flags, dst_format.flags);
+    auto should_quantize_by_type = TransformContextUtils::should_quantize_by_type(stream_direction, src_format.type, dst_format.type);
+    CHECK_EXPECTED(should_quantize_by_type);
+
+    if (should_quantize_by_type.value() != should_quantize_by_flags) {
+        auto direction_str = (HAILO_H2D_STREAM == stream_direction) ? "H2D" : "D2H";
+        auto quantization_by_type_needed_str = (should_quantize_by_type.value()) ? "" : "not ";
+        LOGGER__WARNING(
+            "{} stream is marked as quantized={}, but according to format types (src={}, dst={}), quantization is {}needed. Usage of HAILO_FORMAT_FLAGS_QUANTIZED is deprecated and will be ignored.",
+            direction_str, !should_quantize_by_flags, HailoRTCommon::get_format_type_str(src_format.type), HailoRTCommon::get_format_type_str(dst_format.type),
+            quantization_by_type_needed_str);
+    }
+
+    if (HAILO_H2D_STREAM == stream_direction) {
+        return (should_quantize_by_type.value() && !((are_all_quant_infos_identity(quant_infos)) && (src_format.type == dst_format.type)));
+    } else {
+        return should_quantize_by_type;
     }
 }
 
@@ -52,13 +100,11 @@ bool TransformContextUtils::should_transpose(const hailo_format_flags_t &src_fla
 bool TransformContextUtils::should_reorder(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
     const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format)
 {
-
     /* If shapes and format are different - need to use transform_context */
     if  (!((src_image_shape.features        == dst_image_shape.features) &&
            (src_image_shape.height          == dst_image_shape.height)   && 
            (src_image_shape.width           == dst_image_shape.width)    &&
-           (src_format.order                == dst_format.order)         &&
-           (src_format.type                 == dst_format.type))) {
+           (src_format.order                == dst_format.order))) {
         return true;
     }
 
@@ -66,53 +112,43 @@ bool TransformContextUtils::should_reorder(const hailo_3d_image_shape_t &src_ima
     Note: In order to add new order to the list - add test to test_transform with all shapes and types same 
     pre and post transform */
     switch (src_format.order) {
-        case HAILO_FORMAT_ORDER_NHWC:
-        case HAILO_FORMAT_ORDER_NHCW:
-        case HAILO_FORMAT_ORDER_NC:
-        case HAILO_FORMAT_ORDER_NHW:
-        case HAILO_FORMAT_ORDER_FCR:
-        case HAILO_FORMAT_ORDER_BAYER_RGB:
-        case HAILO_FORMAT_ORDER_12_BIT_BAYER_RGB:
-        case HAILO_FORMAT_ORDER_YUY2:
-            return false;
+        // Orders that are supported both on host and hw sides, and where transformation is still needed when shapes are equals
         case HAILO_FORMAT_ORDER_F8CR:
         case HAILO_FORMAT_ORDER_HAILO_NMS:
-        case HAILO_FORMAT_ORDER_RGB888:
-        case HAILO_FORMAT_ORDER_NCHW:
-        case HAILO_FORMAT_ORDER_NV12:
-        case HAILO_FORMAT_ORDER_NV21:
             return true;
         default:
-            LOGGER__WARN("Hailo Internal warning - Unrecognised order. Transformation optimization would not be activated");
-            /* In case user asks to add new order - please add this order to one of the true or false lists */
-            assert(false);
-            return true;
+            return false;
     }
 }
 
-bool TransformContextUtils::is_transformation_required(const hailo_stream_direction_t stream_direction,
+Expected<bool> TransformContextUtils::is_transformation_required(const hailo_stream_direction_t stream_direction,
     const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
-    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info)
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &quant_infos)
 {
+    if (quant_infos.size() == 1) {
+        CHECK_AS_EXPECTED(Quantization::is_qp_valid(quant_infos.at(0)), HAILO_INVALID_ARGUMENT,
+            "quant_info is invalid as the model was compiled with multiple quant_infos. Please compile again or provide a vector of quant_infos.");
+    }
     /* This function should be called after auto expend function */
     assert((HAILO_FORMAT_ORDER_AUTO != src_format.order) && (HAILO_FORMAT_ORDER_AUTO != dst_format.order));
     assert((HAILO_FORMAT_TYPE_AUTO != src_format.type) && (HAILO_FORMAT_TYPE_AUTO != dst_format.type));
 
-    return (should_quantize(stream_direction, src_format, dst_format, quant_info) ||
-        should_transpose(src_format.flags, dst_format.flags) ||
+    auto should_quantize_exp = should_quantize(stream_direction, src_format, dst_format, quant_infos);
+    CHECK_EXPECTED(should_quantize_exp);
+
+    return (*should_quantize_exp || should_transpose(src_format.flags, dst_format.flags) ||
         should_reorder(src_image_shape, src_format, dst_image_shape, dst_format));
 }
 
 std::string TransformContextUtils::make_quantization_description(hailo_format_type_t src_type,
-    hailo_format_type_t dst_type, hailo_quant_info_t quant_info)
+    hailo_format_type_t dst_type, const std::vector<hailo_quant_info_t> &quant_infos)
 {
     std::stringstream quant_description;
     quant_description << "Quantization - src_type: " << HailoRTCommon::get_format_type_str(src_type) <<
-        ", dst_type " << HailoRTCommon::get_format_type_str(dst_type) <<
-        ", qp_scale: " << quant_info.qp_scale <<
-        ", qp_zp: " << quant_info.qp_zp <<
-        ", limvals_min: " << quant_info.limvals_min <<
-        ", limvals_max: " << quant_info.limvals_max;
+        ", dst_type " << HailoRTCommon::get_format_type_str(dst_type);
+
+    quant_description <<", limvals_min: " << quant_infos[0].limvals_min <<
+        ", limvals_max: " << quant_infos[0].limvals_max;
 
     return quant_description.str();
 }
@@ -139,6 +175,16 @@ std::string TransformContextUtils::make_transpose_description(hailo_3d_image_sha
     return transpose_description.str();
 }
 
+bool TransformContextUtils::are_all_quant_infos_identity(const std::vector<hailo_quant_info_t> &quant_infos)
+{
+    for (const auto &quant_info : quant_infos) {
+        if (!Quantization::is_identity_qp(quant_info)) {
+            return false;
+        }
+    }
+    return true;
+}
+
 template<typename T, typename Q>
 void cast_elements_inplace(T *dst_ptr, uint32_t frame_size)
 {
@@ -148,6 +194,15 @@ void cast_elements_inplace(T *dst_ptr, uint32_t frame_size)
     }
 }
 
+template<typename T, typename Q>
+void cast_elements(const Q *src_ptr, T *dst_ptr, uint32_t frame_size)
+{
+    static_assert(sizeof(T) >= sizeof(Q), "cast_elements() cannot cast to smaller size");
+    for (uint32_t i = 0; i < frame_size; i++) {
+        dst_ptr[i] = (T)(*((Q*)src_ptr + i));
+    }
+}
+
 /* Transpose funcs */
 static hailo_3d_image_shape_t transposed_shape(const hailo_3d_image_shape_t &shape)
 {
@@ -372,6 +427,35 @@ void transform__h2d_NHWC_to_NHCW(const T *src_ptr, hailo_3d_image_shape_t *src_i
     }
 }
 
+template<typename T>
+void transform__h2d_NHCW_to_NHCW(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
+    T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
+{
+    /* Validate arguments */
+    ASSERT(NULL != src_ptr);
+    ASSERT(NULL != dst_ptr);
+
+    size_t src_frame_offset = 0;
+    size_t dst_frame_offset = 0;
+    uint32_t pad_size = dst_image_shape->width - src_image_shape->width;
+
+    /* Copy data while considering padding */
+    for (uint32_t r = 0; r < src_image_shape->height; r++) {
+        for (uint32_t f = 0; f < src_image_shape->features; f++) {
+            for (uint32_t c = 0; c < src_image_shape->width; c++) {
+                src_frame_offset = r * src_image_shape->width * src_image_shape->features + f * src_image_shape->width + c;
+                dst_frame_offset = r * dst_image_shape->width * dst_image_shape->features + f * dst_image_shape->width + c;
+                dst_ptr[dst_frame_offset] = src_ptr[src_frame_offset];
+            }
+            /* pad width to the specified width */
+            if (pad_size > 0) {
+                dst_frame_offset = r * dst_image_shape->width * dst_image_shape->features + f * dst_image_shape->width + src_image_shape->width;
+                memset(dst_ptr + dst_frame_offset, 0, pad_size * sizeof(T));
+            }
+        }
+    }
+}
+
 template<typename T>
 void transform__d2h_NHCW_to_NHWC(const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
     T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
@@ -434,15 +518,6 @@ void transform__d2h_NC_to_NC(const T *src_ptr, T *dst_ptr, hailo_3d_image_shape_
     memcpy(dst_ptr, src_ptr, dst_image_shape->features * sizeof(T));
 }
 
-static inline void transform__parse_and_copy_bbox (hailo_bbox_t *dst, uint64_t* proposal)
-{
-    dst->y_min = (uint16_t)((*((uint64_t*)proposal) & 0xfff000000000) >> 36);
-    dst->x_min = (uint16_t)((*((uint64_t*)proposal) & 0xfff000000) >> 24);
-    dst->y_max = (uint16_t)((*((uint64_t*)proposal) & 0xfff000) >> 12);
-    dst->x_max = (uint16_t)((*((uint64_t*)proposal) & 0xfff));
-    dst->score = (uint16_t)((*((uint64_t*)proposal) & 0xffff000000000000) >> 48);
-}
-
 void transform__d2h_NMS(const uint8_t *src_ptr, uint8_t *dst_ptr, const hailo_nms_info_t &nms_info, std::vector<size_t> &chunk_offsets)
 {
     /* Validate arguments */
@@ -492,7 +567,7 @@ void transform__d2h_NMS(const uint8_t *src_ptr, uint8_t *dst_ptr, const hailo_nm
             src_offset += sizeof(nms_bbox_counter_t);
 
             for (bbox_index = 0; bbox_index < class_bboxes_count; bbox_index++) {
-                transform__parse_and_copy_bbox((hailo_bbox_t *)(dst_ptr + dst_offset), (uint64_t*)(src_ptr + src_offset));
+                net_flow::NmsPostProcessOp::transform__parse_and_copy_bbox((hailo_bbox_t *)(dst_ptr + dst_offset), (uint64_t*)(src_ptr + src_offset));
                 src_offset += bbox_size;
                 dst_offset += sizeof(hailo_bbox_t);
             }
@@ -535,6 +610,7 @@ void transform__h2d_F8CR(const T *src_ptr, hailo_3d_image_shape_t *src_image_sha
     /* Validate arguments */
     ASSERT(NULL != src_ptr);
     ASSERT(NULL != dst_ptr);
+    ASSERT(0 == (dst_image_shape->features % HW_DATA_ALIGNMENT));
 
     uint32_t src_row_size = src_image_shape->width * src_image_shape->features;
     uint32_t dst_row_size = dst_image_shape->width * dst_image_shape->features;
@@ -665,7 +741,7 @@ hailo_status transform__h2d_NCHW_to_NHCW(
           "NCHW_to_NHCW Transform height src/dst should be the same");
     CHECK(src_image_shape->width <= dst_image_shape->width, HAILO_INVALID_ARGUMENT,
           "NCHW_to_NHCW Transform src width should be smaller/equal than dst width");
-    CHECK((dst_image_shape->width % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
+    CHECK(((dst_image_shape->width * sizeof(T)) % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
           "NCHW_to_NHCW Transform dst width must be aligned to {}", HW_DATA_ALIGNMENT);
 
     size_t width_size = src_image_shape->width;
@@ -690,41 +766,6 @@ hailo_status transform__h2d_NCHW_to_NHCW(
     return HAILO_SUCCESS;
 }
 
-template<typename T>
-hailo_status transform__d2h_NHCW_to_NCHW(
-    const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
-    T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
-{
-    /* Validate arguments */
-    ASSERT(NULL != src_ptr);
-    ASSERT(NULL != dst_ptr);
-    CHECK(src_image_shape->features == dst_image_shape->features, HAILO_INVALID_ARGUMENT,
-          "NCHW_to_NHCW Transform features src/dst should be the same");
-    CHECK(src_image_shape->height == dst_image_shape->height, HAILO_INVALID_ARGUMENT,
-          "NCHW_to_NHCW Transform height src/dst should be the same");
-    CHECK(dst_image_shape->width <= src_image_shape->width, HAILO_INVALID_ARGUMENT,
-          "NCHW_to_NHCW Transform dst width should be smaller/equal than src width");
-    CHECK((src_image_shape->width % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
-          "NCHW_to_NHCW Transform src width must be aligned to {}", HW_DATA_ALIGNMENT);
-
-    size_t width_size = dst_image_shape->width;
-    for (uint32_t r = 0; r < src_image_shape->height; r++) {
-        for (uint32_t c = 0; c < src_image_shape->features; c++) {
-            // Copy width
-            T *dst = dst_ptr +
-                dst_image_shape->width * dst_image_shape->height * c +
-                dst_image_shape->width * r;
-            const T *src = src_ptr +
-                src_image_shape->features * src_image_shape->width * r +
-                src_image_shape->width * c;
-
-            std::copy_n(src, width_size, dst);
-        }
-    }
-
-    return HAILO_SUCCESS;
-}
-
 template<typename T>
 hailo_status transform__d2h_argmax_NHCW_to_NHW(const T *src_ptr, const hailo_3d_image_shape_t &src_image_shape,
     T *dst_ptr, const hailo_3d_image_shape_t &dst_image_shape)
@@ -780,8 +821,10 @@ hailo_status transform__h2d_YUY2_to_YUY2(const T *src_ptr, T *dst_ptr, uint32_t
     /* Validate arguments */
     ASSERT(NULL != src_ptr);
     ASSERT(NULL != dst_ptr);
+    
+    auto shape_size_in_bytes = shape_size * sizeof(T);
 
-    CHECK((shape_size % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
+    CHECK((shape_size_in_bytes % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
           "YUY2_to_YUY2 Transform shape_size must be aligned to {}", HW_DATA_ALIGNMENT);
 
     std::copy_n(src_ptr, shape_size, dst_ptr);
@@ -860,19 +903,8 @@ hailo_status InputTransformContext::quantize_stream(const void *src_ptr, void *q
 
     switch (m_src_format.type) {
         case HAILO_FORMAT_TYPE_UINT8:
-            if (HAILO_FORMAT_TYPE_UINT8 == m_dst_format.type) {
-                Quantization::quantize_input_buffer<uint8_t, uint8_t>((uint8_t*)src_ptr, (uint8_t*)quant_buffer, shape_size, m_dst_quant_info);
-            }
-            else {
-                return HAILO_INVALID_OPERATION;
-            }
-            break;
-        case HAILO_FORMAT_TYPE_UINT16:
-            if (HAILO_FORMAT_TYPE_UINT8 == m_dst_format.type) {
-                Quantization::quantize_input_buffer<uint16_t, uint8_t>((uint16_t*)src_ptr, (uint8_t *)quant_buffer, shape_size, m_dst_quant_info);
-            }
-            else if (HAILO_FORMAT_TYPE_UINT16 == m_dst_format.type) {
-                Quantization::quantize_input_buffer<uint16_t, uint16_t>((uint16_t*)src_ptr, (uint16_t *)quant_buffer, shape_size, m_dst_quant_info);
+            if (HAILO_FORMAT_TYPE_UINT16 == m_dst_format.type) {
+                cast_elements<uint16_t, uint8_t>(static_cast<const uint8_t*>(src_ptr), static_cast<uint16_t*>(quant_buffer), shape_size);
             }
             else {
                 return HAILO_INVALID_OPERATION;
@@ -880,10 +912,10 @@ hailo_status InputTransformContext::quantize_stream(const void *src_ptr, void *q
             break;
         case HAILO_FORMAT_TYPE_FLOAT32:
             if (HAILO_FORMAT_TYPE_UINT8 == m_dst_format.type) {
-                Quantization::quantize_input_buffer<float32_t, uint8_t>((float32_t*)src_ptr, (uint8_t*)quant_buffer, shape_size, m_dst_quant_info);
+                Quantization::quantize_input_buffer<float32_t, uint8_t>((float32_t*)src_ptr, (uint8_t*)quant_buffer, shape_size, m_dst_quant_infos[0]);
             }
             else if (HAILO_FORMAT_TYPE_UINT16 == m_dst_format.type) {
-                Quantization::quantize_input_buffer<float32_t, uint16_t>((float32_t*)src_ptr, (uint16_t*)quant_buffer, shape_size, m_dst_quant_info);
+                Quantization::quantize_input_buffer<float32_t, uint16_t>((float32_t*)src_ptr, (uint16_t*)quant_buffer, shape_size, m_dst_quant_infos[0]);
             }
             else {
                 return HAILO_INVALID_OPERATION;
@@ -901,34 +933,10 @@ hailo_status FrameOutputTransformContext::quantize_stream(const void *dst_ptr)
     auto shape_size = HailoRTCommon::get_shape_size(m_dst_image_shape);
 
     switch (m_dst_format.type) {
-        case HAILO_FORMAT_TYPE_UINT8:
-            if (HAILO_FORMAT_TYPE_UINT8 == m_src_format.type) {
-                if (m_are_all_qps_the_same) {
-                    Quantization::dequantize_output_buffer_in_place<uint8_t, uint8_t>((uint8_t*)dst_ptr, shape_size, m_dst_quant_info);
-                } else {
-                    dequantize_output_by_feature<uint8_t, uint8_t>((uint8_t*)dst_ptr, shape_size, m_quant_info_per_feature, m_quant_infos_rep_count);
-                }
-            }
-            else {
-                return HAILO_INVALID_OPERATION;
-            }
-            break;
         case HAILO_FORMAT_TYPE_UINT16:
             if (HAILO_FORMAT_TYPE_UINT8 == m_src_format.type) {
-                if (m_are_all_qps_the_same) {
-                    Quantization::dequantize_output_buffer_in_place<uint16_t, uint8_t>((uint16_t*)dst_ptr, shape_size, m_dst_quant_info);
-                } else {
-                    dequantize_output_by_feature<uint16_t, uint8_t>((uint16_t*)dst_ptr, shape_size, m_quant_info_per_feature, m_quant_infos_rep_count);
-                }
-            }
-            else if (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type) {
-                if (m_are_all_qps_the_same) {
-                    Quantization::dequantize_output_buffer_in_place<uint16_t, uint16_t>((uint16_t*)dst_ptr, shape_size, m_dst_quant_info);
-                } else {
-                    dequantize_output_by_feature<uint16_t, uint16_t>((uint16_t*)dst_ptr, shape_size, m_quant_info_per_feature, m_quant_infos_rep_count);
-                }
-            }
-            else {
+                cast_elements_inplace<uint16_t, uint8_t>((uint16_t*)dst_ptr, shape_size);
+            } else {
                 return HAILO_INVALID_OPERATION;
             }
             break;
@@ -937,14 +945,14 @@ hailo_status FrameOutputTransformContext::quantize_stream(const void *dst_ptr)
             if (HAILO_FORMAT_ORDER_NHW != m_dst_format.order) {
                 if (HAILO_FORMAT_TYPE_UINT8 == m_src_format.type) {
                     if (m_are_all_qps_the_same) {
-                        Quantization::dequantize_output_buffer_in_place<float32_t, uint8_t>((float32_t*)dst_ptr, shape_size, m_dst_quant_info);
+                        Quantization::dequantize_output_buffer_in_place<float32_t, uint8_t>((float32_t*)dst_ptr, shape_size, m_dst_quant_infos[0]);
                     } else {
                         dequantize_output_by_feature<float32_t, uint8_t>((float32_t*)dst_ptr, shape_size, m_quant_info_per_feature, m_quant_infos_rep_count);
                     }
                 }
                 else if (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type) {
                     if (m_are_all_qps_the_same) {
-                        Quantization::dequantize_output_buffer_in_place<float32_t, uint16_t>((float32_t*)dst_ptr, shape_size, m_dst_quant_info);
+                        Quantization::dequantize_output_buffer_in_place<float32_t, uint16_t>((float32_t*)dst_ptr, shape_size, m_dst_quant_infos[0]);
                     } else {
                         dequantize_output_by_feature<float32_t, uint16_t>((float32_t*)dst_ptr, shape_size, m_quant_info_per_feature, m_quant_infos_rep_count);
                     }
@@ -991,6 +999,22 @@ hailo_status reorder_input_stream(const void *src_ptr, hailo_3d_image_shape_t sr
         return HAILO_SUCCESS;
     }
 
+    if ((HAILO_FORMAT_ORDER_NHCW == src_format.order) &&
+        (HAILO_FORMAT_ORDER_NHCW == dst_format.order)) {
+        switch (dst_format.type) {
+            case HAILO_FORMAT_TYPE_UINT8:
+                transform__h2d_NHCW_to_NHCW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+                break;
+            case HAILO_FORMAT_TYPE_UINT16:
+                transform__h2d_NHCW_to_NHCW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+                break;
+            default:
+                LOGGER__ERROR("Invalid src-buffer's type format");
+                return HAILO_INVALID_ARGUMENT;
+        }
+        return HAILO_SUCCESS;
+    }
+
     if ((HAILO_FORMAT_ORDER_NHWC == src_format.order) &&
         (HAILO_FORMAT_ORDER_NHWC == dst_format.order)) {
         switch (dst_format.type) {
@@ -1025,7 +1049,8 @@ hailo_status reorder_input_stream(const void *src_ptr, hailo_3d_image_shape_t sr
 
     if (((HAILO_FORMAT_ORDER_FCR == src_format.order) || (HAILO_FORMAT_ORDER_NHWC == src_format.order)) &&
         (HAILO_FORMAT_ORDER_FCR == dst_format.order)) {
-        assert(0 == (dst_image_shape.features % 8));
+        //Check that there is alignment for 8 bytes
+        assert(0 == ((HailoRTCommon::get_data_bytes(dst_format.type) * dst_image_shape.features) % HW_DATA_ALIGNMENT));
         switch (dst_format.type) {
             case HAILO_FORMAT_TYPE_UINT8:
                 transform__h2d_FCR<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
@@ -1203,7 +1228,8 @@ hailo_status reorder_input_stream(const void *src_ptr, hailo_3d_image_shape_t sr
     }
 
     LOGGER__ERROR("Unsupported input stream transformation from hailo_format_order_t "
-                "{} to hailo_format_order_t {}", src_format.order, dst_format.order);
+        "{} to hailo_format_order_t {}", HailoRTCommon::get_format_order_str(src_format.order),
+        HailoRTCommon::get_format_order_str(dst_format.order));
     return HAILO_INVALID_OPERATION;
 }
 
@@ -1298,10 +1324,10 @@ hailo_status reorder_output_stream(const void *src_ptr, hailo_3d_image_shape_t s
                (HAILO_FORMAT_ORDER_NCHW) == dst_format.order) {
             switch (src_format.type) {
                 case HAILO_FORMAT_TYPE_UINT8:
-                    transform__d2h_NHCW_to_NCHW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
+                    TransformContextUtils::transform__d2h_NHCW_to_NCHW<uint8_t>((uint8_t*)src_ptr, &src_image_shape, (uint8_t*)dst_ptr, &dst_image_shape);
                     break;
                 case HAILO_FORMAT_TYPE_UINT16:
-                    transform__d2h_NHCW_to_NCHW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
+                    TransformContextUtils::transform__d2h_NHCW_to_NCHW<uint16_t>((uint16_t*)src_ptr, &src_image_shape, (uint16_t*)dst_ptr, &dst_image_shape);
                     break;
                 default:
                     LOGGER__ERROR("Invalid src-buffer's type format");
@@ -1472,18 +1498,18 @@ hailo_status FrameOutputTransformContext::transform_inner(const void *src_ptr, v
 
         transposed_image_shape = transposed_shape(transposed_image_shape);
     }
-
-    if (m_should_quantize) {
-        auto status = quantize_stream(dst_ptr);
-        CHECK_SUCCESS(status);
-    }
     
     if (!(m_should_transpose || m_should_reorder)) {
         /* If quantize is the only step - need to copy src buffer to dst buffer */
-        auto frame_size = HailoRTCommon::get_frame_size(m_dst_image_shape, m_dst_format);
+        auto frame_size = HailoRTCommon::get_frame_size(m_src_image_shape, m_src_format);
         memcpy(dst_ptr, src_ptr, frame_size);
     }
 
+    if (m_should_quantize) {
+        auto status = quantize_stream(dst_ptr);
+        CHECK_SUCCESS(status);
+    }
+
     return HAILO_SUCCESS;
 }
 
@@ -1530,32 +1556,17 @@ hailo_status transform_demux_raw_frame(const void *src, uint32_t offset,
 hailo_status validate_input_transform_params(hailo_3d_image_shape_t src_image_shape, hailo_format_t src_format,
     hailo_3d_image_shape_t dst_image_shape, hailo_format_t dst_format)
 {
-    /* Check quantize flags - where quantize is no needed */
-    if ((HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) && !(HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags)) {
-        LOGGER__ERROR("Cannot dequantize input data");
-        return HAILO_INVALID_ARGUMENT;
-    }
-
-    /* Check for overscale transformation*/
-    CHECK((hailo_format_type_t::HAILO_FORMAT_TYPE_AUTO == src_format.type) || (src_format.type >= dst_format.type),
-        HAILO_INVALID_ARGUMENT, "Overscale transformation is not supported");
-
     /* Check device type */
     if (!((HAILO_FORMAT_TYPE_UINT16 == dst_format.type) || (HAILO_FORMAT_TYPE_UINT8 == dst_format.type))) {
-        LOGGER__ERROR("Unsupported device-side format_type {}", dst_format.type);
+        LOGGER__ERROR("Unsupported device-side format_type {}", HailoRTCommon::get_format_type_str(dst_format.type));
         return HAILO_INVALID_ARGUMENT;
     }
 
-    /* Check for scaled type without quantization flag*/
-    CHECK(!(HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) ||
-        ((src_format.type == dst_format.type) || (hailo_format_type_t::HAILO_FORMAT_TYPE_AUTO == src_format.type)),
-        HAILO_INVALID_ARGUMENT, "src-data-type ({}) is bigger than dst-data-type ({}), and must be marked as not quantized",
-        src_format.type, dst_format.type);
-
     /* Check reorder flags - where no reorder is needed */
     if ((HAILO_FORMAT_ORDER_FCR == src_format.order) &&
         (HAILO_FORMAT_ORDER_FCR == dst_format.order)) {
-        if (0 != (dst_image_shape.features % 8)) {
+        //Check that there is alignment for 8 bytes
+        if (0 != ((HailoRTCommon::get_data_bytes(dst_format.type) * dst_image_shape.features) % HW_DATA_ALIGNMENT)) {
             LOGGER__ERROR("HW features must be aligned to {}. passed hw features - {}",
                 HW_DATA_ALIGNMENT, dst_image_shape.features);
             return HAILO_INVALID_ARGUMENT;
@@ -1574,8 +1585,8 @@ hailo_status validate_input_transform_params(hailo_3d_image_shape_t src_image_sh
         }
     } else if ((HAILO_FORMAT_ORDER_YUY2 == src_format.order) &&
         (HAILO_FORMAT_ORDER_YUY2 == dst_format.order)) {
-        auto shape_size = HailoRTCommon::get_shape_size(src_image_shape);
-        CHECK((shape_size % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
+        auto shape_size_in_bytes = HailoRTCommon::get_shape_size(src_image_shape) * HailoRTCommon::get_data_bytes(src_format.type);
+        CHECK(shape_size_in_bytes % HW_DATA_ALIGNMENT == 0, HAILO_INVALID_ARGUMENT,
           "YUY2_to_YUY2 Transform shape_size must be aligned to {}", HW_DATA_ALIGNMENT);
     }
 
@@ -1585,28 +1596,12 @@ hailo_status validate_input_transform_params(hailo_3d_image_shape_t src_image_sh
 hailo_status validate_output_transform_params(hailo_3d_image_shape_t src_image_shape, hailo_format_t src_format,
     hailo_3d_image_shape_t dst_image_shape, hailo_format_t dst_format)
 {
-    /* Check quantize flags - where quantize is no needed */
-    if (!(HAILO_FORMAT_FLAGS_QUANTIZED & src_format.flags) && (HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags)) {
-        LOGGER__ERROR("Cannot quantize output data");
-        return HAILO_INVALID_ARGUMENT;
-    }
-
     /* Check device type */
     if (!((HAILO_FORMAT_TYPE_UINT16 == src_format.type) || (HAILO_FORMAT_TYPE_UINT8 == src_format.type))) {
-        LOGGER__ERROR("Unsupported device-side format_type {}", dst_format.type);
+        LOGGER__ERROR("Unsupported device-side format_type {}", HailoRTCommon::get_format_type_str(src_format.type));
         return HAILO_INVALID_ARGUMENT;
     }
 
-    /* Check for underscale transformation*/
-    CHECK((hailo_format_type_t::HAILO_FORMAT_TYPE_AUTO == dst_format.type) || (src_format.type <= dst_format.type),
-        HAILO_INVALID_ARGUMENT, "Underscale transformation is not supported");
-
-    /* Check for scaled type without quantization flag*/
-    CHECK(!(HAILO_FORMAT_FLAGS_QUANTIZED & dst_format.flags) ||
-        ((src_format.type == dst_format.type) || (hailo_format_type_t::HAILO_FORMAT_TYPE_AUTO == dst_format.type)),
-        HAILO_INVALID_ARGUMENT, "dst-data-type ({}) is bigger than src-data-type ({}), and must be marked as not quantized",
-        dst_format.type, src_format.type);
-
     /* Check reorder flags - where no reorder is needed */
     if ((HAILO_FORMAT_ORDER_BAYER_RGB == src_format.order) &&
         (HAILO_FORMAT_ORDER_BAYER_RGB == dst_format.order)) {
@@ -1620,14 +1615,29 @@ hailo_status validate_output_transform_params(hailo_3d_image_shape_t src_image_s
     return HAILO_SUCCESS;
 }
 
+Expected<bool> InputTransformContext::is_transformation_required(
+    const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format, 
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, 
+    const std::vector<hailo_quant_info_t> &quant_infos)
+{
+    auto host_format = HailoRTDefaults::expand_auto_format(src_format, dst_format);
+    auto val = TransformContextUtils::is_transformation_required(HAILO_H2D_STREAM, src_image_shape, host_format,
+        dst_image_shape, dst_format, quant_infos);
+    return val;
+}
+
 bool InputTransformContext::is_transformation_required(
     const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format, 
     const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, 
     const hailo_quant_info_t &quant_info)
 {
-    auto host_format = HailoRTDefaults::expand_auto_format(src_format, dst_format);
-    return TransformContextUtils::is_transformation_required(HAILO_H2D_STREAM, src_image_shape, host_format,
-        dst_image_shape, dst_format, quant_info);
+    LOGGER__WARNING("Using a deprecated function. Use is_transformation_required that recieves a vector of hailo_quant_info_t instead");
+    std::vector<hailo_quant_info_t> quant_infos = { quant_info };
+    auto expected_is_transformation_required = is_transformation_required(src_image_shape, src_format, dst_image_shape, dst_format, quant_infos);
+    if (!expected_is_transformation_required) {
+        return true;
+    }
+    return expected_is_transformation_required.release();
 }
 
 std::string InputTransformContext::description() const
@@ -1641,7 +1651,7 @@ std::string InputTransformContext::description() const
         } else {
             first = false;
         }
-        transform_description << TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_info);
+        transform_description << TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_infos);
     }
 
     if (m_should_transpose) {
@@ -1667,7 +1677,7 @@ std::string InputTransformContext::description() const
 
 Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
     const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
-    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info)
+    const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos)
 {
     auto status = validate_input_transform_params(src_image_shape, src_format, dst_image_shape, dst_format);
     CHECK_SUCCESS_AS_EXPECTED(status);
@@ -1678,9 +1688,10 @@ Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(c
     const auto dst_frame_size = HailoRTCommon::get_frame_size(dst_image_shape, dst_format);
 
     Buffer quant_buffer;
-    bool should_quantize = TransformContextUtils::should_quantize(HAILO_H2D_STREAM, src_format, dst_format, 
-        dst_quant_info);
-    if (should_quantize) {
+    auto should_quantize = TransformContextUtils::should_quantize(HAILO_H2D_STREAM, src_format, dst_format, 
+        dst_quant_infos);
+    CHECK_EXPECTED(should_quantize);
+    if (should_quantize.value()) {
         auto expected_quant_buffer = Buffer::create(src_frame_size, 0);
         CHECK_EXPECTED(expected_quant_buffer);
         quant_buffer = expected_quant_buffer.release();
@@ -1698,18 +1709,29 @@ Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(c
     auto should_reorder = TransformContextUtils::should_reorder(src_image_shape, src_format, dst_image_shape, dst_format);
 
     std::unique_ptr<InputTransformContext> transform_context(new (std::nothrow) InputTransformContext(src_frame_size, src_image_shape,
-        internal_src_format, dst_frame_size, dst_image_shape, dst_format, dst_quant_info, std::move(quant_buffer),
-        std::move(transpose_buffer), should_quantize, should_transpose, should_reorder));
+        internal_src_format, dst_frame_size, dst_image_shape, dst_format, dst_quant_infos, std::move(quant_buffer),
+        std::move(transpose_buffer), *should_quantize, should_transpose, should_reorder));
     CHECK_AS_EXPECTED(nullptr != transform_context, HAILO_OUT_OF_HOST_MEMORY);
 
     return transform_context;
 }
 
+Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
+    const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
+    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info)
+{
+    CHECK_AS_EXPECTED(Quantization::is_qp_valid(dst_quant_info), HAILO_INVALID_ARGUMENT,
+        "quant_info is invalid as the model was compiled with multiple quant_infos. Please compile again or provide a list of quant_infos.");
+
+    std::vector<hailo_quant_info_t> dst_quant_infos = { dst_quant_info };
+    return create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_infos);
+}
+
 Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(const hailo_stream_info_t &stream_info,
     const hailo_transform_params_t &transform_params)
 {
     return create(stream_info.shape, transform_params.user_buffer_format, stream_info.hw_shape, stream_info.format,
-        stream_info.quant_info);
+        std::vector<hailo_quant_info_t>{stream_info.quant_info});
 }
 
 Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(const hailo_stream_info_t &stream_info, bool quantized,
@@ -1718,9 +1740,19 @@ Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(c
     return create(stream_info, HailoRTDefaults::get_transform_params(quantized, format_type));
 }
 
+Expected<std::unique_ptr<InputTransformContext>> InputTransformContext::create(InputStream &input_stream,
+    const hailo_transform_params_t &transform_params)
+{
+    auto stream_info = input_stream.get_info();
+    auto src_quant_infos = input_stream.get_quant_infos();
+
+    return create(stream_info.shape, transform_params.user_buffer_format, stream_info.hw_shape,
+        stream_info.format, src_quant_infos);
+}
+
 InputTransformContext::InputTransformContext(size_t src_frame_size, const hailo_3d_image_shape_t &src_image_shape,
     const hailo_format_t &src_format, size_t dst_frame_size, const hailo_3d_image_shape_t &dst_image_shape,
-    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, Buffer &&quant_buffer,
+    const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, Buffer &&quant_buffer,
     Buffer &&transpose_buffer,const bool should_quantize, const bool should_transpose, const bool should_reorder) :
         m_src_frame_size(src_frame_size),
         m_src_image_shape(src_image_shape),
@@ -1728,7 +1760,7 @@ InputTransformContext::InputTransformContext(size_t src_frame_size, const hailo_
         m_dst_frame_size(dst_frame_size),
         m_dst_image_shape(dst_image_shape),
         m_dst_format(dst_format),
-        m_dst_quant_info(dst_quant_info),
+        m_dst_quant_infos(dst_quant_infos),
         m_should_quantize(should_quantize),
         m_should_transpose(should_transpose),
         m_should_reorder(should_reorder),
@@ -1760,51 +1792,96 @@ size_t InputTransformContext::get_dst_frame_size() const
     return m_dst_frame_size;
 }
 
+Expected<bool> OutputTransformContext::is_transformation_required(
+    const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
+    const std::vector<hailo_quant_info_t> &quant_infos)
+{
+    auto host_format = HailoRTDefaults::expand_auto_format(dst_format, src_format);
+    auto val = TransformContextUtils::is_transformation_required(HAILO_D2H_STREAM, src_image_shape, src_format, 
+        dst_image_shape, host_format, quant_infos);
+    return val;
+}
+
 bool OutputTransformContext::is_transformation_required(
-    const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format, 
-    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format, 
+    const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
+    const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format,
     const hailo_quant_info_t &quant_info)
 {
-    auto host_format = HailoRTDefaults::expand_auto_format(dst_format, src_format);
-    return TransformContextUtils::is_transformation_required(HAILO_D2H_STREAM, src_image_shape, src_format, 
-        dst_image_shape, host_format, quant_info);
+    LOGGER__WARNING("Using a deprecated function. Use is_transformation_required that recieves a vector of hailo_quant_info_t instead");
+    if (Quantization::is_qp_valid(quant_info)) {
+        LOGGER__ERROR("quant_info is invalid as the model was compiled with multiple quant_infos. Please compile again or provide a vector of quant_infos.");
+        return true;
+    }
+    std::vector<hailo_quant_info_t> quant_infos = { quant_info };
+    auto expected_is_transformation_required = is_transformation_required(src_image_shape, src_format, dst_image_shape, dst_format, quant_infos);
+    if(!expected_is_transformation_required) {
+        return true;
+    }
+    return expected_is_transformation_required.release();
 }
 
 Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
         const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
-        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info)
+        const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info)
 {
     auto status = validate_output_transform_params(src_image_shape, src_format, dst_image_shape, dst_format);
     CHECK_SUCCESS_AS_EXPECTED(status);
-    
+
+    if (dst_quant_infos.size() == 1) {
+        CHECK_AS_EXPECTED(Quantization::is_qp_valid(dst_quant_infos.at(0)), HAILO_INVALID_ARGUMENT,
+            "quant_info is invalid as the model was compiled with multiple quant_infos. Please compile again or provide a vector of quant_infos.");
+    }
+
     if (HAILO_FORMAT_ORDER_HAILO_NMS == src_format.order) {
-        return NMSOutputTransformContext::create(src_format, dst_format, dst_quant_info, nms_info);
+        return NMSOutputTransformContext::create(src_format, dst_format, dst_quant_infos, nms_info);
     }
 
-    return FrameOutputTransformContext::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_info);
+    return FrameOutputTransformContext::create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_infos);
+}
+
+Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
+        const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
+        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info)
+{
+    std::vector<hailo_quant_info_t> dst_quant_infos = { dst_quant_info };
+    return create(src_image_shape, src_format, dst_image_shape, dst_format, dst_quant_infos, nms_info);
 }
 
 Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(const hailo_stream_info_t &stream_info,
     const hailo_transform_params_t &transform_params)
 {
+    std::vector<hailo_quant_info_t> quant_infos = { stream_info.quant_info };
     return create(stream_info.hw_shape, stream_info.format, stream_info.shape,
-        transform_params.user_buffer_format, stream_info.quant_info, stream_info.nms_info);
+        transform_params.user_buffer_format, quant_infos, stream_info.nms_info);
 }
 
 Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(const hailo_stream_info_t &stream_info, bool quantized,
     hailo_format_type_t format_type)
 {
-    return create(stream_info, HailoRTDefaults::get_transform_params(quantized, format_type));
+    std::vector<hailo_quant_info_t> quant_infos = { stream_info.quant_info };
+    auto transform_params = HailoRTDefaults::get_transform_params(quantized, format_type);
+    return create(stream_info.hw_shape, stream_info.format, stream_info.shape,
+        transform_params.user_buffer_format, quant_infos, stream_info.nms_info);
+}
+
+Expected<std::unique_ptr<OutputTransformContext>> OutputTransformContext::create(OutputStream &output_stream,
+    const hailo_transform_params_t &transform_params)
+{
+    auto stream_info = output_stream.get_info();
+    auto dst_quant_infos = output_stream.get_quant_infos();
+    return create(stream_info.hw_shape, stream_info.format, stream_info.shape,
+        transform_params.user_buffer_format, dst_quant_infos, stream_info.nms_info);
 }
 
 OutputTransformContext::OutputTransformContext(size_t src_frame_size, const hailo_format_t &src_format, size_t dst_frame_size,
-    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const bool should_quantize, 
+    const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, const bool should_quantize, 
     const bool should_transpose, const bool should_reorder) :
         m_src_frame_size(src_frame_size),
         m_src_format(src_format),
         m_dst_frame_size(dst_frame_size),
         m_dst_format(dst_format),
-        m_dst_quant_info(dst_quant_info),
+        m_dst_quant_infos(dst_quant_infos),
         m_should_quantize(should_quantize),
         m_should_transpose(should_transpose),
         m_should_reorder(should_reorder)
@@ -1812,13 +1889,14 @@ OutputTransformContext::OutputTransformContext(size_t src_frame_size, const hail
 
 FrameOutputTransformContext::FrameOutputTransformContext(size_t src_frame_size, const hailo_3d_image_shape_t &src_image_shape,
     const hailo_format_t &src_format, size_t dst_frame_size, const hailo_3d_image_shape_t &dst_image_shape,
-    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, Buffer&& transpose_buffer,
+    const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, Buffer&& transpose_buffer,
     const bool should_quantize, const bool should_transpose, const bool should_reorder) :
-        OutputTransformContext(src_frame_size, src_format, dst_frame_size, dst_format, dst_quant_info, should_quantize, 
+        OutputTransformContext(src_frame_size, src_format, dst_frame_size, dst_format, dst_quant_infos, should_quantize, 
             should_transpose, should_reorder), m_src_image_shape(src_image_shape), m_dst_image_shape(dst_image_shape), 
             m_transpose_buffer(std::move(transpose_buffer))
 {
-    std::vector<hailo_quant_info_t> dst_quant_infos = { dst_quant_info }; // TODO: Get vector from HEF
+    // TODO: Add verification that quant infos size equals to features count (HRT-11052)
+
     bool are_all_qps_the_same = true;
     if (dst_quant_infos.size() > 1) {
         for (const auto &quant_info : dst_quant_infos) {
@@ -1857,14 +1935,14 @@ FrameOutputTransformContext::FrameOutputTransformContext(size_t src_frame_size,
         m_quant_infos_rep_count = dst_image_shape.width;
         break;
     default:
-        LOGGER__CRITICAL("Got unknown format order = {}", dst_format.order);
+        LOGGER__CRITICAL("Got unknown format order = {}", HailoRTCommon::get_format_order_str(dst_format.order));
         break;
     }
 }
 
 Expected<std::unique_ptr<OutputTransformContext>> FrameOutputTransformContext::create(const hailo_3d_image_shape_t &src_image_shape,
     const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
-    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info)
+    const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos)
 {
     const auto internal_dst_format = HailoRTDefaults::expand_auto_format(dst_format, src_format);
 
@@ -1872,7 +1950,8 @@ Expected<std::unique_ptr<OutputTransformContext>> FrameOutputTransformContext::c
     const auto dst_frame_size = HailoRTCommon::get_frame_size(dst_image_shape, internal_dst_format);
 
     auto should_quantize = TransformContextUtils::should_quantize(HAILO_D2H_STREAM, src_format, dst_format, 
-        dst_quant_info);
+        dst_quant_infos);
+    CHECK_EXPECTED(should_quantize);
 
     Buffer transpose_buffer;
     auto should_transpose = TransformContextUtils::should_transpose(src_format.flags, dst_format.flags);
@@ -1885,8 +1964,8 @@ Expected<std::unique_ptr<OutputTransformContext>> FrameOutputTransformContext::c
     auto should_reorder = TransformContextUtils::should_reorder(src_image_shape, src_format, dst_image_shape, dst_format);
 
     std::unique_ptr<OutputTransformContext> frame_transform_context = std::make_unique<FrameOutputTransformContext>(src_frame_size,
-        src_image_shape, src_format, dst_frame_size, dst_image_shape, internal_dst_format, dst_quant_info, std::move(transpose_buffer),
-        should_quantize, should_transpose, should_reorder);
+        src_image_shape, src_format, dst_frame_size, dst_image_shape, internal_dst_format, dst_quant_infos, std::move(transpose_buffer),
+        *should_quantize, should_transpose, should_reorder);
 
     CHECK_AS_EXPECTED(nullptr != frame_transform_context, HAILO_OUT_OF_HOST_MEMORY);
 
@@ -1894,14 +1973,14 @@ Expected<std::unique_ptr<OutputTransformContext>> FrameOutputTransformContext::c
 }
 
 NMSOutputTransformContext::NMSOutputTransformContext(size_t src_frame_size, const hailo_format_t &src_format, 
-    size_t dst_frame_size, const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info,
+    size_t dst_frame_size, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos,
     const hailo_nms_info_t &nms_info, Buffer &&quant_buffer, const bool should_quantize, const bool should_transpose) :
-        OutputTransformContext(src_frame_size, src_format, dst_frame_size, dst_format, dst_quant_info, should_quantize ,should_transpose, 
+        OutputTransformContext(src_frame_size, src_format, dst_frame_size, dst_format, dst_quant_infos, should_quantize ,should_transpose, 
         true), m_nms_info(nms_info), m_chunk_offsets(nms_info.chunks_per_frame, 0), m_quant_buffer(std::move(quant_buffer))
 {}
 
 Expected<std::unique_ptr<OutputTransformContext>> NMSOutputTransformContext::create(const hailo_format_t &src_format,
-    const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info)
+    const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_infos, const hailo_nms_info_t &nms_info)
 {
     // Validate params
     CHECK_AS_EXPECTED(HAILO_FORMAT_ORDER_HAILO_NMS == src_format.order, HAILO_INVALID_ARGUMENT,
@@ -1912,23 +1991,16 @@ Expected<std::unique_ptr<OutputTransformContext>> NMSOutputTransformContext::cre
     CHECK_AS_EXPECTED(HAILO_FORMAT_ORDER_HAILO_NMS == internal_dst_format.order, HAILO_INVALID_ARGUMENT,
         "Format order should be HAILO_FORMAT_ORDER_HAILO_NMS");
 
-    if (internal_dst_format.flags & HAILO_FORMAT_FLAGS_QUANTIZED) {
-        CHECK_AS_EXPECTED(HAILO_FORMAT_TYPE_UINT16 == internal_dst_format.type, HAILO_INVALID_ARGUMENT,
-            "Format order HAILO_FORMAT_ORDER_HAILO_NMS without quantization is allowed only with type HAILO_FORMAT_TYPE_UINT16");
-    }
-    else {
-        CHECK_AS_EXPECTED((HAILO_FORMAT_TYPE_UINT16 == internal_dst_format.type) || (HAILO_FORMAT_TYPE_FLOAT32 == internal_dst_format.type),
-            HAILO_INVALID_ARGUMENT,
-            "Format order HAILO_FORMAT_ORDER_HAILO_NMS with quantization is allowed only with type HAILO_FORMAT_TYPE_UINT16 or HAILO_FORMAT_TYPE_FLOAT32");
-    }
+    CHECK_AS_EXPECTED(HAILO_FORMAT_TYPE_FLOAT32 == internal_dst_format.type, HAILO_INVALID_ARGUMENT,
+        "Format order HAILO_FORMAT_ORDER_HAILO_NMS only supports format type of HAILO_FORMAT_TYPE_FLOAT32");
 
     const auto src_frame_size = HailoRTCommon::get_nms_hw_frame_size(nms_info);
     auto dst_frame_size = HailoRTCommon::get_nms_host_frame_size(nms_info, internal_dst_format);
 
     Buffer quant_buffer;
-    const bool should_quantize = (src_format.flags & HAILO_FORMAT_FLAGS_QUANTIZED) &&
-        !(internal_dst_format.flags & HAILO_FORMAT_FLAGS_QUANTIZED);
-    if (should_quantize) {
+    auto should_quantize = TransformContextUtils::should_quantize(HAILO_D2H_STREAM, src_format, dst_format, dst_quant_infos);
+    CHECK_EXPECTED(should_quantize);
+    if (*should_quantize) {
         dst_frame_size = HailoRTCommon::get_nms_host_frame_size(nms_info, internal_dst_format);
         auto expected_nms_quant_buffer = Buffer::create(dst_frame_size, 0);
         CHECK_EXPECTED(expected_nms_quant_buffer);
@@ -1938,8 +2010,8 @@ Expected<std::unique_ptr<OutputTransformContext>> NMSOutputTransformContext::cre
     auto should_transpose = TransformContextUtils::should_transpose(src_format.flags, dst_format.flags);
 
     std::unique_ptr<OutputTransformContext> nms_transform_context = std::make_unique<NMSOutputTransformContext>(src_frame_size,
-        src_format, dst_frame_size, internal_dst_format, dst_quant_info, nms_info, std::move(quant_buffer),
-        should_quantize, should_transpose);
+        src_format, dst_frame_size, internal_dst_format, dst_quant_infos, nms_info, std::move(quant_buffer),
+        *should_quantize, should_transpose);
     CHECK_AS_EXPECTED(nullptr != nms_transform_context, HAILO_OUT_OF_HOST_MEMORY);
 
     return nms_transform_context;
@@ -1971,49 +2043,18 @@ hailo_status NMSOutputTransformContext::transform(const MemoryView src, MemoryVi
 
     auto shape_size = HailoRTCommon::get_nms_host_shape_size(m_nms_info);
 
-    if (!(HAILO_FORMAT_FLAGS_QUANTIZED & m_src_format.flags) && (HAILO_FORMAT_FLAGS_QUANTIZED & m_dst_format.flags)) {
-        LOGGER__ERROR("Cannot quantize output data");
-        return HAILO_INVALID_OPERATION;
-    }
-
     if ((HAILO_FORMAT_FLAGS_TRANSPOSED & m_src_format.flags) || (HAILO_FORMAT_FLAGS_TRANSPOSED & m_dst_format.flags)) {
-        LOGGER__ERROR("NMS doesn't support transposed format currently");
+        LOGGER__ERROR("NMS doesn't support transposed format");
         return HAILO_INVALID_OPERATION;
     }
 
-    if (!((HAILO_FORMAT_FLAGS_QUANTIZED & m_src_format.flags) &&
-        !(HAILO_FORMAT_FLAGS_QUANTIZED & m_dst_format.flags))) {
-            transform__d2h_NMS((uint8_t*)src.data(), (uint8_t*)dst.data(), m_nms_info, m_chunk_offsets);
-    } 
-    else {
-        transform__d2h_NMS((uint8_t*)src.data(), m_quant_buffer.data(), m_nms_info, m_chunk_offsets);
-    }
+    auto dst_buffer = m_should_quantize ? m_quant_buffer.data() : dst.data();
+    transform__d2h_NMS(src.data(), dst_buffer, m_nms_info, m_chunk_offsets);
 
-    if ((HAILO_FORMAT_FLAGS_QUANTIZED & m_src_format.flags) && !(HAILO_FORMAT_FLAGS_QUANTIZED & m_dst_format.flags)) {
-        // NMS has to be uint16 or float32
-        switch (m_dst_format.type) {
-            case HAILO_FORMAT_TYPE_UINT16:
-                if (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type) {
-                    Quantization::dequantize_output_buffer_nms<uint16_t, uint16_t>((uint16_t*)m_quant_buffer.data(),
-                        (uint16_t*)dst.data(), shape_size, m_dst_quant_info, m_nms_info.number_of_classes);
-                } 
-                else {
-                    return HAILO_INVALID_OPERATION;
-                }
-                break;
-            case HAILO_FORMAT_TYPE_FLOAT32:
-                if (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type) {
-                    Quantization::dequantize_output_buffer_nms<float32_t, uint16_t>((uint16_t*)m_quant_buffer.data(),
-                        (float32_t*)dst.data(), shape_size, m_dst_quant_info, m_nms_info.number_of_classes);
-                }
-                else {
-                    return HAILO_INVALID_OPERATION;
-                }
-                break;
-            default:
-                LOGGER__ERROR("Invalid dst-buffer's type format");
-                return HAILO_INVALID_ARGUMENT;
-        }
+    if (m_should_quantize) {
+        CHECK((HAILO_FORMAT_TYPE_FLOAT32 == m_dst_format.type) && (HAILO_FORMAT_TYPE_UINT16 == m_src_format.type), HAILO_INTERNAL_FAILURE);
+        Quantization::dequantize_output_buffer_nms<float32_t, uint16_t>((uint16_t*)m_quant_buffer.data(),
+            (float32_t*)dst.data(), shape_size, m_dst_quant_infos[0], m_nms_info.number_of_classes); // TODO: Support NMS scale by feature (HRT-11052)
     }
 
     return HAILO_SUCCESS;
@@ -2030,7 +2071,7 @@ std::string FrameOutputTransformContext::description() const
         } else {
             first = false;
         }
-        transform_description << TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_info);
+        transform_description << TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_infos);
     }
 
     if (m_should_transpose) {
@@ -2063,7 +2104,7 @@ std::string NMSOutputTransformContext::description() const
 
     if (m_should_quantize) {
         transform_description << " | " <<
-            TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_info);
+            TransformContextUtils::make_quantization_description(m_src_format.type, m_dst_format.type, m_dst_quant_infos);
     }
 
     return transform_description.str();
@@ -2081,7 +2122,8 @@ size_t OutputTransformContext::get_dst_frame_size() const
 
 Expected<std::unique_ptr<OutputDemuxer>> OutputDemuxer::create(OutputStream &output_stream)
 {
-    auto obj = OutputDemuxerBase::create(output_stream.get_frame_size(), output_stream.get_layer_info());
+    auto &stream_base = static_cast<OutputStreamBase&>(output_stream);
+    auto obj = OutputDemuxerBase::create(stream_base.get_frame_size(), stream_base.get_layer_info());
     CHECK_EXPECTED(obj);
 
     auto obj_ptr = make_unique_nothrow<OutputDemuxerBase>(obj.release());
@@ -2106,7 +2148,9 @@ hailo_status OutputDemuxerBase::get_mux_info_from_layer_info_impl(hailo_mux_info
     uint32_t &offset, uint32_t height_ratio, std::vector<hailo_mux_info_t> &res, size_t &number_of_mux_infos)
 {
     // This is a recursive function with a maximum depth of HailoRTCommon::MUX_INFO_COUNT. 
-    mux_info.info = LayerInfoUtils::get_stream_info_from_layer_info(layer_info);
+    const auto &stream_infos = LayerInfoUtils::get_stream_infos_from_layer_info(layer_info);
+    assert(1 == stream_infos.size());
+    mux_info.info = stream_infos[0];
 
     mux_info.row_size = height_ratio * layer_info.hw_shape.width * layer_info.hw_shape.features * layer_info.hw_data_bytes;
     mux_info.row_counter = 0;
@@ -2156,15 +2200,15 @@ hailo_status fuse_buffers(const std::vector<MemoryView> &buffers,
         auto &info = *frame_pair.first;
         auto &buffer = *frame_pair.second;
         total_num_of_classes += info.number_of_classes * info.chunks_per_frame;
-        total_size_of_buffers += buffer.size();
+        // Remove extra burst at the end of every nms buffer
+        total_size_of_buffers += buffer.size() - (info.bbox_size * info.burst_size);
         CHECK(buffer.size() == HailoRTCommon::get_nms_hw_frame_size(info), HAILO_INVALID_ARGUMENT,
             "Source buffer size is not same as NMS HW frame size! ({} != {})", buffer.size(),
             HailoRTCommon::get_nms_hw_frame_size(info));
     }
 
-    // Each frame contributes 1 extra bbox_size at the end of it which acts as a delimiter, but we don't copy those to the fused buffer.
     // We keep the size of the dst buffer 1 bbox_size too big to stay in the format of not defused nms frames.
-    total_size_of_buffers -= (frames.size() - 1) * frames[0].first->bbox_size;
+    total_size_of_buffers += frames[0].first->bbox_size;
 
     CHECK(dst.size() == total_size_of_buffers, HAILO_INVALID_ARGUMENT,
         "Size of destination buffer is not same as the expected size of the fused frame! (size: {}, expected: {})",
index c1038a86b611663786ba427b543e1d1ce89231e3..4738807cae7b2927cbcdfd8760a7a0934227e697 100644 (file)
 namespace hailort
 {
 
+#define HW_DATA_ALIGNMENT (8)
+
 class HAILORTAPI TransformContextUtils final
 {
 public:
-    static bool is_transformation_required(const hailo_stream_direction_t stream_direction,
+    static Expected<bool> is_transformation_required(const hailo_stream_direction_t stream_direction,
         const hailo_3d_image_shape_t &src_image_shape, 
         const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape, 
-        const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info);
-    static bool should_quantize(const hailo_stream_direction_t stream_direction, 
-        const hailo_format_t &src_format, const hailo_format_t &dst_format, const hailo_quant_info_t &quant_info);
+        const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &quant_info);
+    static Expected<bool> should_quantize(const hailo_stream_direction_t stream_direction, 
+        const hailo_format_t &src_format, const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &quant_info);
     static bool should_transpose(const hailo_format_flags_t &src_flags, const hailo_format_flags_t &dst_flags);
     static bool should_reorder(const hailo_3d_image_shape_t &src_image_shape, const hailo_format_t &src_format,
         const hailo_3d_image_shape_t &dst_image_shape, const hailo_format_t &dst_format);
     static std::string make_quantization_description(hailo_format_type_t src_type, hailo_format_type_t dst_type,
-                                                    hailo_quant_info_t quant_info);
+                                                    const std::vector<hailo_quant_info_t> &quant_info);
     static std::string make_reorder_description(hailo_format_order_t src_order, hailo_3d_image_shape_t src_shape,
                                                 hailo_format_order_t dst_order, hailo_3d_image_shape_t dst_shape);
     static std::string make_transpose_description(hailo_3d_image_shape_t original_shape, hailo_3d_image_shape_t transposed_shape);
+    static bool are_all_quant_infos_identity(const std::vector<hailo_quant_info_t> &quant_infos);
+
+    template<typename T>
+    static hailo_status transform__d2h_NHCW_to_NCHW(
+        const T *src_ptr, hailo_3d_image_shape_t *src_image_shape,
+        T *dst_ptr, hailo_3d_image_shape_t *dst_image_shape)
+    {
+        /* Validate arguments */
+        ASSERT(NULL != src_ptr);
+        ASSERT(NULL != dst_ptr);
+        CHECK(src_image_shape->features == dst_image_shape->features, HAILO_INVALID_ARGUMENT,
+            "NHCW_to_NCHW Transform features src/dst should be the same");
+        CHECK(src_image_shape->height == dst_image_shape->height, HAILO_INVALID_ARGUMENT,
+            "NHCW_to_NCHW Transform height src/dst should be the same");
+        CHECK(dst_image_shape->width <= src_image_shape->width, HAILO_INVALID_ARGUMENT,
+            "NHCW_to_NCHW Transform dst width should be smaller than or equal to src width");
+        CHECK(((src_image_shape->width * sizeof(T)) % HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
+            "NHCW_to_NCHW Transform src width must be aligned to {}", HW_DATA_ALIGNMENT);
+
+        size_t width_size = dst_image_shape->width;
+        for (uint32_t r = 0; r < src_image_shape->height; r++) {
+            for (uint32_t c = 0; c < src_image_shape->features; c++) {
+                // Copy width
+                T *dst = dst_ptr +
+                    dst_image_shape->width * dst_image_shape->height * c +
+                    dst_image_shape->width * r;
+                const T *src = src_ptr +
+                    src_image_shape->features * src_image_shape->width * r +
+                    src_image_shape->width * c;
+
+                std::copy_n(src, width_size, dst);
+            }
+        }
+
+        return HAILO_SUCCESS;
+    }
+private:
+    static bool should_quantize_by_flags(const hailo_stream_direction_t stream_direction,
+        const hailo_format_flags_t &src_format_flags, const hailo_format_flags_t &dst_format_flags);
+    static Expected<bool> should_quantize_by_type(const hailo_stream_direction_t stream_direction,
+        const hailo_format_type_t &src_format_type, const hailo_format_type_t &dst_format_type);
 };
 
 class OutputDemuxerBase : public OutputDemuxer {
@@ -88,11 +131,11 @@ class HAILORTAPI FrameOutputTransformContext final : public OutputTransformConte
 public:
     static Expected<std::unique_ptr<OutputTransformContext>> create(const hailo_3d_image_shape_t &src_image_shape,
         const hailo_format_t &src_format, const hailo_3d_image_shape_t &dst_image_shape,
-        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info);
+        const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_info);
 
     FrameOutputTransformContext(size_t src_frame_size, const hailo_3d_image_shape_t &src_image_shape,
         const hailo_format_t &src_format, size_t dst_frame_size, const hailo_3d_image_shape_t &dst_image_shape,
-        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, Buffer&& transpose_buffer,
+        const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_info, Buffer&& transpose_buffer,
         const bool should_quantize, const bool should_transpose, const bool should_reorder);
 
     hailo_status transform_inner(const void *src_ptr, void *dst_ptr, MemoryView transpose_buffer);
@@ -130,10 +173,10 @@ class HAILORTAPI NMSOutputTransformContext final : public OutputTransformContext
 {
 public:
     static Expected<std::unique_ptr<OutputTransformContext>> create(const hailo_format_t &src_format, 
-        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info);
+        const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_info, const hailo_nms_info_t &nms_info);
 
     NMSOutputTransformContext(size_t src_frame_size, const hailo_format_t &src_format, size_t dst_frame_size,
-        const hailo_format_t &dst_format, const hailo_quant_info_t &dst_quant_info, const hailo_nms_info_t &nms_info, 
+        const hailo_format_t &dst_format, const std::vector<hailo_quant_info_t> &dst_quant_info, const hailo_nms_info_t &nms_info, 
         Buffer &&quant_buffer, const bool should_quantize, const bool should_transpose);
 
     virtual hailo_status transform(const MemoryView src, MemoryView dst) override;
index 21484873055c3877ee75301b0497bdf9d95c73c8..6283fb554719ea994da57137832fc2db21853ba4 100644 (file)
@@ -258,7 +258,7 @@ MemoryView::MemoryView(void *data, size_t size) :
 
 const MemoryView MemoryView::create_const(const void *data, size_t size)
 {
-    return std::move(MemoryView(const_cast<void *>(data), size));
+    return MemoryView(const_cast<void *>(data), size);
 }
 
 uint8_t* MemoryView::data() noexcept
index 7f8f17fc3fa7c1976b0dbdb60c0214a4d74f1d18..c036b392b27bf55735f01a9a9f3ac2b451a290f5 100644 (file)
@@ -15,8 +15,10 @@ namespace hailort
 
 // Needed for the linker
 const uint32_t HailoRTCommon::BBOX_PARAMS;
+const uint32_t HailoRTCommon::MASK_PARAMS;
 const uint32_t HailoRTCommon::MAX_DEFUSED_LAYER_COUNT;
 const size_t HailoRTCommon::HW_DATA_ALIGNMENT;
+const uint32_t HailoRTCommon::MAX_NMS_BURST_SIZE;
 
 Expected<hailo_device_id_t> HailoRTCommon::to_device_id(const std::string &device_id)
 {
@@ -43,4 +45,21 @@ Expected<std::vector<hailo_device_id_t>> HailoRTCommon::to_device_ids_vector(con
     return device_ids_vector;
 }
 
+uint32_t HailoRTCommon::get_nms_host_frame_size(const hailo_nms_shape_t &nms_shape, const hailo_format_t &format)
+{
+    auto shape_size = 0;
+    if (HAILO_FORMAT_ORDER_HAILO_NMS_WITH_BYTE_MASK == format.order) {
+        shape_size = get_nms_with_byte_mask_host_shape_size(nms_shape, format);
+    } else {
+        shape_size = get_nms_host_shape_size(nms_shape);
+    }
+    double frame_size = shape_size * get_format_data_bytes(format);
+    if (frame_size < UINT32_MAX) {
+        return static_cast<uint32_t>(frame_size);
+    } else{
+        LOGGER__WARNING("NMS host frame size calculated is larger than UINT32_MAX. Therefore the frame size is UINT32_MAX");
+        return UINT32_MAX;
+    }
+}
+
 } /* namespace hailort */
index f85abbaf2a8e630eb35d5525b646ce62348c4926..5cf85154ababc3bea1282875b664499b428d5fb1 100644 (file)
@@ -45,7 +45,8 @@ namespace hailort
 #define HAILORT_ANDROID_LOGGER_PATTERN ("%v")               // Android logger will print only message (additional info are built-in)
 
 #define HAILORT_LOGGER_PATH_ENV_VAR ("HAILORT_LOGGER_PATH")
-#define PERIODIC_LOGGER_FLUSH_TIME_IN_SECONDS (5)
+#define HAILORT_LOGGER_FLUSH_EVERY_PRINT_ENV_VAR ("HAILORT_LOGGER_FLUSH_EVERY_PRINT")
+#define PERIODIC_FLUSH_INTERVAL_IN_SECONDS (5)
 
 #ifdef _WIN32
 #define PATH_SEPARATOR "\\"
@@ -172,7 +173,8 @@ std::shared_ptr<spdlog::sinks::sink> HailoRTLogger::create_file_sink(const std::
 HailoRTLogger::HailoRTLogger(spdlog::level::level_enum console_level, spdlog::level::level_enum file_level, spdlog::level::level_enum flush_level) :
     m_console_sink(make_shared_nothrow<spdlog::sinks::stderr_color_sink_mt>()),
 #ifdef __ANDROID__
-    m_main_log_file_sink(make_shared_nothrow<spdlog::sinks::android_sink_mt>(HAILORT_NAME))
+    m_main_log_file_sink(make_shared_nothrow<spdlog::sinks::android_sink_mt>(HAILORT_NAME)),
+    m_local_log_file_sink(make_shared_nothrow<spdlog::sinks::null_sink_mt>())
 #else
     m_main_log_file_sink(create_file_sink(get_main_log_path(), HAILORT_LOGGER_FILENAME, true)),
     m_local_log_file_sink(create_file_sink(get_log_path(HAILORT_LOGGER_PATH_ENV_VAR), HAILORT_LOGGER_FILENAME, true))
@@ -202,14 +204,37 @@ HailoRTLogger::HailoRTLogger(spdlog::level::level_enum console_level, spdlog::le
     spdlog::set_default_logger(m_hailort_logger);
 }
 
+bool HailoRTLogger::should_flush_every_print(const std::string &flush_every_print_env_var)
+{
+    auto flush_every_print_c_str = std::getenv(flush_every_print_env_var.c_str());
+    if ((nullptr == flush_every_print_c_str) || (std::strlen(flush_every_print_c_str) == 0)) {
+        return false;
+    }
+    std::string flush_every_print_c_str_lower_case(flush_every_print_c_str);
+    for (char& ch : flush_every_print_c_str_lower_case) {
+        ch = static_cast<char>(std::tolower(ch));
+    }
+    if (strcmp(flush_every_print_c_str_lower_case.c_str(), "1") == 0) {
+        return true;
+    }
+    return false;
+}
+
 void HailoRTLogger::set_levels(spdlog::level::level_enum console_level, spdlog::level::level_enum file_level,
     spdlog::level::level_enum flush_level)
 {
     m_console_sink->set_level(console_level);
     m_main_log_file_sink->set_level(file_level);
     m_local_log_file_sink->set_level(file_level);
-    m_hailort_logger->flush_on(flush_level);
-    spdlog::flush_every(std::chrono::seconds(PERIODIC_LOGGER_FLUSH_TIME_IN_SECONDS));
+
+    bool flush_every_print = should_flush_every_print(HAILORT_LOGGER_FLUSH_EVERY_PRINT_ENV_VAR);
+    if (flush_every_print){
+        m_hailort_logger->flush_on(spdlog::level::debug);
+        std::cerr << "HailoRT warning: Flushing log file on every print. May reduce HailoRT performance!" << std::endl;
+    } else {
+        m_hailort_logger->flush_on(flush_level);
+    }
+    spdlog::flush_every(std::chrono::seconds(PERIODIC_FLUSH_INTERVAL_IN_SECONDS));
 }
 
 
index c40047e6c1d554f12bff0525825cef0559d127e0..3641b87ac2a511626a0f93b2d31fa87ba5c97fb9 100644 (file)
@@ -45,6 +45,7 @@ public:
     void operator=(HailoRTLogger const&) = delete;
 
     static std::string get_log_path(const std::string &path_env_var);
+    static bool should_flush_every_print(const std::string &flush_every_print_env_var);
     static std::string get_main_log_path();
     static std::shared_ptr<spdlog::sinks::sink> create_file_sink(const std::string &dir_path, const std::string &filename, bool rotate);
 
index cfd8f4120bf7117361975b45947f1273ff858bfb..57285628b4527fd3073c18e0704855885f66a4c2 100644 (file)
@@ -15,7 +15,7 @@
 
 #include "vdevice/scheduler/scheduler_base.hpp"
 
-namespace hailort 
+namespace hailort
 {
 
 struct Trace
@@ -35,11 +35,16 @@ struct InitTrace : Trace
     InitTrace() : Trace("init") {}
 };
 
+struct InitProfilerProtoTrace : Trace
+{
+    InitProfilerProtoTrace () : Trace("init_profiler_proto") {}
+};
+
 struct CoreOpIdleTrace : Trace
 {
     CoreOpIdleTrace(const device_id_t &device_id, scheduler_core_op_handle_t core_op_handle)
         : Trace("core_op_idle"), device_id(device_id), core_op_handle(core_op_handle)
-    {} 
+    {}
 
     device_id_t device_id;
     scheduler_core_op_handle_t core_op_handle;
@@ -49,26 +54,27 @@ struct AddDeviceTrace : Trace
 {
     AddDeviceTrace(const device_id_t &device_id, const std::string &device_arch)
         : Trace("add_device_trace"), device_id(device_id), device_arch(device_arch)
-    {} 
+    {}
 
     device_id_t device_id;
     std::string device_arch;
 };
 
-struct SchedulerStartTrace : Trace
+struct MonitorStartTrace : Trace
 {
-    SchedulerStartTrace(uint32_t device_count)
+    MonitorStartTrace(uint32_t device_count)
         : Trace("scheduler_start"), device_count(device_count)
-    {} 
+    {}
 
     uint32_t device_count = 0;
 };
 
 struct AddCoreOpTrace : Trace
 {
-    AddCoreOpTrace(const device_id_t &device_id, const std::string &core_op_name, uint64_t timeout, uint32_t threshold, scheduler_core_op_handle_t handle,
-    bool is_nms)
-        : Trace("add_core_op"), device_id(device_id), core_op_name(core_op_name), timeout(timeout), threshold(threshold), core_op_handle(handle), is_nms(is_nms) 
+    AddCoreOpTrace(const device_id_t &device_id, const std::string &core_op_name, uint64_t timeout, uint32_t threshold,
+        scheduler_core_op_handle_t handle, bool is_nms, int batch_size)
+        : Trace("add_core_op"), device_id(device_id), core_op_name(core_op_name), timeout(timeout), threshold(threshold),
+            core_op_handle(handle), is_nms(is_nms), batch_size(batch_size)
     {}
 
     device_id_t device_id;
@@ -76,40 +82,46 @@ struct AddCoreOpTrace : Trace
     uint64_t timeout = 0;
     uint32_t threshold = 0;
     scheduler_core_op_handle_t core_op_handle = INVALID_CORE_OP_HANDLE;
-    bool is_nms;
+    bool is_nms = false;
+    int batch_size = 0;
 };
 
 struct CreateCoreOpInputStreamsTrace : Trace
 {
-    CreateCoreOpInputStreamsTrace(const device_id_t &device_id, const std::string &core_op_name, const std::string &stream_name, uint32_t queue_size)
-        : Trace("create_input_stream"), device_id(device_id), core_op_name(core_op_name), stream_name(stream_name), queue_size(queue_size)
+    CreateCoreOpInputStreamsTrace(const device_id_t &device_id, const std::string &core_op_name, const std::string &stream_name, uint32_t queue_size,
+        scheduler_core_op_handle_t core_op_handle)
+        : Trace("create_input_stream"), device_id(device_id), core_op_name(core_op_name), stream_name(stream_name), queue_size(queue_size),
+        core_op_handle(core_op_handle)
     {}
 
     device_id_t device_id;
     std::string core_op_name;
     std::string stream_name;
     uint32_t queue_size;
+    scheduler_core_op_handle_t core_op_handle;
 };
 
 struct CreateCoreOpOutputStreamsTrace : Trace
 {
-    CreateCoreOpOutputStreamsTrace(const device_id_t &device_id, const std::string &core_op_name, const std::string &stream_name, uint32_t queue_size)
-        : Trace("create_output_stream"), device_id(device_id), core_op_name(core_op_name), stream_name(stream_name), queue_size(queue_size)
+    CreateCoreOpOutputStreamsTrace(const device_id_t &device_id, const std::string &core_op_name, const std::string &stream_name, uint32_t queue_size,
+        scheduler_core_op_handle_t core_op_handle)
+        : Trace("create_output_stream"), device_id(device_id), core_op_name(core_op_name), stream_name(stream_name), queue_size(queue_size),
+        core_op_handle(core_op_handle)
     {}
 
     device_id_t device_id;
     std::string core_op_name;
     std::string stream_name;
     uint32_t queue_size;
+    scheduler_core_op_handle_t core_op_handle;
 };
 
 struct WriteFrameTrace : Trace
 {
-    WriteFrameTrace(const device_id_t &device_id, scheduler_core_op_handle_t core_op_handle, const std::string &queue_name)
-        : Trace("write_frame"), device_id(device_id), core_op_handle(core_op_handle), queue_name(queue_name)
+    WriteFrameTrace(scheduler_core_op_handle_t core_op_handle, const std::string &queue_name)
+        : Trace("write_frame"), core_op_handle(core_op_handle), queue_name(queue_name)
     {}
 
-    device_id_t device_id;
     scheduler_core_op_handle_t core_op_handle;
     std::string queue_name;
 };
@@ -127,48 +139,83 @@ struct InputVdmaDequeueTrace : Trace
 
 struct ReadFrameTrace : Trace
 {
-    ReadFrameTrace(const device_id_t &device_id, scheduler_core_op_handle_t core_op_handle, const std::string &queue_name)
-        : Trace("read_frame"), device_id(device_id), core_op_handle(core_op_handle), queue_name(queue_name)
+    ReadFrameTrace(scheduler_core_op_handle_t core_op_handle, const std::string &queue_name)
+        : Trace("read_frame"), core_op_handle(core_op_handle), queue_name(queue_name)
     {}
 
-    std::string device_id;
     scheduler_core_op_handle_t core_op_handle;
     std::string queue_name;
 };
 
 struct OutputVdmaEnqueueTrace : Trace
 {
-    OutputVdmaEnqueueTrace(const device_id_t &device_id, scheduler_core_op_handle_t core_op_handle, const std::string &queue_name, uint32_t frames)
-        : Trace("output_vdma_enqueue"), device_id(device_id), core_op_handle(core_op_handle), queue_name(queue_name), frames(frames)
+    OutputVdmaEnqueueTrace(const device_id_t &device_id, scheduler_core_op_handle_t core_op_handle, const std::string &queue_name)
+        : Trace("output_vdma_enqueue"), device_id(device_id), core_op_handle(core_op_handle), queue_name(queue_name)
     {}
 
     device_id_t device_id;
     scheduler_core_op_handle_t core_op_handle;
     std::string queue_name;
-    uint32_t frames = 0;
 };
 
-struct ChooseCoreOpTrace : Trace
+struct SwitchCoreOpTrace : Trace
 {
-    ChooseCoreOpTrace(const device_id_t &device_id, scheduler_core_op_handle_t handle, bool threshold, bool timeout, core_op_priority_t priority)
-        : Trace("choose_core_op"), device_id(device_id), core_op_handle(handle), threshold(threshold), timeout(timeout), priority(priority)
+    SwitchCoreOpTrace(const device_id_t &device_id, scheduler_core_op_handle_t handle)
+        : Trace("switch_core_op"), device_id(device_id), core_op_handle(handle)
     {}
 
     device_id_t device_id;
     scheduler_core_op_handle_t core_op_handle;
-    bool threshold = false;
-    bool timeout = false;
-    core_op_priority_t priority;
 };
 
-struct SwitchCoreOpTrace : Trace
+struct SetCoreOpTimeoutTrace : Trace
 {
-    SwitchCoreOpTrace(const device_id_t &device_id, scheduler_core_op_handle_t handle)
-        : Trace("switch_core_op"), device_id(device_id), core_op_handle(handle)
+    SetCoreOpTimeoutTrace(vdevice_core_op_handle_t handle, const std::chrono::milliseconds timeout)
+        : Trace("set_timeout"), core_op_handle(handle), timeout(timeout)
+    {}
+
+    vdevice_core_op_handle_t core_op_handle;
+    std::chrono::milliseconds timeout;
+};
+
+struct SetCoreOpThresholdTrace : Trace
+{
+    SetCoreOpThresholdTrace(vdevice_core_op_handle_t handle, uint32_t threshold)
+        : Trace("set_threshold"), core_op_handle(handle), threshold(threshold)
     {}
 
+    vdevice_core_op_handle_t core_op_handle;
+    uint32_t threshold;
+};
+
+struct SetCoreOpPriorityTrace : Trace
+{
+    SetCoreOpPriorityTrace(vdevice_core_op_handle_t handle, uint8_t priority)
+        : Trace("set_priority"), core_op_handle(handle), priority(priority)
+    {}
+
+    vdevice_core_op_handle_t core_op_handle;
+    uint8_t priority;
+};
+
+struct OracleDecisionTrace : Trace
+{
+    OracleDecisionTrace(bool reason_idle, device_id_t device_id, vdevice_core_op_handle_t handle, bool over_threshold,
+        bool over_timeout)
+        : Trace("switch_core_op_decision"), reason_idle(reason_idle), device_id(device_id), core_op_handle(handle),
+        over_threshold(over_threshold), over_timeout(over_timeout)
+    {}
+
+    bool reason_idle;
     device_id_t device_id;
-    scheduler_core_op_handle_t core_op_handle;
+    vdevice_core_op_handle_t core_op_handle;
+    bool over_threshold;
+    bool over_timeout;
+};
+
+struct DumpProfilerState : Trace
+{
+    DumpProfilerState() : Trace("dump_profiler_state") {}
 };
 
 class Handler
@@ -184,11 +231,16 @@ public:
     virtual void handle_trace(const InputVdmaDequeueTrace&) {};
     virtual void handle_trace(const ReadFrameTrace&) {};
     virtual void handle_trace(const OutputVdmaEnqueueTrace&) {};
-    virtual void handle_trace(const ChooseCoreOpTrace&) {};
     virtual void handle_trace(const SwitchCoreOpTrace&) {};
-    virtual void handle_trace(const SchedulerStartTrace&) {};
+    virtual void handle_trace(const MonitorStartTrace&) {};
     virtual void handle_trace(const CoreOpIdleTrace&) {};
     virtual void handle_trace(const AddDeviceTrace&) {};
+    virtual void handle_trace(const SetCoreOpTimeoutTrace&) {};
+    virtual void handle_trace(const SetCoreOpThresholdTrace&) {};
+    virtual void handle_trace(const SetCoreOpPriorityTrace&) {};
+    virtual void handle_trace(const OracleDecisionTrace&) {};
+    virtual void handle_trace(const DumpProfilerState&) {};
+    virtual void handle_trace(const InitProfilerProtoTrace&) {};
 
 };
 
index 79ee27af19980f23b86078ff8398c9ed0b0c1a3f..698d2a227ec496458d1e1eef0f94093563e25ce9 100644 (file)
@@ -35,17 +35,12 @@ void MonitorHandler::clear_monitor() {
     m_core_ops_info.clear();
 }
 
-void MonitorHandler::handle_trace(const SchedulerStartTrace &trace)
+void MonitorHandler::handle_trace(const MonitorStartTrace &trace)
 {
     m_device_count = trace.device_count;
     start_mon();
 }
 
-void MonitorHandler::handle_trace(const CoreOpIdleTrace &trace)
-{
-    update_utilization_read_buffers_finished(trace.device_id, trace.core_op_handle, true);
-}
-
 void MonitorHandler::handle_trace(const AddCoreOpTrace &trace)
 {
     m_core_ops_info[trace.core_op_handle].utilization = 0;
@@ -56,7 +51,7 @@ void MonitorHandler::handle_trace(const AddCoreOpTrace &trace)
 void MonitorHandler::handle_trace(const AddDeviceTrace &trace)
 {
     DeviceInfo device_info(trace.device_id, trace.device_arch);
-    m_devices_info.emplace(trace.device_id, device_info); 
+    m_devices_info.emplace(trace.device_id, device_info);
 }
 
 void MonitorHandler::handle_trace(const SwitchCoreOpTrace &trace)
@@ -71,7 +66,12 @@ void MonitorHandler::handle_trace(const CreateCoreOpInputStreamsTrace &trace)
     if (!m_is_monitor_currently_working) { return; }
     auto core_op_handle = get_core_op_handle_by_name(trace.core_op_name);
     assert(contains(m_core_ops_info, core_op_handle));
-    m_core_ops_info[core_op_handle].input_streams_info[trace.stream_name] = StreamsInfo{trace.queue_size, 0};
+    assert(contains(m_devices_info, trace.device_id));
+    m_core_ops_info[core_op_handle].input_streams_info[trace.stream_name] = StreamsInfo{trace.queue_size};
+    if (!contains(m_devices_info.at(trace.device_id).requested_transferred_frames_h2d, core_op_handle)) {
+        m_devices_info.at(trace.device_id).requested_transferred_frames_h2d.emplace(core_op_handle, make_shared_nothrow<SchedulerCounter>());
+    }
+    m_devices_info.at(trace.device_id).requested_transferred_frames_h2d[core_op_handle]->insert(trace.stream_name);
 }
 
 void MonitorHandler::handle_trace(const CreateCoreOpOutputStreamsTrace &trace)
@@ -80,36 +80,67 @@ void MonitorHandler::handle_trace(const CreateCoreOpOutputStreamsTrace &trace)
     if (!m_is_monitor_currently_working) { return; }
     auto core_op_handle = get_core_op_handle_by_name(trace.core_op_name);
     assert(contains(m_core_ops_info, core_op_handle));
-    m_core_ops_info[core_op_handle].output_streams_info[trace.stream_name] = StreamsInfo{trace.queue_size, 0};
+    assert(contains(m_devices_info, trace.device_id));
+    m_core_ops_info[core_op_handle].output_streams_info[trace.stream_name] = StreamsInfo{trace.queue_size};
+    if (!contains(m_devices_info.at(trace.device_id).finished_transferred_frames_d2h, core_op_handle)) {
+        m_devices_info.at(trace.device_id).finished_transferred_frames_d2h.emplace(core_op_handle, make_shared_nothrow<SchedulerCounter>());
+    }
+    m_devices_info.at(trace.device_id).finished_transferred_frames_d2h[core_op_handle]->insert(trace.stream_name);
 }
 
 void MonitorHandler::handle_trace(const WriteFrameTrace &trace)
 {
     assert(contains(m_core_ops_info, trace.core_op_handle));
     assert(contains(m_core_ops_info[trace.core_op_handle].input_streams_info, trace.queue_name));
-    m_core_ops_info[trace.core_op_handle].input_streams_info[trace.queue_name].pending_frames_count++;
+    auto &queue = m_core_ops_info[trace.core_op_handle].input_streams_info[trace.queue_name];
+    queue.pending_frames_count->fetch_add(1);
+    queue.pending_frames_count_acc->add_data_point(queue.pending_frames_count->load());
 }
 
 void MonitorHandler::handle_trace(const ReadFrameTrace &trace)
 {
     assert(contains(m_core_ops_info, trace.core_op_handle));
     assert(contains(m_core_ops_info[trace.core_op_handle].output_streams_info, trace.queue_name));
-    m_core_ops_info[trace.core_op_handle].output_streams_info[trace.queue_name].pending_frames_count--;
-    m_core_ops_info[trace.core_op_handle].output_streams_info[trace.queue_name].total_frames_count++;
+    auto &queue = m_core_ops_info[trace.core_op_handle].output_streams_info[trace.queue_name];
+    queue.pending_frames_count->fetch_sub(1);
+    queue.pending_frames_count_acc->add_data_point(queue.pending_frames_count->load());
+    queue.total_frames_count->fetch_add(1);
 }
 
 void MonitorHandler::handle_trace(const OutputVdmaEnqueueTrace &trace)
 {
     assert(contains(m_core_ops_info, trace.core_op_handle));
     assert(contains(m_core_ops_info[trace.core_op_handle].output_streams_info, trace.queue_name));
-    m_core_ops_info[trace.core_op_handle].output_streams_info[trace.queue_name].pending_frames_count += trace.frames;
+
+    assert(contains(m_devices_info, trace.device_id));
+    assert(contains(m_devices_info.at(trace.device_id).requested_transferred_frames_h2d, trace.core_op_handle));
+
+    auto &queue = m_core_ops_info[trace.core_op_handle].output_streams_info[trace.queue_name];
+    queue.pending_frames_count->fetch_add(1);
+    queue.pending_frames_count_acc->add_data_point(queue.pending_frames_count->load());
+
+    m_devices_info.at(trace.device_id).finished_transferred_frames_d2h[trace.core_op_handle]->increase(trace.queue_name);
+
+    const auto max_transferred_h2d = m_devices_info.at(trace.device_id).requested_transferred_frames_h2d[trace.core_op_handle]->get_max_value();
+    const auto min_transferred_d2h = m_devices_info.at(trace.device_id).finished_transferred_frames_d2h[trace.core_op_handle]->get_min_value();
+    if(max_transferred_h2d == min_transferred_d2h) {
+            update_utilization_read_buffers_finished(trace.device_id, trace.core_op_handle, true);
+    }
 }
 
 void MonitorHandler::handle_trace(const InputVdmaDequeueTrace &trace)
 {
     assert(contains(m_core_ops_info, trace.core_op_handle));
     assert(contains(m_core_ops_info[trace.core_op_handle].input_streams_info, trace.queue_name));
-    m_core_ops_info[trace.core_op_handle].input_streams_info[trace.queue_name].pending_frames_count--;
+    assert(contains(m_devices_info, trace.device_id));
+    assert(contains(m_devices_info.at(trace.device_id).requested_transferred_frames_h2d, trace.core_op_handle));
+
+    auto &queue = m_core_ops_info[trace.core_op_handle].input_streams_info[trace.queue_name];
+    queue.pending_frames_count->fetch_sub(1);
+    queue.pending_frames_count_acc->add_data_point(queue.pending_frames_count->load());
+
+    m_devices_info.at(trace.device_id).requested_transferred_frames_h2d[trace.core_op_handle]->increase(trace.queue_name);
+
     update_utilization_send_started(trace.device_id);
 }
 
@@ -127,16 +158,17 @@ hailo_status MonitorHandler::start_mon()
 {
 #if defined(__GNUC__)
 
-    /* Clearing monitor members. Since the owner of monitor_handler is tracer, which is static, 
+    /* Clearing monitor members. Since the owner of monitor_handler is tracer, which is static,
     the monitor may get rerun without destructor being called. */
     if (m_is_monitor_currently_working) {
         clear_monitor();
     }
     m_is_monitor_currently_working = true;
 
-    m_mon_shutdown_event = Event::create_shared(Event::State::not_signalled);
+    auto event_exp = Event::create_shared(Event::State::not_signalled);
+    CHECK_EXPECTED_AS_STATUS(event_exp);
+    m_mon_shutdown_event = event_exp.release();
     m_last_measured_timestamp = std::chrono::steady_clock::now();
-    CHECK(nullptr != m_mon_shutdown_event, HAILO_OUT_OF_HOST_MEMORY);
 
     auto tmp_file = open_temp_mon_file();
     CHECK_EXPECTED_AS_STATUS(tmp_file);
@@ -175,7 +207,7 @@ Expected<std::shared_ptr<TempFile>> MonitorHandler::open_temp_mon_file()
     std::string file_name = get_curr_pid_as_str();
     auto tmp_file = TempFile::create(file_name, SCHEDULER_MON_TMP_DIR);
     CHECK_EXPECTED(tmp_file);
-    
+
     auto tmp_file_ptr = make_shared_nothrow<TempFile>(tmp_file.release());
     CHECK_AS_EXPECTED(nullptr != tmp_file_ptr, HAILO_OUT_OF_HOST_MEMORY);
 
@@ -221,14 +253,13 @@ void MonitorHandler::time_dependent_events_cycle_calc()
 void MonitorHandler::log_monitor_device_infos(ProtoMon &mon)
 {
     for (auto const &device_info_pair : m_devices_info) {
-        auto device_info = device_info_pair.second;
-        auto curr_device_utilization = device_info.device_utilization_duration;
+        auto curr_device_utilization = device_info_pair.second.device_utilization_duration;
         auto utilization_percentage = ((curr_device_utilization * 100) /  m_last_measured_time_duration);
 
         auto device_infos = mon.add_device_infos();
-        device_infos->set_device_id(device_info.device_id);
+        device_infos->set_device_id(device_info_pair.second.device_id);
         device_infos->set_utilization(utilization_percentage);
-        device_infos->set_device_arch(device_info.device_arch);
+        device_infos->set_device_arch(device_info_pair.second.device_arch);
     }
 }
 
@@ -240,7 +271,7 @@ void MonitorHandler::log_monitor_networks_infos(ProtoMon &mon)
         double min_fps = std::numeric_limits<double>::max();
 
         for (auto const &stream : m_core_ops_info[core_op_handle].output_streams_info) {
-            double fps = stream.second.total_frames_count / m_last_measured_time_duration;
+            double fps = stream.second.total_frames_count->load() / m_last_measured_time_duration;
             min_fps = (fps < min_fps) ? fps : min_fps;
         }
 
@@ -250,7 +281,7 @@ void MonitorHandler::log_monitor_networks_infos(ProtoMon &mon)
         net_info->set_fps(min_fps);
     }
 }
+
 void MonitorHandler::log_monitor_frames_infos(ProtoMon &mon)
 {
     for (uint32_t core_op_handle = 0; core_op_handle < m_core_ops_info.size(); core_op_handle++) {
@@ -262,9 +293,32 @@ void MonitorHandler::log_monitor_frames_infos(ProtoMon &mon)
             stream_frames_info->set_stream_name(stream.first);
             stream_frames_info->set_stream_direction(PROTO__STREAM_DIRECTION__HOST_TO_DEVICE);
             stream_frames_info->set_buffer_frames_size(static_cast<int32_t>(stream.second.queue_size * m_device_count));
-            stream_frames_info->set_pending_frames_count(static_cast<int32_t>(stream.second.pending_frames_count));
+            stream_frames_info->set_pending_frames_count(static_cast<int32_t>(stream.second.pending_frames_count->load()));
+
+            auto expected_min_val = stream.second.pending_frames_count_acc->min();
+            if (expected_min_val.status() == HAILO_SUCCESS) {
+                stream_frames_info->set_min_pending_frames_count(static_cast<int32_t>(expected_min_val.release()));
+            } else {
+                stream_frames_info->set_min_pending_frames_count(-1);
+            }
+
+            auto expected_max_val = stream.second.pending_frames_count_acc->max();
+            if (expected_max_val.status() == HAILO_SUCCESS) {
+                stream_frames_info->set_max_pending_frames_count(static_cast<int32_t>(expected_max_val.release()));
+            } else {
+                stream_frames_info->set_max_pending_frames_count(-1);
+            }
+
+            auto expected_avg_val = stream.second.pending_frames_count_acc->mean();
+            if (expected_avg_val.status() == HAILO_SUCCESS) {
+                stream_frames_info->set_avg_pending_frames_count(expected_avg_val.release());
+            } else {
+                stream_frames_info->set_avg_pending_frames_count(-1);
+            }
+
+            stream.second.pending_frames_count_acc->get_and_clear();
         }
-        
+
         for (auto const &stream : m_core_ops_info[core_op_handle].output_streams_info) {
             net_frames_info->set_network_name(m_core_ops_info[core_op_handle].core_op_name);
             auto stream_frames_info = net_frames_info->add_streams_frames_infos();
@@ -274,8 +328,31 @@ void MonitorHandler::log_monitor_frames_infos(ProtoMon &mon)
                 stream_frames_info->set_pending_frames_count(SCHEDULER_MON_NAN_VAL);
                 stream_frames_info->set_buffer_frames_size(SCHEDULER_MON_NAN_VAL);
             } else {
-                stream_frames_info->set_pending_frames_count(static_cast<int32_t>(stream.second.pending_frames_count));
+                stream_frames_info->set_pending_frames_count(static_cast<int32_t>(stream.second.pending_frames_count->load()));
                 stream_frames_info->set_buffer_frames_size(static_cast<int32_t>(stream.second.queue_size * m_device_count));
+
+                auto expected_min_val = stream.second.pending_frames_count_acc->min();
+                if (expected_min_val.status() == HAILO_SUCCESS) {
+                    stream_frames_info->set_min_pending_frames_count(static_cast<int32_t>(expected_min_val.release()));
+                } else {
+                    stream_frames_info->set_min_pending_frames_count(-1);
+                }
+
+                auto expected_max_val = stream.second.pending_frames_count_acc->max();
+                if (expected_max_val.status() == HAILO_SUCCESS) {
+                    stream_frames_info->set_max_pending_frames_count(static_cast<int32_t>(expected_max_val.release()));
+                } else {
+                    stream_frames_info->set_max_pending_frames_count(-1);
+                }
+
+                auto expected_avg_val = stream.second.pending_frames_count_acc->mean();
+                if (expected_avg_val.status() == HAILO_SUCCESS) {
+                    stream_frames_info->set_avg_pending_frames_count(expected_avg_val.release());
+                } else {
+                    stream_frames_info->set_avg_pending_frames_count(-1);
+                }
+
+                stream.second.pending_frames_count_acc->get_and_clear();
             }
         }
     }
@@ -314,7 +391,7 @@ void MonitorHandler::update_device_drained_state(const device_id_t &device_id, b
     m_devices_info.at(device_id).device_has_drained_everything = state;
 }
 
-void MonitorHandler::update_utilization_read_buffers_finished(const device_id_t &device_id, 
+void MonitorHandler::update_utilization_read_buffers_finished(const device_id_t &device_id,
     scheduler_core_op_handle_t core_op_handle, bool is_drained_everything)
 {
     update_utilization_timers(device_id, core_op_handle);
@@ -332,7 +409,7 @@ void MonitorHandler::clear_accumulators()
 
     for (auto &handle_core_op_pair : m_core_ops_info) {
         for (auto &handle_streams_pair : handle_core_op_pair.second.output_streams_info) {
-            handle_streams_pair.second.total_frames_count = 0;
+            handle_streams_pair.second.total_frames_count->store(0);
         }
         handle_core_op_pair.second.utilization = 0;
     }
index e6c188c6f1b5c0732831b2a4ba8574119aacbe94..8ee18ef4a5c5e6a2df2a47c22e00c98f182b19a1 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "common/filesystem.hpp"
 #include "common/utils.hpp"
+#include "common/runtime_statistics_internal.hpp"
 
 #include "vdevice/scheduler/scheduler_base.hpp"
 
 #endif
 #include "scheduler_mon.pb.h"
 #if defined(_MSC_VER)
-#pragma warning( pop ) 
+#pragma warning( pop )
 #else
 #pragma GCC diagnostic pop
 #endif
 
-namespace hailort 
+namespace hailort
 {
 
 #define SCHEDULER_MON_TMP_DIR ("/tmp/hmon_files/")
 #define SCHEDULER_MON_ENV_VAR ("HAILO_MONITOR")
+#define SCHEDULER_MON_ENV_VAR_VALUE ("1")
 #define DEFAULT_SCHEDULER_MON_INTERVAL (std::chrono::seconds(1))
 #define SCHEDULER_MON_NAN_VAL (-1)
 
 using stream_name = std::string;
 
 struct DeviceInfo {
-    DeviceInfo(const device_id_t &device_id, const std::string &device_arch) : 
+    DeviceInfo(const device_id_t &device_id, const std::string &device_arch) :
         device_id(device_id), device_arch(device_arch), device_has_drained_everything(true),
-        device_utilization_duration(0), last_measured_utilization_timestamp(std::chrono::steady_clock::now()), 
-        current_core_op_handle(INVALID_CORE_OP_HANDLE)
+        device_utilization_duration(0), last_measured_utilization_timestamp(std::chrono::steady_clock::now()),
+        current_core_op_handle(INVALID_CORE_OP_HANDLE), requested_transferred_frames_h2d(), finished_transferred_frames_d2h()
     {}
     std::string device_id;
     std::string device_arch;
@@ -61,12 +63,15 @@ struct DeviceInfo {
     double device_utilization_duration;
     std::chrono::time_point<std::chrono::steady_clock> last_measured_utilization_timestamp;
     scheduler_core_op_handle_t current_core_op_handle;
+    std::unordered_map<scheduler_core_op_handle_t, std::shared_ptr<SchedulerCounter>> requested_transferred_frames_h2d;
+    std::unordered_map<scheduler_core_op_handle_t, std::shared_ptr<SchedulerCounter>> finished_transferred_frames_d2h;
 };
 
 struct StreamsInfo {
     uint32_t queue_size;
-    uint32_t pending_frames_count;
-    uint32_t total_frames_count = 0;
+    std::shared_ptr<FullAccumulator<double>> pending_frames_count_acc = make_shared_nothrow<FullAccumulator<double>>("frames_acc");
+    std::shared_ptr<std::atomic_uint32_t> pending_frames_count = make_shared_nothrow<std::atomic_uint32_t>(0);
+    std::shared_ptr<std::atomic_uint32_t> total_frames_count = make_shared_nothrow<std::atomic_uint32_t>(0);
 };
 
 struct CoreOpInfo {
@@ -95,8 +100,7 @@ public:
     virtual void handle_trace(const InputVdmaDequeueTrace&) override;
     virtual void handle_trace(const OutputVdmaEnqueueTrace&) override;
     virtual void handle_trace(const SwitchCoreOpTrace&) override;
-    virtual void handle_trace(const SchedulerStartTrace&) override;
-    virtual void handle_trace(const CoreOpIdleTrace&) override;
+    virtual void handle_trace(const MonitorStartTrace&) override;
     virtual void handle_trace(const AddDeviceTrace&) override;
 
 private:
@@ -113,10 +117,10 @@ private:
     void update_utilization_timestamp(const device_id_t &device_id);
     void update_utilization_send_started(const device_id_t &device_id);
     void update_device_drained_state(const device_id_t &device_id, bool state);
-    void update_utilization_read_buffers_finished(const device_id_t &device_id, scheduler_core_op_handle_t core_op_hanle, bool is_drained_everything);
+    void update_utilization_read_buffers_finished(const device_id_t &device_id, scheduler_core_op_handle_t core_op_handle, bool is_drained_everything);
     void clear_accumulators();
     scheduler_core_op_handle_t get_core_op_handle_by_name(const std::string &name);
-    
+
     bool m_is_monitor_currently_working = false;
     uint32_t m_device_count;
     std::thread m_mon_thread;
diff --git a/hailort/libhailort/src/utils/profiler/profiler_utils.hpp b/hailort/libhailort/src/utils/profiler/profiler_utils.hpp
new file mode 100644 (file)
index 0000000..a71037c
--- /dev/null
@@ -0,0 +1,110 @@
+/**
+ * Copyright (c) 2020-2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+ **/
+/**
+ * @file profiler_utils.hpp
+ * @brief Utils for profiling mechanism for HailoRT + FW events
+ **/
+
+#ifndef _HAILO_PROFILER_UTILS_HPP_
+#define _HAILO_PROFILER_UTILS_HPP_
+
+#include "utils/hailort_logger.hpp"
+
+#if defined(__linux__)
+#include <sys/sysinfo.h>
+#include <sys/utsname.h>
+#endif
+
+namespace hailort
+{
+
+struct ProfilerTime {
+    uint32_t year;
+    uint32_t month;
+    uint32_t day;
+    uint32_t hour;
+    uint32_t min;
+};
+
+#if defined(__linux__)
+std::string os_name()
+{
+    struct utsname uts;
+    if (uname(&uts) != 0) {
+        LOGGER__ERROR("Failed to fetch os name.");
+        return "";
+    }
+    return uts.sysname;
+}
+
+std::string os_ver()
+{
+    struct utsname uts;
+    if (uname(&uts) != 0) {
+        LOGGER__ERROR("Failed to fetch os ver.");
+        return "";
+    }
+    return uts.version;
+}
+
+std::string cpu_arch()
+{
+    struct utsname uts;
+    if (uname(&uts) != 0) {
+        LOGGER__ERROR("Failed to fetch cpu architecture.");
+        return "";
+    }
+    return uts.machine;
+}
+
+std::uint64_t system_ram_size()
+{
+    struct sysinfo sys_info;
+
+    if (sysinfo(&sys_info) != 0) {
+        LOGGER__ERROR("Failed to fetch system ram size.");
+        return 1;
+    }
+
+    return sys_info.totalram;
+}
+#endif
+
+ProfilerTime get_curr_time()
+{
+    ProfilerTime curr_time = {};
+    auto now = std::chrono::system_clock::now();
+    auto time = std::chrono::system_clock::to_time_t(now);
+    struct std::tm t_time = *std::localtime(&time);
+
+    curr_time.day = t_time.tm_mday;
+    // Months in std::tm are 0-based
+    curr_time.month = t_time.tm_mon + 1;
+    // Years since 1900
+    curr_time.year = t_time.tm_year + 1900;
+    curr_time.hour = t_time.tm_hour;
+    curr_time.min = t_time.tm_min;
+
+    return curr_time;
+}
+
+std::string get_libhailort_version_representation()
+{
+    std::string result = "";
+    hailo_version_t libhailort_version = {};
+    auto status = hailo_get_library_version(&libhailort_version);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to fetch libhailort version");
+        return result;
+    }
+
+    result = result + std::to_string(libhailort_version.major) + "." + std::to_string(libhailort_version.minor) + "." +
+        std::to_string(libhailort_version.revision);
+    return result;
+}
+
+}
+
+#endif // _HAILO_PROFILER_UTILS_HPP_
\ No newline at end of file
index 86bd76bca154932b948aa0b74931c99a85008276..f81da4d10d55886581a19c95840dfbfa64f34091 100644 (file)
@@ -8,6 +8,7 @@
  **/
 
 #include "scheduler_profiler_handler.hpp"
+#include "profiler_utils.hpp"
 
 #include "common/logger_macros.hpp"
 
 #include <spdlog/sinks/android_sink.h>
 #include <spdlog/sinks/null_sink.h>
 
+#include <google/protobuf/io/zero_copy_stream.h>
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+
+#include <fstream>
 #include <iomanip>
-#include <sstream>
 
+#define PROFILER_DEFAULT_FILE_NAME ("hailo.tracer")
 #define SCHEDULER_PROFILER_NAME ("SchedulerProfiler")
+#define PROFILER_FILE_ENV_VAR ("HAILO_TRACE_FILE")
 #define SCHEDULER_PROFILER_LOGGER_FILENAME ("scheduler_profiler.json")
 #define SCHEDULER_PROFILER_LOGGER_PATTERN ("%v")
 
@@ -33,7 +39,7 @@ namespace hailort
 SchedulerProfilerHandler::SchedulerProfilerHandler(int64_t &start_time)
 #ifndef __ANDROID__
     : m_file_sink(HailoRTLogger::create_file_sink(HailoRTLogger::get_log_path(SCHEDULER_PROFILER_LOGGER_PATH), SCHEDULER_PROFILER_LOGGER_FILENAME, false)),
-      m_first_write(true)
+      m_first_write(true), m_start_time(start_time)
 #endif
 {
 #ifndef __ANDROID__
@@ -54,6 +60,22 @@ SchedulerProfilerHandler::~SchedulerProfilerHandler()
     m_profiler_logger->info("]\n}");
 }
 
+void SchedulerProfilerHandler::serialize_and_dump_proto()
+{
+    auto file_env_var = std::getenv(PROFILER_FILE_ENV_VAR);
+    std::string file_name = PROFILER_DEFAULT_FILE_NAME;
+    if (nullptr != file_env_var) {
+        file_name = std::string(file_env_var);
+    }
+
+    std::ofstream output_file(std::string(file_name), std::ios::out |std::ios::binary);
+    google::protobuf::io::OstreamOutputStream stream(&output_file);
+
+    if(!m_profiler_trace_proto.SerializeToZeroCopyStream(&stream)) {
+        LOGGER__ERROR("Failed writing profiling data to file {}.", file_name);
+    }
+}
+
 struct JSON
 {
     std::unordered_map<std::string, std::string> members;
@@ -108,7 +130,28 @@ bool SchedulerProfilerHandler::comma()
 
 void SchedulerProfilerHandler::log(JSON json)
 {
-    m_profiler_logger->info("{}{}", comma() ? ",\n" : "", json_to_string(json));    
+    m_profiler_logger->info("{}{}", comma() ? ",\n" : "", json_to_string(json));
+}
+
+void SchedulerProfilerHandler::handle_trace(const InitProfilerProtoTrace &trace)
+{
+    ProfilerTime curr_time = get_curr_time();
+
+    auto init = m_profiler_trace_proto.mutable_top_header();
+    #if defined(__linux__)
+    init->set_os_name(os_name());
+    init->set_os_ver(os_ver());
+    init->set_cpu_arch(cpu_arch());
+    init->set_sys_ram_size(system_ram_size());
+    #endif
+    init->set_hailort_ver(get_libhailort_version_representation());
+    init->mutable_time()->set_day(curr_time.day);
+    init->mutable_time()->set_month(curr_time.month);
+    init->mutable_time()->set_year(curr_time.year);
+    init->mutable_time()->set_hour(curr_time.hour);
+    init->mutable_time()->set_min(curr_time.min);
+    init->set_time_stamp(trace.timestamp);
+    init->set_time_stamp_since_epoch(m_start_time);
 }
 
 void SchedulerProfilerHandler::handle_trace(const AddCoreOpTrace &trace)
@@ -122,6 +165,21 @@ void SchedulerProfilerHandler::handle_trace(const AddCoreOpTrace &trace)
         {"timeout", json_to_string((uint64_t)trace.timeout)},
         {"threshold", json_to_string((uint64_t)trace.threshold)}
     }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_added_core_op()->set_time_stamp(trace.timestamp);
+    added_trace->mutable_added_core_op()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_added_core_op()->set_core_op_name(trace.core_op_name);
+}
+
+void SchedulerProfilerHandler::handle_trace(const AddDeviceTrace &trace)
+{
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_added_device()->mutable_device_info()->set_device_id(trace.device_id);
+    added_trace->mutable_added_device()->mutable_device_info()->set_device_arch(trace.device_arch);
+    added_trace->mutable_added_device()->set_time_stamp(trace.timestamp);
 }
 
 void SchedulerProfilerHandler::handle_trace(const CreateCoreOpInputStreamsTrace &trace)
@@ -134,6 +192,15 @@ void SchedulerProfilerHandler::handle_trace(const CreateCoreOpInputStreamsTrace
         {"stream_name", json_to_string(trace.stream_name)},
         {"queue_size", json_to_string(trace.queue_size)}
     }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_added_stream()->set_device_id(trace.device_id);
+    added_trace->mutable_added_stream()->set_direction(ProtoProfilerStreamDirection::PROTO__STREAM_DIRECTION__H2D);
+    added_trace->mutable_added_stream()->set_queue_size(trace.queue_size);
+    added_trace->mutable_added_stream()->set_stream_name(trace.stream_name);
+    added_trace->mutable_added_stream()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_added_stream()->set_time_stamp(trace.timestamp);
 }
 
 void SchedulerProfilerHandler::handle_trace(const CreateCoreOpOutputStreamsTrace &trace)
@@ -146,6 +213,15 @@ void SchedulerProfilerHandler::handle_trace(const CreateCoreOpOutputStreamsTrace
         {"stream_name", json_to_string(trace.stream_name)},
         {"queue_size", json_to_string(trace.queue_size)}
     }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_added_stream()->set_device_id(trace.device_id);
+    added_trace->mutable_added_stream()->set_direction(ProtoProfilerStreamDirection::PROTO__STREAM_DIRECTION__D2H);
+    added_trace->mutable_added_stream()->set_stream_name(trace.stream_name);
+    added_trace->mutable_added_stream()->set_queue_size(trace.queue_size);
+    added_trace->mutable_added_stream()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_added_stream()->set_time_stamp(trace.timestamp);
 }
 
 void SchedulerProfilerHandler::handle_trace(const WriteFrameTrace &trace)
@@ -153,10 +229,16 @@ void SchedulerProfilerHandler::handle_trace(const WriteFrameTrace &trace)
     log(JSON({
         {"action", json_to_string(trace.name)},
         {"timestamp", json_to_string(trace.timestamp)},
-        {"device_id", json_to_string(trace.device_id)},
         {"core_op_handle", json_to_string(trace.core_op_handle)},
         {"queue_name", json_to_string(trace.queue_name)}
     }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_frame_enqueue()->set_direction(ProtoProfilerStreamDirection::PROTO__STREAM_DIRECTION__H2D);
+    added_trace->mutable_frame_enqueue()->set_stream_name(trace.queue_name);
+    added_trace->mutable_frame_enqueue()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_frame_enqueue()->set_time_stamp(trace.timestamp);
 }
 
 void SchedulerProfilerHandler::handle_trace(const InputVdmaDequeueTrace &trace)
@@ -168,6 +250,14 @@ void SchedulerProfilerHandler::handle_trace(const InputVdmaDequeueTrace &trace)
         {"core_op_handle", json_to_string(trace.core_op_handle)},
         {"queue_name", json_to_string(trace.queue_name)}
     }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_frame_dequeue()->set_direction(ProtoProfilerStreamDirection::PROTO__STREAM_DIRECTION__H2D);
+    added_trace->mutable_frame_dequeue()->set_device_id(trace.device_id);
+    added_trace->mutable_frame_dequeue()->set_stream_name(trace.queue_name);
+    added_trace->mutable_frame_dequeue()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_frame_dequeue()->set_time_stamp(trace.timestamp);
 }
 
 void SchedulerProfilerHandler::handle_trace(const ReadFrameTrace &trace)
@@ -175,10 +265,16 @@ void SchedulerProfilerHandler::handle_trace(const ReadFrameTrace &trace)
     log(JSON({
         {"action", json_to_string(trace.name)},
         {"timestamp", json_to_string(trace.timestamp)},
-        {"device_id", json_to_string(trace.device_id)},
         {"core_op_handle", json_to_string(trace.core_op_handle)},
         {"queue_name", json_to_string(trace.queue_name)}
     }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_frame_dequeue()->set_direction(ProtoProfilerStreamDirection::PROTO__STREAM_DIRECTION__D2H);
+    added_trace->mutable_frame_dequeue()->set_stream_name(trace.queue_name);
+    added_trace->mutable_frame_dequeue()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_frame_dequeue()->set_time_stamp(trace.timestamp);
 }
 
 void SchedulerProfilerHandler::handle_trace(const OutputVdmaEnqueueTrace &trace)
@@ -188,32 +284,98 @@ void SchedulerProfilerHandler::handle_trace(const OutputVdmaEnqueueTrace &trace)
         {"timestamp", json_to_string(trace.timestamp)},
         {"device_id", json_to_string(trace.device_id)},
         {"core_op_handle", json_to_string(trace.core_op_handle)},
-        {"queue_name", json_to_string(trace.queue_name)},
-        {"frames", json_to_string(trace.frames)}
+        {"queue_name", json_to_string(trace.queue_name)}
     }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_frame_enqueue()->set_direction(ProtoProfilerStreamDirection::PROTO__STREAM_DIRECTION__D2H);
+    added_trace->mutable_frame_enqueue()->set_device_id(trace.device_id);
+    added_trace->mutable_frame_enqueue()->set_stream_name(trace.queue_name);
+    added_trace->mutable_frame_enqueue()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_frame_enqueue()->set_time_stamp(trace.timestamp);
 }
 
-void SchedulerProfilerHandler::handle_trace(const ChooseCoreOpTrace &trace)
+void SchedulerProfilerHandler::handle_trace(const SwitchCoreOpTrace &trace)
 {
     log(JSON({
         {"action", json_to_string(trace.name)},
         {"timestamp", json_to_string(trace.timestamp)},
         {"device_id", json_to_string(trace.device_id)},
-        {"chosen_core_op_handle", json_to_string(trace.core_op_handle)},
-        {"threshold", json_to_string(trace.threshold)},
-        {"timeout", json_to_string(trace.timeout)},
-        {"priority", json_to_string(trace.priority)}
+        {"core_op_handle", json_to_string(trace.core_op_handle)}
     }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_switched_core_op()->set_device_id(trace.device_id);
+    added_trace->mutable_switched_core_op()->set_new_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_switched_core_op()->set_time_stamp(trace.timestamp);
 }
 
-void SchedulerProfilerHandler::handle_trace(const SwitchCoreOpTrace &trace)
+void SchedulerProfilerHandler::handle_trace(const SetCoreOpTimeoutTrace &trace)
 {
     log(JSON({
         {"action", json_to_string(trace.name)},
-        {"timestamp", json_to_string(trace.timestamp)},
-        {"device_id", json_to_string(trace.device_id)},
         {"core_op_handle", json_to_string(trace.core_op_handle)}
     }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_core_op_set_value()->set_timeout((trace.timeout).count());
+    added_trace->mutable_core_op_set_value()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_core_op_set_value()->set_time_stamp(trace.timestamp);
+}
+
+void SchedulerProfilerHandler::handle_trace(const SetCoreOpThresholdTrace &trace)
+{
+    log(JSON({
+        {"action", json_to_string(trace.name)},
+        {"core_op_handle", json_to_string(trace.core_op_handle)}
+    }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_core_op_set_value()->set_threshold(trace.threshold);
+    added_trace->mutable_core_op_set_value()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_core_op_set_value()->set_time_stamp(trace.timestamp);
+}
+
+void SchedulerProfilerHandler::handle_trace(const SetCoreOpPriorityTrace &trace)
+{
+    log(JSON({
+        {"action", json_to_string(trace.name)},
+        {"core_op_handle", json_to_string(trace.core_op_handle)}
+    }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_core_op_set_value()->set_priority(trace.priority);
+    added_trace->mutable_core_op_set_value()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_core_op_set_value()->set_time_stamp(trace.timestamp);
+}
+
+void SchedulerProfilerHandler::handle_trace(const OracleDecisionTrace &trace)
+{
+    log(JSON({
+        {"action", json_to_string(trace.name)},
+        {"reason", json_to_string(trace.reason_idle)},
+        {"core_op_handle", json_to_string(trace.core_op_handle)}
+    }));
+
+    std::lock_guard<std::mutex> lock(m_proto_lock);
+    auto added_trace = m_profiler_trace_proto.add_added_trace();
+    added_trace->mutable_switch_core_op_decision()->set_core_op_handle(trace.core_op_handle);
+    added_trace->mutable_switch_core_op_decision()->set_time_stamp(trace.timestamp);
+    added_trace->mutable_switch_core_op_decision()->set_over_threshold(trace.over_threshold);
+    added_trace->mutable_switch_core_op_decision()->set_switch_because_idle(trace.reason_idle);
+    added_trace->mutable_switch_core_op_decision()->set_over_timeout(trace.over_timeout);
+}
+
+void SchedulerProfilerHandler::handle_trace(const DumpProfilerState &trace)
+{
+    (void)trace;
+    serialize_and_dump_proto();
+    m_profiler_trace_proto.Clear();
 }
 
 }
\ No newline at end of file
index 24178aea09ad6028293be451cf0245025d6b98f0..b5a16994d8c9feb03326cf49422c8acecdcabffc 100644 (file)
 #define _HAILO_SCHEDULER_PROFILER_HANDLER_HPP_
 
 #include "hailo/hailort.h"
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable: 4244 4267 4127)
+#else
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion"
+#endif
+#include "tracer_profiler.pb.h"
+#if defined(_MSC_VER)
+#pragma warning( pop )
+#else
+#pragma GCC diagnostic pop
+#endif
 
 #include "handler.hpp"
 
-namespace hailort 
+namespace hailort
 {
 class SchedulerProfilerHandler : public Handler
 {
@@ -32,16 +45,26 @@ public:
     virtual void handle_trace(const InputVdmaDequeueTrace&) override;
     virtual void handle_trace(const ReadFrameTrace&) override;
     virtual void handle_trace(const OutputVdmaEnqueueTrace&) override;
-    virtual void handle_trace(const ChooseCoreOpTrace&) override;
     virtual void handle_trace(const SwitchCoreOpTrace&) override;
+    virtual void handle_trace(const AddDeviceTrace&) override;
+    virtual void handle_trace(const SetCoreOpTimeoutTrace&) override;
+    virtual void handle_trace(const SetCoreOpThresholdTrace&) override;
+    virtual void handle_trace(const SetCoreOpPriorityTrace&) override;
+    virtual void handle_trace(const OracleDecisionTrace&) override;
+    virtual void handle_trace(const DumpProfilerState&) override;
+    virtual void handle_trace(const InitProfilerProtoTrace&) override;
 
 private:
     void log(JSON json);
     bool comma();
+    void serialize_and_dump_proto();
 
     std::shared_ptr<spdlog::sinks::sink> m_file_sink;
     std::shared_ptr<spdlog::logger> m_profiler_logger;
     std::atomic<bool> m_first_write;
+    ProtoProfiler m_profiler_trace_proto;
+    std::mutex m_proto_lock;
+    int64_t m_start_time;
 };
 
 }
index 14fce8fa8a6601b1ee8cd08ff2bb99a906e8010c..4bad590125c5388fe5b655dec5d21ecb43ced89e 100644 (file)
@@ -11,7 +11,8 @@
 
 #include "utils/profiler/tracer.hpp"
 
-#define PROFILER_ENV_VAR ("HAILO_ENABLE_PROFILER")
+#define PROFILER_ENV_VAR ("HAILO_TRACE")
+#define PROFILER_ENV_VAR_VALUE ("scheduler")
 
 namespace hailort
 {
@@ -25,7 +26,7 @@ Tracer::Tracer()
 void Tracer::init_scheduler_profiler_handler()
 {
     const char* env_var_name = PROFILER_ENV_VAR;
-    m_should_trace = is_env_variable_on(env_var_name);
+    m_should_trace = is_env_variable_on(env_var_name, PROFILER_ENV_VAR_VALUE, sizeof(PROFILER_ENV_VAR_VALUE));
     if (m_should_trace) {
         m_start_time = std::chrono::high_resolution_clock::now();
         int64_t time_since_epoch = std::chrono::duration_cast<std::chrono::nanoseconds>(m_start_time.time_since_epoch()).count();
@@ -36,7 +37,7 @@ void Tracer::init_scheduler_profiler_handler()
 void Tracer::init_monitor_handler()
 {
     const char* env_var_name = SCHEDULER_MON_ENV_VAR;
-    m_should_monitor = is_env_variable_on(env_var_name);
+    m_should_monitor = is_env_variable_on(env_var_name, SCHEDULER_MON_ENV_VAR_VALUE, sizeof(SCHEDULER_MON_ENV_VAR_VALUE));
     if (m_should_monitor) {
         m_handlers.push_back(std::make_unique<MonitorHandler>());
     }
index 687f66ece5d0c72ecc6e5e99280e0afb4a7cb359..afcad3a227bb87378316db71434e7912f0d06217 100644 (file)
@@ -79,7 +79,7 @@ public:
             // Create a new resource and register
             auto expected_resource = create();
             CHECK_EXPECTED(expected_resource);
-            m_resources.at(available_index) = std::make_shared<ResourceRef<Key, T>>(user_key, expected_resource.release());
+            m_resources.at(available_index) = std::make_unique<ResourceRef<Key, T>>(user_key, expected_resource.release());
             m_resources.at(available_index)->count++;
             return available_index;
         }
@@ -98,9 +98,23 @@ public:
 
 private:
     SharedResourceManager()
-        : m_resources(max_resources(), nullptr)
+        : m_resources(max_resources())
     {}
 
+#ifdef _WIN32
+    // On windows, when the process terminates, all threads are killed and only then are the static variables destroyed.
+    // If the user hasn't called release_resource, we will leak its objects (since otherwise the object destructor may
+    // wait on some terminated threads and hang).
+    // Notice that on graceful cleanup m_resources should be empty.
+    ~SharedResourceManager()
+    {
+        for (auto &resource : m_resources) {
+            // Releasing resource will leak its memory
+            resource.release();
+        }
+    }
+#endif /* _WIN32 */
+
     static uint32_t max_resources()
     {
         // This method can be "overriden" with template specialization
@@ -116,7 +130,7 @@ private:
     }
 
     std::mutex m_mutex;
-    std::vector<std::shared_ptr<ResourceRef<Key, T>>> m_resources;
+    std::vector<std::unique_ptr<ResourceRef<Key, T>>> m_resources;
 };
 
 }
index 757105eea43e91a6c1555d0fd6348898c102d337..cc1093b1490f95a3216023486be99a0d1f3d2f57 100644 (file)
 
 #include <map>
 #include <mutex>
+#include <unordered_map>
+#include <shared_mutex>
 
 namespace hailort
 {
 
-template<class K, class V>
-class SafeMap {
+/// Thread safe map is a wrapper to std::unordered_map or std::map that allows multi-thread access to the map.
+/// This class guards the map structure itself in thread safe way, and not the members.
+template<typename Key, typename Value, typename MapType=std::unordered_map<Key, Value>>
+class ThreadSafeMap final {
 public:
-    SafeMap() : m_map(), m_mutex() {}
-    virtual ~SafeMap() = default;
-    SafeMap(SafeMap &&map) : m_map(std::move(map.m_map)), m_mutex() {};
 
-    V& operator[](const K& k) {
-        std::lock_guard<std::mutex> lock(m_mutex);
-        return m_map[k];
+    template <typename... Args>
+    auto emplace(const Key& key, Args&&... args)
+    {
+        std::unique_lock<std::shared_timed_mutex> lock(m_mutex);
+        return m_map.emplace(key, std::forward<Args>(args)...);
     }
 
-    V& operator[](K&& k) {
-        std::lock_guard<std::mutex> lock(m_mutex);
-        return m_map[k];
+    // Return by value (and not by reference) since after the mutex is unlocked, the reference may change.
+    Value at(const Key &key) const
+    {
+        std::shared_lock<std::shared_timed_mutex> lock(m_mutex);
+        return m_map.at(key);
     }
 
-    V& at(K& k) {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        return m_map.at(k);
+    template<typename Func>
+    void for_each(Func &&func) const
+    {
+        std::shared_lock<std::shared_timed_mutex> lock(m_mutex);
+        std::for_each(m_map.begin(), m_map.end(), func);
     }
 
-    V& at(const K& k) {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        return m_map.at(k);
-    }
-
-    std::size_t size() {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        return m_map.size();
-    }
-
-    typename std::map<K, V>::iterator find(K& k) {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        return m_map.find(k);
-    }
-
-    typename std::map<K, V>::iterator find(const K& k) {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        return m_map.find(k);
-    }
-
-    bool contains(const K &k) {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        return m_map.find(k) != m_map.end();
-    }
-
-    void clear() {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        m_map.clear();
-    }
-
-    typename std::map<K, V>::iterator begin() {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        return m_map.begin();
-    }
-
-    typename std::map<K, V>::iterator end() {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        return m_map.end();
-    }
-
-protected:
-    std::map<K, V> m_map;
-    mutable std::mutex m_mutex;
+private:
+    // Const operations on the map can be executed in parallel, hence we can use shared_lock, while non-const operations
+    // (such as emplace) must have unique access.
+    mutable std::shared_timed_mutex m_mutex;
+    MapType m_map;
 };
 
 } /* namespace hailort */
index 3be5f5c215b5ab795e11685e2580bd8c74a83c66..b736d15c61679ef31ac372156e307a3e70bcf42c 100644 (file)
@@ -44,73 +44,57 @@ namespace hailort
 
 #define DEFAULT_TIMEOUT_MS (1000)
 
-// A threadsafe-queue. - https://stackoverflow.com/a/16075550
 template <class T>
-class SafeQueue {
+class SafeQueue final {
 public:
-    SafeQueue() : m_queue(), m_mutex(), m_queue_not_empty(), m_timeout(DEFAULT_TIMEOUT_MS) {}
-    virtual ~SafeQueue() = default;
+    static constexpr size_t UNLIMITED_QUEUE_SIZE = std::numeric_limits<size_t>::max();
 
-    // Add an element to the queue.
-    virtual void push(T t) {
+    SafeQueue(size_t max_size) :
+        m_max_size(max_size)
+    {}
+
+    SafeQueue() :
+        SafeQueue(UNLIMITED_QUEUE_SIZE)
+    {}
+
+
+    ~SafeQueue() = default;
+
+    hailo_status enqueue(T &&t)
+    {
         std::lock_guard<std::mutex> lock(m_mutex);
-        m_queue.push(t);
-        m_queue_not_empty.notify_one();
+        if ((m_max_size != UNLIMITED_QUEUE_SIZE) && (m_queue.size() >= m_max_size)) {
+            return HAILO_QUEUE_IS_FULL;
+        }
+        m_queue.push(std::move(t));
+        return HAILO_SUCCESS;
     }
 
-    // Get the "front"-element.
-    // If the queue is empty, wait till a element is available.
-    virtual T pop() {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        while (m_queue.empty()) {
-            // release lock as long as the wait and require it afterwards.
-            m_queue_not_empty.wait_for(lock, m_timeout);
-        }
+    Expected<T> dequeue()
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+        CHECK_AS_EXPECTED(!m_queue.empty(), HAILO_INTERNAL_FAILURE, "Can't dequeue if queue is empty");
         T val = m_queue.front();
         m_queue.pop();
         return val;
     }
 
-protected:
-    std::queue<T> m_queue;
-    mutable std::mutex m_mutex;
-    std::condition_variable m_queue_not_empty;
-    const std::chrono::milliseconds m_timeout;
-};
-
- template <class T>
- class SafeQueueMaxSize : public SafeQueue<T> {
- public:
-    SafeQueueMaxSize(uint32_t max_size) :
-        SafeQueue<T>::SafeQueue(),
-        m_max_size(max_size),
-        m_queue_not_full()
-    {}
-    virtual ~SafeQueueMaxSize() = default;
-
-    virtual void push(T t) override {
-        std::unique_lock<std::mutex> lock(this->m_mutex);
-        m_queue_not_full.wait(lock, [&]{return this->m_queue.size() < m_max_size;});
-
-        this->m_queue.push(t);
-        this->m_queue_not_empty.notify_one();
-    }
-
-    virtual T pop() override {
-        std::unique_lock<std::mutex> lock(this->m_mutex);
-        this->m_queue_not_empty.wait(lock, [&]{return !this->m_queue.empty();});
-        
-        T val = this->m_queue.front();
-        this->m_queue.pop();
-        
-        if (this->m_queue.size() < m_max_size) {
-            m_queue_not_full.notify_one();
+    void clear()
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+        while (!m_queue.empty()) {
+            m_queue.pop();
         }
-        return val;
     }
+
+    bool empty() const { return m_queue.empty(); }
+    size_t size() const { return m_queue.size(); }
+    size_t max_size() const { return m_max_size; }
+
 protected:
-    const uint32_t m_max_size;
-    std::condition_variable m_queue_not_full;
+    const size_t m_max_size;
+    std::queue<T> m_queue;
+    mutable std::mutex m_mutex;
 };
 
 // Single-Producer Single-Consumer Queue
index f9535d3c99cfa5b74c3195a71ab27f238a716d6a..2fd62f0dc31fa6e632b1724059145272ff766c6a 100644 (file)
@@ -5,7 +5,6 @@ set(SRC_FILES
     ${CMAKE_CURRENT_SOURCE_DIR}/vdevice_core_op.cpp
 
     ${CMAKE_CURRENT_SOURCE_DIR}/pipeline_multiplexer.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/vdevice_stream.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/vdevice_native_stream.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/vdevice_stream_multiplexer_wrapper.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/callback_reorder_queue.cpp
@@ -14,7 +13,6 @@ set(SRC_FILES
     ${CMAKE_CURRENT_SOURCE_DIR}/scheduler/scheduler_oracle.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/scheduler/scheduled_core_op_state.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/scheduler/scheduled_stream.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/scheduler/multi_device_scheduled_stream.cpp
 )
 
 set(HAILORT_CPP_SOURCES ${HAILORT_CPP_SOURCES} ${SRC_FILES} PARENT_SCOPE)
index e5df53e7db953484d91c051434fbb3cca40e52a5..7f672b023d6ea182545067f3831291fe969e1132 100644 (file)
@@ -12,7 +12,7 @@
 #ifndef _HAILO_CALLBACK_REORDER_QUEUE_HPP_
 #define _HAILO_CALLBACK_REORDER_QUEUE_HPP_
 
-#include "stream_common/async_common.hpp"
+#include "stream_common/transfer_common.hpp"
 
 #include <mutex>
 #include <queue>
diff --git a/hailort/libhailort/src/vdevice/scheduler/multi_device_scheduled_stream.cpp b/hailort/libhailort/src/vdevice/scheduler/multi_device_scheduled_stream.cpp
deleted file mode 100644 (file)
index d8d236c..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file multi_device_scheduled_stream.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include "vdevice/scheduler/multi_device_scheduled_stream.hpp"
-
-namespace hailort
-{
-
-Expected<std::unique_ptr<MultiDeviceScheduledInputStream>> MultiDeviceScheduledInputStream::create(
-    std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-    const scheduler_core_op_handle_t &core_op_handle,
-    EventPtr &&core_op_activated_event,
-    const LayerInfo &layer_info,
-    CoreOpsSchedulerWeakPtr core_ops_scheduler)
-{
-    auto buffer_frame_size = streams.begin()->second.get().get_buffer_frames_size();
-    CHECK_EXPECTED(buffer_frame_size);
-    auto frame_size = streams.begin()->second.get().get_frame_size();
-    auto buffers_queue_ptr = BuffersQueue::create_unique(frame_size, (streams.size() * buffer_frame_size.value()));
-    CHECK_EXPECTED(buffers_queue_ptr);
-
-    auto status = HAILO_UNINITIALIZED;
-    auto stream = make_unique_nothrow<MultiDeviceScheduledInputStream>(std::move(streams),
-        core_op_handle, std::move(core_op_activated_event), layer_info,
-        core_ops_scheduler, buffers_queue_ptr.release(), status);
-    CHECK_AS_EXPECTED((nullptr != stream), HAILO_OUT_OF_HOST_MEMORY);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return stream;
-}
-
-hailo_status MultiDeviceScheduledInputStream::send_pending_buffer(const device_id_t &device_id)
-{
-    auto buffer = m_queue->front(get_timeout()); // Counting on scheduler to not allow paralle calls to this function
-    if (HAILO_STREAM_ABORTED_BY_USER == buffer.status()) {
-        LOGGER__INFO("'front' was aborted.");
-        return buffer.status();
-    }
-    CHECK_EXPECTED_AS_STATUS(buffer);
-    assert(contains(m_streams, device_id));
-    auto status = m_streams.at(device_id).get().write_buffer_only(buffer.value());
-    if (HAILO_STREAM_ABORTED_BY_USER == status) {
-        LOGGER__INFO("send_pending_buffer was aborted.");
-        return status;
-    }
-    CHECK_SUCCESS(status);
-    m_queue->pop(); // Release buffer to free the queue for other dequeues
-
-    auto &vdma_input = dynamic_cast<VdmaInputStreamBase&>(m_streams.at(device_id).get());
-    return vdma_input.send_pending_buffer(device_id);
-}
-
-hailo_status MultiDeviceScheduledInputStream::write_impl(const MemoryView &buffer,
-    const std::function<bool()> &should_cancel)
-{
-    if (should_cancel()) {
-        return HAILO_STREAM_ABORTED_BY_USER;
-    }
-
-    auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
-
-    auto status = m_queue->push(buffer, get_timeout());
-    if (HAILO_STREAM_ABORTED_BY_USER == status) {
-        LOGGER__INFO("'push' was aborted.");
-        return status;
-    }
-    CHECK_SUCCESS(status);
-
-    auto write_finish_status = core_ops_scheduler->signal_frame_pending_to_send(m_core_op_handle, name());
-    if (HAILO_STREAM_ABORTED_BY_USER == write_finish_status) {
-        return write_finish_status;
-    }
-    CHECK_SUCCESS(write_finish_status);
-
-    return HAILO_SUCCESS;
-}
-
-Expected<size_t> MultiDeviceScheduledInputStream::get_pending_frames_count() const
-{
-    return get_queue_size();
-}
-
-size_t MultiDeviceScheduledInputStream::get_queue_size() const
-{
-    return m_queue->size();
-}
-
-hailo_status MultiDeviceScheduledInputStream::abort()
-{
-    auto status = HAILO_SUCCESS; // Best effort
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto abort_status = stream.get().abort();
-        if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to abort input stream. (status: {} device: {})", status, stream.get().get_dev_id());
-            status = abort_status;
-        }
-    }
-    m_queue->abort();
-
-    auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
-
-    auto disable_status = core_ops_scheduler->disable_stream(m_core_op_handle, name());
-    if (HAILO_SUCCESS != disable_status) {
-        LOGGER__ERROR("Failed to disable stream in the core-op scheduler. (status: {})", disable_status);
-        status = disable_status;
-    }
-
-    return status;
-}
-
-hailo_status MultiDeviceScheduledInputStream::clear_abort()
-{
-    auto status = HAILO_SUCCESS; // Best effort
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto clear_abort_status = stream.get().clear_abort();
-        if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
-            LOGGER__ERROR("Failed to clear abort input stream. (status: {} device: {})", clear_abort_status, stream.get().get_dev_id());
-            status = clear_abort_status;
-        }
-    }
-    m_queue->clear_abort();
-
-    auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
-
-    auto enable_status = core_ops_scheduler->enable_stream(m_core_op_handle, name());
-    if (HAILO_SUCCESS != enable_status) {
-        LOGGER__ERROR("Failed to enable stream in the core-op scheduler. (status: {})", enable_status);
-        status = enable_status;
-    }
-
-    return status;
-}
-
-} /* namespace hailort */
diff --git a/hailort/libhailort/src/vdevice/scheduler/multi_device_scheduled_stream.hpp b/hailort/libhailort/src/vdevice/scheduler/multi_device_scheduled_stream.hpp
deleted file mode 100644 (file)
index 63eadf8..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file  multi_device_stream.hpp
- * @brief Internal multi device stream implementation for scheduled streams
- *
- **/
-
-#ifndef HAILO_MULTI_DEVICE_SCHEDULED_STREAM_HPP_
-#define HAILO_MULTI_DEVICE_SCHEDULED_STREAM_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-
-#include "stream_common/stream_internal.hpp"
-#include "vdevice/vdevice_internal.hpp"
-#include "vdevice/scheduler/scheduled_stream.hpp"
-#include "vdma/vdma_device.hpp"
-
-
-namespace hailort
-{
-
-class BuffersQueue
-{
-public:
-    static Expected<std::unique_ptr<BuffersQueue>> create_unique(size_t buffer_size, size_t buffers_count)
-    {
-        std::vector<Buffer> queue;
-        queue.reserve(buffers_count);
-        for (size_t i = 0; i < (buffers_count); i++) {
-            auto buff = Buffer::create(buffer_size);
-            CHECK_EXPECTED(buff);
-            queue.emplace_back(buff.release());
-        }
-
-        auto ptr = make_unique_nothrow<BuffersQueue>(std::move(queue));
-        CHECK_NOT_NULL_AS_EXPECTED(ptr, HAILO_OUT_OF_HOST_MEMORY);
-        return ptr;
-    }
-
-    hailo_status push(const MemoryView &buff, const std::chrono::milliseconds &timeout)
-    {
-        auto status = HAILO_SUCCESS;
-        {
-            std::unique_lock<std::mutex> lock(m_mutex);
-
-            // TODO: this validation is done in scheduler logic. can be removed?
-            auto wait_res = m_cv.wait_for(lock, timeout, [this, &status] {
-                if (m_should_stop) {
-                    status = HAILO_STREAM_ABORTED_BY_USER;
-                    return true;
-                }
-                return size() < m_queue.size();
-            });
-            CHECK(wait_res, HAILO_TIMEOUT, "Failed to enqueue frame with status={}, timeout={}ms", HAILO_TIMEOUT, timeout.count());
-            if (HAILO_STREAM_ABORTED_BY_USER == status) {
-                LOGGER__INFO("'push' was aborted by user");
-                return status;
-            }
-
-            std::memcpy(m_queue[m_head].data(), buff.data(), buff.size());
-            m_head = static_cast<uint32_t>((m_head + 1) % m_queue.size());
-            m_is_empty = false;
-        }
-        m_cv.notify_all();
-
-        return HAILO_SUCCESS;
-    }
-
-    Expected<MemoryView> front(const std::chrono::milliseconds &timeout)
-    {
-        auto status = HAILO_SUCCESS;
-        {
-            std::unique_lock<std::mutex> lock(m_mutex);
-
-            auto wait_res = m_cv.wait_for(lock, timeout, [this, &status] {
-                if (m_should_stop) {
-                    status = HAILO_STREAM_ABORTED_BY_USER;
-                    return true;
-                }
-                return 0 < size();
-            });
-            CHECK_AS_EXPECTED(wait_res, HAILO_TIMEOUT, "Failed to dequeue frame with status={}, timeout={}ms", HAILO_TIMEOUT, timeout.count());
-            if (HAILO_STREAM_ABORTED_BY_USER == status) {
-                LOGGER__INFO("'front' was aborted by user");
-                return make_unexpected(status);
-            }
-        }
-        m_cv.notify_all();
-
-        return MemoryView(m_queue[m_tail]);
-    }
-
-    void pop()
-    {
-        {
-            std::unique_lock<std::mutex> lock(m_mutex);
-            m_tail = static_cast<uint32_t>((m_tail + 1) % m_queue.size());
-            if (m_tail == m_head) {
-                m_is_empty = true;
-            }
-        }
-        m_cv.notify_all();
-    }
-
-    size_t size()
-    {
-        if (m_head == m_tail) {
-            return m_is_empty ? 0 : m_queue.size();
-        } else if (m_head > m_tail) {
-            return (m_head - m_tail);
-        } else {
-            return (m_queue.size() - m_tail) + m_head;
-        }
-    }
-
-    void abort()
-    {
-        {
-            std::unique_lock<std::mutex> lock(m_mutex);
-            m_should_stop = true;
-        }
-        m_cv.notify_all();
-    }
-
-    void clear_abort()
-    {
-        {
-            std::unique_lock<std::mutex> lock(m_mutex);
-            m_should_stop = false;
-        }
-        m_cv.notify_all();
-    }
-
-    BuffersQueue(std::vector<Buffer> &&queue) : m_queue(std::move(queue)), m_head(0), m_tail(0),
-        m_is_empty(true), m_should_stop(false)
-    {}
-
-private:
-    std::vector<Buffer> m_queue;
-    std::atomic_uint32_t m_head;
-    std::atomic_uint32_t m_tail;
-
-    std::atomic_bool m_is_empty;
-
-    std::condition_variable m_cv;
-    std::mutex m_mutex;
-    std::atomic_bool m_should_stop;
-};
-
-// Stream used on scheduler input multiple device with SYNC api (On async api, the ScheduledAsyncInputStream handles
-// both single and multiple devices).
-class MultiDeviceScheduledInputStream : public ScheduledInputStreamBase {
-public:
-    static Expected<std::unique_ptr<MultiDeviceScheduledInputStream>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        const scheduler_core_op_handle_t &core_op_handle,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler);
-
-    MultiDeviceScheduledInputStream(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        const scheduler_core_op_handle_t &core_op_handle,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler,
-        std::unique_ptr<BuffersQueue> &&frames_queue,
-        hailo_status &status) :
-            ScheduledInputStreamBase(std::move(streams), core_op_handle,
-                std::move(core_op_activated_event), layer_info, core_ops_scheduler, status),
-                m_queue(std::move(frames_queue))
-    {}
-
-    virtual hailo_status send_pending_buffer(const device_id_t &device_id) override;
-    virtual Expected<size_t> get_pending_frames_count() const override;
-
-protected:
-    virtual hailo_status write_impl(const MemoryView &buffer, const std::function<bool()> &should_cancel) override;
-    virtual hailo_status abort() override;
-    virtual hailo_status clear_abort() override;
-
-private:
-    size_t get_queue_size() const;
-
-    std::unique_ptr<BuffersQueue> m_queue;
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_MULTI_DEVICE_SCHEDULED_STREAM_HPP_ */
diff --git a/hailort/libhailort/src/vdevice/scheduler/scheduled_core_op_cv.hpp b/hailort/libhailort/src/vdevice/scheduler/scheduled_core_op_cv.hpp
deleted file mode 100644 (file)
index ef314a0..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file scheduled_core_op_cv.hpp
- * @brief Class declaration for scheduled core-ops conditional variables
- **/
-
-#ifndef _HAILO_SCHEDULED_CORE_OP_CV_HPP_
-#define _HAILO_SCHEDULED_CORE_OP_CV_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-
-#include "common/utils.hpp"
-
-#include <condition_variable>
-
-
-namespace hailort
-{
-
-class ScheduledCoreOpCV
-{
-public:
-    static Expected<std::shared_ptr<ScheduledCoreOpCV>> create(std::shared_ptr<CoreOp> added_cng)
-    {
-        auto stream_infos = added_cng->get_all_stream_infos();
-        CHECK_EXPECTED(stream_infos);
-
-        std::unordered_map<stream_name_t, std::shared_ptr<std::condition_variable>> cv_per_stream;
-        for (const auto &stream_info : stream_infos.value()) {
-            auto cv = make_shared_nothrow<std::condition_variable>();
-            CHECK_NOT_NULL_AS_EXPECTED(cv, HAILO_OUT_OF_HOST_MEMORY);
-            cv_per_stream[stream_info.name] = std::move(cv);
-        }
-
-        auto scheduled_core_op_cv = make_shared_nothrow<ScheduledCoreOpCV>(cv_per_stream);
-        CHECK_NOT_NULL_AS_EXPECTED(scheduled_core_op_cv, HAILO_OUT_OF_HOST_MEMORY);
-
-        return scheduled_core_op_cv;
-    }
-
-    virtual ~ScheduledCoreOpCV()  = default;
-    ScheduledCoreOpCV(const ScheduledCoreOpCV &other) = delete;
-    ScheduledCoreOpCV &operator=(const ScheduledCoreOpCV &other) = delete;
-    ScheduledCoreOpCV &operator=(ScheduledCoreOpCV &&other) = delete;
-    ScheduledCoreOpCV(ScheduledCoreOpCV &&other) noexcept = delete;
-
-    void notify_one(const stream_name_t &name)
-    {
-        assert(contains(m_map, name));
-        m_map[name]->notify_one();
-    }
-
-    void notify_all()
-    {
-        for (auto &cv : m_map) {
-            cv.second->notify_one();
-        }
-    }
-
-    template<typename _Rep, typename _Period, typename _Predicate>
-    bool wait_for(const stream_name_t &name, std::unique_lock<std::mutex>& __lock, const std::chrono::duration<_Rep, _Period>& __rtime, _Predicate __p)
-    {
-        assert(contains(m_map, name));
-        return m_map[name]->wait_for(__lock, __rtime, __p);
-    }
-
-    ScheduledCoreOpCV(std::unordered_map<stream_name_t, std::shared_ptr<std::condition_variable>> cv_map) : m_map(std::move(cv_map))
-    {}
-
-private:
-    std::unordered_map<stream_name_t, std::shared_ptr<std::condition_variable>> m_map;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_SCHEDULED_CORE_OP_CV_HPP_ */
index bc4fa216a36777090bb88f640a04e6910f665c43..4fc0fcbb379383b004a1d0ebb2993b3d4243c165 100644 (file)
 namespace hailort
 {
 
-#define SINGLE_CONTEXT_BATCH_SIZE (1)
-
 ScheduledCoreOp::ScheduledCoreOp(std::shared_ptr<CoreOp> core_op, std::chrono::milliseconds timeout,
-    uint16_t max_batch_size, bool use_dynamic_batch_flow, StreamInfoVector &stream_infos, std::string core_op_name) :
+    uint16_t max_batch_size, bool use_dynamic_batch_flow, StreamInfoVector &stream_infos) :
     m_core_op(core_op),
     m_last_run_time_stamp(std::chrono::steady_clock::now()),
     m_timeout(std::move(timeout)),
@@ -28,27 +26,18 @@ ScheduledCoreOp::ScheduledCoreOp(std::shared_ptr<CoreOp> core_op, std::chrono::m
     m_use_dynamic_batch_flow(use_dynamic_batch_flow),
     m_priority(HAILO_SCHEDULER_PRIORITY_NORMAL),
     m_last_device_id(INVALID_DEVICE_ID),
-    m_core_op_name(core_op_name),
     m_inputs_names(),
-    m_outputs_names(),
-    m_is_nms(false)
+    m_outputs_names()
 {
     // Prepare empty counters for the added core-op
     for (const auto &stream_info : stream_infos) {
         m_min_threshold_per_stream[stream_info.name] = DEFAULT_SCHEDULER_MIN_THRESHOLD;
+        m_is_stream_enabled[stream_info.name] = true;
+        m_pending_frames.insert(stream_info.name);
         if (HAILO_H2D_STREAM == stream_info.direction) {
-            m_pending_to_send_frames.insert(stream_info.name);
-            m_h2d_finished_transferred_frames.insert(stream_info.name);
             m_inputs_names.push_back(stream_info.name);
         } else {
-            m_requested_read_frames.insert(stream_info.name);
-            m_finished_read_frames.insert(stream_info.name);
-            m_d2h_finished_transferred_frames.insert(stream_info.name);
             m_outputs_names.push_back(stream_info.name);
-
-            if (HAILO_FORMAT_ORDER_HAILO_NMS == stream_info.format.order) {
-                m_is_nms = true;
-            }
         }
     }
 }
@@ -63,10 +52,19 @@ Expected<std::shared_ptr<ScheduledCoreOp>> ScheduledCoreOp::create(std::shared_p
 
     // DEFAULT_BATCH_SIZE and SINGLE_CONTEXT_BATCH_SIZE support streaming and therfore we are not using dynamic batch flow
     auto use_dynamic_batch_flow = added_core_op->get_supported_features().multi_context && (max_batch_size > SINGLE_CONTEXT_BATCH_SIZE);
-    return make_shared_nothrow<ScheduledCoreOp>(added_core_op, timeout, max_batch_size, use_dynamic_batch_flow, stream_infos, added_core_op->name());
+    auto res = make_shared_nothrow<ScheduledCoreOp>(added_core_op, timeout, max_batch_size, use_dynamic_batch_flow,
+        stream_infos);
+    CHECK_NOT_NULL_AS_EXPECTED(res, HAILO_OUT_OF_HOST_MEMORY);
+
+    return res;
+}
+
+uint32_t ScheduledCoreOp::get_max_ongoing_frames_per_device() const
+{
+    return std::min(get_min_input_buffers_count(), get_min_output_buffers_count());
 }
 
-uint16_t ScheduledCoreOp::get_min_input_buffers_count()
+uint16_t ScheduledCoreOp::get_min_input_buffers_count() const
 {
     auto input_streams = m_core_op->get_input_streams();
     uint16_t buffers_count = UINT16_MAX;
@@ -79,7 +77,7 @@ uint16_t ScheduledCoreOp::get_min_input_buffers_count()
     return buffers_count;
 }
 
-uint16_t ScheduledCoreOp::get_min_output_buffers_count()
+uint16_t ScheduledCoreOp::get_min_output_buffers_count() const
 {
     auto output_streams = m_core_op->get_output_streams();
     uint16_t buffers_count = UINT16_MAX;
@@ -92,46 +90,18 @@ uint16_t ScheduledCoreOp::get_min_output_buffers_count()
     return buffers_count;
 }
 
-bool ScheduledCoreOp::use_dynamic_batch_flow()
+bool ScheduledCoreOp::use_dynamic_batch_flow() const
 {
     return m_use_dynamic_batch_flow;
 }
 
-bool ScheduledCoreOp::has_core_op_drained_everything()
-{
-    uint32_t written_frames = m_h2d_finished_transferred_frames.get_max_value();
-    for (const auto &name : get_outputs_names()) {
-        if ((m_finished_read_frames[name] + m_d2h_finished_transferred_frames[name]) < written_frames) {
-            return false;
-        }
-    }
-    return true;
-}
-
-void ScheduledCoreOp::decrease_current_core_op_counters()
-{
-    if (!m_h2d_finished_transferred_frames.all_values_bigger_or_equal(1)) {
-            return;
-    }
-    if (!m_finished_read_frames.all_values_bigger_or_equal(1)) {
-            return;
-    }
-
-    for (const auto &name : get_inputs_names()) {
-        m_h2d_finished_transferred_frames[name]--;
-    }
-    for (const auto &name : get_outputs_names()) {
-        m_finished_read_frames[name]--;
-    }
-}
-
 hailo_status ScheduledCoreOp::set_timeout(const std::chrono::milliseconds &timeout, const stream_name_t &stream_name)
 {
     CHECK(!m_frame_was_sent, HAILO_INVALID_OPERATION,
         "Setting scheduler timeout is allowed only before sending / receiving frames on the core-op.");
     m_timeout = timeout;
 
-    auto name = (stream_name.empty()) ? get_core_op_name() : stream_name;
+    auto name = (stream_name.empty()) ? m_core_op->name() : stream_name;
     LOGGER__INFO("Setting scheduler timeout of {} to {}ms", name, timeout.count());
 
     return HAILO_SUCCESS;
@@ -150,7 +120,7 @@ hailo_status ScheduledCoreOp::set_threshold(uint32_t threshold, const stream_nam
         threshold_per_stream_pair.second = threshold;
     }
 
-    auto name = (stream_name.empty()) ? get_core_op_name() : stream_name;
+    auto name = (stream_name.empty()) ? m_core_op->name() : stream_name;
     LOGGER__INFO("Setting scheduler threshold of {} to {} frames", name, threshold);
 
     return HAILO_SUCCESS;
@@ -176,11 +146,6 @@ void ScheduledCoreOp::set_last_device(const device_id_t &device_id)
     m_last_device_id = device_id;
 }
 
-std::string ScheduledCoreOp::get_core_op_name()
-{
-    return m_core_op_name;
-}
-
 std::shared_ptr<CoreOp> ScheduledCoreOp::get_core_op()
 {
     return m_core_op;
@@ -214,78 +179,58 @@ Expected<uint32_t> ScheduledCoreOp::get_threshold(const stream_name_t &stream_na
     return m_min_threshold_per_stream[stream_name].load();
 }
 
-uint16_t ScheduledCoreOp::get_max_batch_size()
+uint16_t ScheduledCoreOp::get_max_batch_size() const
 {
-    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_max_batch_size) {
-        // In nms networks we dont know the output buffers count and therfore we are using the input buffer count
-        return is_nms() ? get_min_input_buffers_count() : get_min_output_buffers_count();
-    }
     return m_max_batch_size;
 }
 
-Counter &ScheduledCoreOp::pending_to_send_frames()
-{
-    return m_pending_to_send_frames;
-}
-
-std::atomic_uint32_t &ScheduledCoreOp::pending_to_send_frames(const stream_name_t &stream_name)
+uint16_t ScheduledCoreOp::get_burst_size() const
 {
-    return m_pending_to_send_frames[stream_name];
+    // When the user don't explicitly pass batch size, in order to preserve performance from previous scheduler version,
+    // we don't want to stop streaming until we transferred at least get_min_output_buffers_count() frames (This was
+    // the behaviour in previous scheduler versions).
+    return m_core_op->is_default_batch_size() ? get_min_output_buffers_count() : get_max_batch_size();
 }
 
-uint32_t ScheduledCoreOp::pending_to_send_frames_min_value()
+SchedulerCounter &ScheduledCoreOp::pending_frames()
 {
-    return m_pending_to_send_frames.get_min_value();
+    return m_pending_frames;
 }
 
-Counter &ScheduledCoreOp::h2d_finished_transferred_frames()
+uint32_t ScheduledCoreOp::get_min_input_pending_frames() const
 {
-    return m_h2d_finished_transferred_frames;
-}
-
-std::atomic_uint32_t &ScheduledCoreOp::h2d_finished_transferred_frames(const stream_name_t &stream_name)
-{
-    return m_h2d_finished_transferred_frames[stream_name];
-}
-
-uint32_t ScheduledCoreOp::h2d_finished_transferred_frames_max_value()
-{
-    return m_h2d_finished_transferred_frames.get_max_value();
-}
-
-Counter &ScheduledCoreOp::requested_read_frames()
-{
-    return m_requested_read_frames;
-}
-
-std::atomic_uint32_t &ScheduledCoreOp::requested_read_frames(const stream_name_t &stream_name)
-{
-    return m_requested_read_frames[stream_name];
+    uint32_t min_count = std::numeric_limits<uint32_t>::max();
+    for (const auto &input_name : m_inputs_names) {
+        min_count = std::min(min_count, m_pending_frames[input_name]);
+    }
+    return min_count;
 }
 
-Counter &ScheduledCoreOp::d2h_finished_transferred_frames()
+bool ScheduledCoreOp::is_stream_enabled(const stream_name_t &stream_name) const
 {
-    return m_d2h_finished_transferred_frames;
+    return m_is_stream_enabled.at(stream_name);
 }
 
-std::atomic_uint32_t &ScheduledCoreOp::d2h_finished_transferred_frames(const stream_name_t &stream_name)
+void ScheduledCoreOp::enable_stream(const stream_name_t &stream_name)
 {
-    return m_d2h_finished_transferred_frames[stream_name];
+    m_is_stream_enabled.at(stream_name) = true;
 }
 
-Counter &ScheduledCoreOp::finished_read_frames()
+void ScheduledCoreOp::disable_stream(const stream_name_t &stream_name)
 {
-    return m_finished_read_frames;
+    m_is_stream_enabled.at(stream_name) = false;
 }
 
-std::atomic_uint32_t &ScheduledCoreOp::finished_read_frames(const stream_name_t &stream_name)
+bool ScheduledCoreOp::any_stream_disabled() const
 {
-    return m_finished_read_frames[stream_name];
+    auto is_disabled = [](const std::pair<const stream_name_t, std::atomic_bool> &is_enabled) { return !is_enabled.second; };
+    return std::any_of(m_is_stream_enabled.begin(), m_is_stream_enabled.end(), is_disabled);
 }
 
-uint32_t ScheduledCoreOp::finished_read_frames_min_value()
+bool ScheduledCoreOp::all_stream_disabled() const
 {
-    return m_finished_read_frames.get_min_value();
+    auto is_disabled = [](const std::pair<const stream_name_t, std::atomic_bool> &is_enabled) { return !is_enabled.second; };
+    return std::all_of(m_is_stream_enabled.begin(), m_is_stream_enabled.end(), is_disabled);
 }
 
 const std::vector<stream_name_t> &ScheduledCoreOp::get_inputs_names()
index e18eefa1a7ca52a34820cf6be7f8f9951ba1f606..d68a97a32be2f43b3aaf8898a3bd9ac114778520 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "core_op/core_op.hpp"
 
-#include "scheduler_base.hpp"
+#include "vdevice/scheduler/scheduler_counter.hpp"
 
 #include <condition_variable>
 #include <queue>
@@ -29,83 +29,18 @@ namespace hailort
 
 #define DEFAULT_SCHEDULER_TIMEOUT (std::chrono::milliseconds(0))
 #define DEFAULT_SCHEDULER_MIN_THRESHOLD (0)
-#define INVALID_DEVICE_ID (std::to_string(UINT32_MAX))
+constexpr const char *INVALID_DEVICE_ID = "";
 
-using stream_name_t = std::string;
 using core_op_priority_t = uint8_t;
 
-#define SINGLE_CONTEXT_BATCH_SIZE (1)
+constexpr const uint16_t SINGLE_CONTEXT_BATCH_SIZE = 1;
 
-class Counter
-{
-public:
-    Counter() : m_map()
-        {};
-
-    void insert(const stream_name_t &name)
-    {
-        assert(!contains(m_map, name));
-        m_map[name] = 0;
-    }
-
-    std::atomic_uint32_t &operator [](const stream_name_t &name)
-    {
-        assert(contains(m_map, name));
-        return m_map[name];
-    }
-
-    void increase(const stream_name_t &name)
-    {
-        assert(contains(m_map, name));
-        m_map[name]++;
-    }
-
-    void decrease(const stream_name_t &name)
-    {
-        assert(contains(m_map, name));
-        if (0 != m_map[name]) {
-            m_map[name]--;
-        }
-    }
-
-    uint32_t get_min_value()
-    {
-        return get_min_value_of_unordered_map(m_map);
-    }
-
-    uint32_t get_max_value()
-    {
-        return get_max_value_of_unordered_map(m_map);
-    }
-
-    bool all_values_bigger_or_equal(uint32_t value)
-    {
-        for (const auto &pair : m_map) {
-            if (value > pair.second) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    bool empty()
-    {
-        for (const auto &pair : m_map) {
-            if (0 != pair.second) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-private:
-    std::unordered_map<stream_name_t, std::atomic_uint32_t> m_map;
-};
 
 class ScheduledCoreOp
 {
 public:
-    static Expected<std::shared_ptr<ScheduledCoreOp>> create(std::shared_ptr<CoreOp> added_core_op, StreamInfoVector &stream_infos);
+    static Expected<std::shared_ptr<ScheduledCoreOp>> create(std::shared_ptr<CoreOp> added_core_op,
+        StreamInfoVector &stream_infos);
 
     virtual ~ScheduledCoreOp()  = default;
     ScheduledCoreOp(const ScheduledCoreOp &other) = delete;
@@ -113,17 +48,18 @@ public:
     ScheduledCoreOp &operator=(ScheduledCoreOp &&other) = delete;
     ScheduledCoreOp(ScheduledCoreOp &&other) noexcept = delete;
 
-    std::string get_core_op_name();
     std::shared_ptr<CoreOp> get_core_op();
     const std::vector<stream_name_t> &get_outputs_names();
     const std::vector<stream_name_t> &get_inputs_names();
 
-    uint16_t get_min_input_buffers_count();
-    uint16_t get_min_output_buffers_count();
+    uint32_t get_max_ongoing_frames_per_device() const;
 
-    uint16_t get_max_batch_size();
-    bool use_dynamic_batch_flow();
-    bool has_core_op_drained_everything();
+    uint16_t get_min_input_buffers_count() const;
+    uint16_t get_min_output_buffers_count() const;
+
+    uint16_t get_max_batch_size() const;
+    uint16_t get_burst_size() const;
+    bool use_dynamic_batch_flow() const;
 
     device_id_t get_last_device();
     void set_last_device(const device_id_t &device_id);
@@ -139,34 +75,18 @@ public:
     void set_last_run_timestamp(const std::chrono::time_point<std::chrono::steady_clock> &timestamp);
 
     void mark_frame_sent();
-    void decrease_current_core_op_counters();
-
-    Counter &pending_to_send_frames();
-    std::atomic_uint32_t &pending_to_send_frames(const stream_name_t &stream_name);
-    uint32_t pending_to_send_frames_min_value();
-
-    Counter &h2d_finished_transferred_frames();
-    std::atomic_uint32_t &h2d_finished_transferred_frames(const stream_name_t &stream_name);
-    uint32_t h2d_finished_transferred_frames_max_value();
-
-    Counter &requested_read_frames();
-    std::atomic_uint32_t &requested_read_frames(const stream_name_t &stream_name);
 
-    Counter &d2h_finished_transferred_frames();
-    std::atomic_uint32_t &d2h_finished_transferred_frames(const stream_name_t &stream_name);
+    SchedulerCounter &pending_frames();
+    uint32_t get_min_input_pending_frames() const;
 
-    Counter &finished_read_frames();
-    std::atomic_uint32_t &finished_read_frames(const stream_name_t &stream_name);
-    uint32_t finished_read_frames_min_value();
-
-
-    bool is_nms()
-    {
-        return m_is_nms;
-    }
+    bool is_stream_enabled(const stream_name_t &stream_name) const;
+    void enable_stream(const stream_name_t &stream_name);
+    void disable_stream(const stream_name_t &stream_name);
+    bool any_stream_disabled() const;
+    bool all_stream_disabled() const;
 
     ScheduledCoreOp(std::shared_ptr<CoreOp> core_op, std::chrono::milliseconds timeout,
-        uint16_t max_batch_size, bool use_dynamic_batch_flow, StreamInfoVector &stream_infos, std::string core_op_name);
+        uint16_t max_batch_size, bool use_dynamic_batch_flow, StreamInfoVector &stream_infos);
 
 private:
     std::shared_ptr<CoreOp> m_core_op;
@@ -176,29 +96,23 @@ private:
     uint16_t m_max_batch_size;
     bool m_use_dynamic_batch_flow;
 
-    Counter m_pending_to_send_frames; // 'signal_frame_pending_to_send()' has been called - frame is written in buffer (writes are a-sync)
-
-    Counter m_h2d_finished_transferred_frames; // Frame has been transferred to device (intrpt was raised)
-
-    Counter m_requested_read_frames; // 'wait_for_read()' has been called
-
-    Counter m_d2h_finished_transferred_frames; // Frame has been transferred from device (intrpt was raised)
-    Counter m_finished_read_frames; // 'signal_finish_read()' has been called - user finished getting the frame
+    // For each stream, amount of frames pending (for launch_transfer call)
+    SchedulerCounter m_pending_frames;
 
     std::unordered_map<stream_name_t, std::atomic_uint32_t> m_min_threshold_per_stream;
+    std::unordered_map<stream_name_t, std::atomic_bool> m_is_stream_enabled;
 
     core_op_priority_t m_priority;
 
     device_id_t m_last_device_id;
 
-    std::string m_core_op_name;
-
     std::vector<stream_name_t> m_inputs_names;
     std::vector<stream_name_t> m_outputs_names;
-
-    bool m_is_nms;
 };
 
+
+using ScheduledCoreOpPtr = std::shared_ptr<ScheduledCoreOp>;
+
 } /* namespace hailort */
 
 #endif /* _HAILO_SCHEDULED_CORE_OP_HPP_ */
index f1b64f40ae8bbb455580c861b1e2092e01a555fe..e522cc5546359ac8e2b2d8f25b5b83b36971e02e 100644 (file)
 
 #include "scheduled_stream.hpp"
 
+#include "stream_common/queued_stream_buffer_pool.hpp"
+
 #include "utils/profiler/tracer_macros.hpp"
 
+#include "stream_common/queued_stream_buffer_pool.hpp"
+
 namespace hailort
 {
 
 /** Input stream **/
 Expected<std::unique_ptr<ScheduledInputStream>> ScheduledInputStream::create(
-    std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-    const scheduler_core_op_handle_t &core_op_handle,
-    EventPtr &&core_op_activated_event,
+    std::map<device_id_t, std::reference_wrapper<InputStreamBase>> &&streams,
     const LayerInfo &layer_info,
-    CoreOpsSchedulerWeakPtr core_ops_scheduler)
+    const scheduler_core_op_handle_t &core_op_handle,
+    CoreOpsSchedulerWeakPtr core_ops_scheduler,
+    EventPtr core_op_activated_event)
 {
+    auto max_queue_size_per_stream = streams.begin()->second.get().get_buffer_frames_size();
+    CHECK_EXPECTED(max_queue_size_per_stream);
+    const auto max_queue_size = max_queue_size_per_stream.value() * streams.size();
+
+    // In all cases, the buffer mode of the low level streams is always NOT_OWNING (the buffer is owned either by
+    // ScheduledInputStream or by the user)
+    for (auto &stream : streams) {
+        auto status = stream.second.get().set_buffer_mode(StreamBufferMode::NOT_OWNING);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+    }
+
     auto status = HAILO_UNINITIALIZED;
     auto local_vdevice_stream = make_unique_nothrow<ScheduledInputStream>(std::move(streams),
         core_op_handle, std::move(core_op_activated_event), layer_info,
-        core_ops_scheduler, status);
+        core_ops_scheduler, max_queue_size, status);
     CHECK_NOT_NULL_AS_EXPECTED(local_vdevice_stream, HAILO_OUT_OF_HOST_MEMORY);
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     return local_vdevice_stream;
 }
 
-hailo_status ScheduledInputStreamBase::abort()
+hailo_status ScheduledInputStream::launch_transfer(const device_id_t &device_id)
 {
-    return abort_impl(m_core_op_handle);
-}
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE, "core_op_scheduler was destructed");
 
-hailo_status ScheduledInputStreamBase::abort_impl(scheduler_core_op_handle_t core_op_handle)
-{
-    auto status = HAILO_SUCCESS; // Best effort
-    assert(1 == m_streams.size());
-    auto abort_status = m_streams.begin()->second.get().abort();
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Failed to abort input stream. (status: {} device: {})", status, m_streams.begin()->second.get().get_dev_id());
-        status = abort_status;
-    }
+    auto pending_buffer = m_transfer_requests.dequeue();
+    CHECK_EXPECTED_AS_STATUS(pending_buffer);
 
-    auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+    auto reorder_queue_callback = m_callback_reorder_queue.wrap_callback(pending_buffer->callback);
+    pending_buffer->callback = reorder_queue_callback;
 
-    auto disable_status = core_ops_scheduler->disable_stream(core_op_handle, name());
-    if (HAILO_SUCCESS != disable_status) {
-        LOGGER__ERROR("Failed to disable stream in the core-op scheduler. (status: {})", disable_status);
-        status = disable_status;
-    }
+    // Wrap callback with scheduler signal read finish.
+    pending_buffer->callback = [this, device_id, callback=reorder_queue_callback](hailo_status status) {
+        if (HAILO_SUCCESS == status) {
+            auto scheduler = m_core_ops_scheduler.lock();
+            assert(scheduler);
+            scheduler->signal_frame_transferred(m_core_op_handle, name(), device_id, HAILO_H2D_STREAM);
+        }
 
+        callback(status);
+    };
+
+    assert(contains(m_streams, device_id));
+    auto status = m_streams.at(device_id).get().write_async(pending_buffer.release());
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("write_async on device {} failed with {}", device_id, status);
+        // The pending_buffer was already registered so we must call the callback to give the error back to the user.
+        reorder_queue_callback(status);
+    }
     return status;
 }
 
-hailo_status ScheduledInputStreamBase::clear_abort()
+hailo_stream_interface_t ScheduledInputStream::get_interface() const
 {
-    return clear_abort_impl(m_core_op_handle);
+    // All interface values of m_streams should be the same
+    return m_streams.begin()->second.get().get_interface();
 }
 
-hailo_status ScheduledInputStreamBase::flush()
+Expected<std::unique_ptr<StreamBufferPool>> ScheduledInputStream::allocate_buffer_pool()
 {
-    auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
-
-    auto status = core_ops_scheduler->flush_pending_buffers(m_core_op_handle, name(), get_timeout());
-    if (HAILO_STREAM_ABORTED_BY_USER == status) {
-        LOGGER__INFO("Got HAILO_STREAM_ABORTED_BY_USER in flush of stream {}", name());
-        return status;
+    if (m_streams.size() == 1) {
+        // On single device, we use the stream allocate_buffer_pool for best optimization (The buffer can be circular
+        // dma buffer)
+        auto &async_stream = dynamic_cast<AsyncInputStreamBase&>(m_streams.begin()->second.get());
+        return async_stream.allocate_buffer_pool();
+    } else {
+        auto queued_pool = QueuedStreamBufferPool::create(m_transfer_requests.max_size(), get_frame_size(),
+            BufferStorageParams::create_dma());
+        CHECK_EXPECTED(queued_pool);
+
+        return std::unique_ptr<StreamBufferPool>(queued_pool.release());
     }
-    CHECK_SUCCESS(status);
-
-    return VDeviceInputStreamBase::flush();
 }
 
-hailo_status ScheduledInputStreamBase::clear_abort_impl(scheduler_core_op_handle_t core_op_handle)
+size_t ScheduledInputStream::get_max_ongoing_transfers() const
 {
-    auto status = HAILO_SUCCESS; // Best effort
-    assert(1 == m_streams.size());
-    auto clear_abort_status = m_streams.begin()->second.get().clear_abort();
-    if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
-            LOGGER__ERROR("Failed to clear abort input stream. (status: {} device: {})", clear_abort_status, m_streams.begin()->second.get().get_dev_id());
-            status = clear_abort_status;
-    }
-
-    auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
-
-    auto enable_status = core_ops_scheduler->enable_stream(core_op_handle, name());
-    if (HAILO_SUCCESS != enable_status) {
-        LOGGER__ERROR("Failed to enable stream in the core-op scheduler. (status: {})", enable_status);
-        status = enable_status;
-    }
-
-    return status;
+    return m_transfer_requests.max_size();
 }
 
-hailo_status ScheduledInputStream::write_impl(const MemoryView &buffer, const std::function<bool()> &should_cancel)
+hailo_status ScheduledInputStream::write_async_impl(TransferRequest &&transfer_request)
 {
     auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE, "core_op_scheduler was destructed");
 
-    assert(1 == m_streams.size());
-    auto status = m_streams.begin()->second.get().write_buffer_only(buffer, should_cancel);
-    if (HAILO_SUCCESS != status) {
-        LOGGER__INFO("Write to stream has failed! status = {}", status);
+    auto status = m_transfer_requests.enqueue(std::move(transfer_request));
+    if (HAILO_QUEUE_IS_FULL == status) {
         return status;
     }
+    CHECK_SUCCESS(status);
 
-    auto write_finish_status = core_ops_scheduler->signal_frame_pending_to_send(m_core_op_handle, name());
-    if (HAILO_STREAM_ABORTED_BY_USER == write_finish_status) {
-        return write_finish_status;
+    status = core_ops_scheduler->signal_frame_pending(m_core_op_handle, name(), HAILO_H2D_STREAM);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        return status;
     }
-    CHECK_SUCCESS(write_finish_status);
+    CHECK_SUCCESS(status);
 
     return HAILO_SUCCESS;
 }
 
-Expected<std::unique_ptr<ScheduledAsyncInputStream>> ScheduledAsyncInputStream::create(
-    std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-    const scheduler_core_op_handle_t &core_op_handle,
-    EventPtr &&core_op_activated_event,
-    const LayerInfo &layer_info,
-    CoreOpsSchedulerWeakPtr core_ops_scheduler)
+hailo_status ScheduledInputStream::abort()
 {
-    auto max_queue_size_per_stream = streams.begin()->second.get().get_buffer_frames_size();
-    CHECK_EXPECTED(max_queue_size_per_stream);
-    const auto max_queue_size = max_queue_size_per_stream.value() * streams.size();
-
-    auto status = HAILO_UNINITIALIZED;
-    auto local_vdevice_stream = make_unique_nothrow<ScheduledAsyncInputStream>(std::move(streams),
-        core_op_handle, std::move(core_op_activated_event), layer_info,
-        core_ops_scheduler, max_queue_size, status);
-    CHECK_NOT_NULL_AS_EXPECTED(local_vdevice_stream, HAILO_OUT_OF_HOST_MEMORY);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-
-    return local_vdevice_stream;
-}
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE, "core_op_scheduler was destructed");
 
-hailo_status ScheduledAsyncInputStream::send_pending_buffer(const device_id_t &device_id)
-{
-    // TODO HRT-10583 - allow option to remove reorder queue
-    auto pending_buffer = m_pending_buffers.dequeue();
-    CHECK_EXPECTED_AS_STATUS(pending_buffer);
+    core_ops_scheduler->disable_stream(m_core_op_handle, name());
 
-    pending_buffer->callback = m_callback_reorder_queue.wrap_callback(pending_buffer->callback);
-    assert(contains(m_streams, device_id));
-    auto status = m_streams.at(device_id).get().write_async(pending_buffer.release());
-    if (HAILO_SUCCESS != status) {
-        m_callback_reorder_queue.cancel_last_callback();
-    }
-    return status;
+    return AsyncInputStreamBase::abort();
 }
 
-hailo_status ScheduledAsyncInputStream::wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout)
-{
-    (void)transfer_size;
-    return m_pending_buffers.wait_for_room(timeout);
-}
-
-hailo_status ScheduledAsyncInputStream::write_async(TransferRequest &&transfer_request)
+hailo_status ScheduledInputStream::clear_abort()
 {
     auto core_ops_scheduler = m_core_ops_scheduler.lock();
     CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
 
-    auto status = m_pending_buffers.enqueue(std::move(transfer_request));
-    CHECK_SUCCESS(status);
-
-    auto write_finish_status = core_ops_scheduler->signal_frame_pending_to_send(m_core_op_handle, name());
-    if (HAILO_STREAM_ABORTED_BY_USER == write_finish_status) {
-        return write_finish_status;
-    }
-    CHECK_SUCCESS(write_finish_status);
-
-    return HAILO_SUCCESS;
-}
-
-Expected<size_t> ScheduledAsyncInputStream::get_async_max_queue_size() const
-{
-    return m_pending_buffers.max_size();
-}
-
-
-hailo_status ScheduledAsyncInputStream::abort()
-{
-    m_pending_buffers.abort();
-    return ScheduledInputStreamBase::abort();
-}
+    core_ops_scheduler->enable_stream(m_core_op_handle, name());
 
-hailo_status ScheduledAsyncInputStream::clear_abort()
-{
-    m_pending_buffers.clear_abort();
-    return ScheduledInputStreamBase::clear_abort();
-}
-
-hailo_status ScheduledAsyncInputStream::write_impl(const MemoryView &, const std::function<bool()> &)
-{
-    LOGGER__ERROR("Sync write is not supported by async streams");
-    return HAILO_NOT_SUPPORTED;
+    return AsyncInputStreamBase::clear_abort();
 }
 
 /** Output stream **/
 Expected<std::unique_ptr<ScheduledOutputStream>> ScheduledOutputStream::create(
-    std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
+    std::map<device_id_t, std::reference_wrapper<OutputStreamBase>> &&streams,
     const scheduler_core_op_handle_t &core_op_handle,
     const LayerInfo &layer_info,
-    EventPtr &&core_op_activated_event,
+    EventPtr core_op_activated_event,
     CoreOpsSchedulerWeakPtr core_ops_scheduler)
 {
+    auto max_queue_size_per_stream = streams.begin()->second.get().get_buffer_frames_size();
+    CHECK_EXPECTED(max_queue_size_per_stream);
+    const auto max_queue_size = max_queue_size_per_stream.value() * streams.size();
+
+    // In all cases, the buffer mode of the low level streams is always NOT_OWNING (the buffer is owned either by
+    // ScheduledOutputStream or by the user)
+    for (auto &stream : streams) {
+        auto status = stream.second.get().set_buffer_mode(StreamBufferMode::NOT_OWNING);
+        CHECK_SUCCESS_AS_EXPECTED(status);
+    }
+
+
     auto status = HAILO_UNINITIALIZED;
     auto stream = make_unique_nothrow<ScheduledOutputStream>(std::move(streams), core_op_handle,
-        layer_info, std::move(core_op_activated_event), core_ops_scheduler, status);
+        layer_info, std::move(core_op_activated_event), core_ops_scheduler, max_queue_size, status);
     CHECK_NOT_NULL_AS_EXPECTED(stream, HAILO_OUT_OF_HOST_MEMORY);
     CHECK_SUCCESS_AS_EXPECTED(status);
+
     return stream;
 }
 
-ScheduledOutputStream::ScheduledOutputStream(
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
-        const scheduler_core_op_handle_t &core_op_handle,
-        const LayerInfo &layer_info,
-        EventPtr &&core_op_activated_event,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler,
-        hailo_status &status) : ScheduledOutputStreamBase(std::move(streams), core_op_handle, layer_info,
-            std::move(core_op_activated_event), core_ops_scheduler, status)
-    {
-        for (auto &stream_pair : m_streams) {
-            stream_pair.second.get().register_interrupt_callback(
-                [scheduler_weak=m_core_ops_scheduler, core_op_handle=m_core_op_handle, name=name(), device_id=stream_pair.first]() {
-                    auto scheduler = scheduler_weak.lock();
-                    assert(scheduler);
-                    scheduler->signal_frame_transferred_d2h(core_op_handle, name, device_id);
-                }
-            );
-        }
-    }
-
-hailo_status ScheduledOutputStream::set_next_device_to_read(const device_id_t &device_id)
+hailo_status ScheduledOutputStream::launch_transfer(const device_id_t &device_id)
 {
-    std::lock_guard<std::mutex> lock(m_device_read_order_mutex);
-    m_device_read_order.push(device_id);
-    return HAILO_SUCCESS;
-}
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE, "core_op_scheduler was destructed");
 
-hailo_status ScheduledOutputStreamBase::abort()
-{
-    return abort_impl(m_core_op_handle);
-}
+    auto pending_buffer = m_transfer_requests.dequeue();
+    CHECK_EXPECTED_AS_STATUS(pending_buffer);
 
-hailo_status ScheduledOutputStreamBase::abort_impl(scheduler_core_op_handle_t core_op_handle)
-{
-    auto status = HAILO_SUCCESS; // Best effort
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto abort_status = stream.get().abort();
-        if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to abort output stream. (status: {} device: {})", status, stream.get().get_dev_id());
-            status = abort_status;
+    // Wrap callback with reorder queue.
+    auto reorder_queue_callback = m_callback_reorder_queue.wrap_callback(pending_buffer->callback);
+
+    // Wrap callback with scheduler signal read finish.
+    pending_buffer->callback = [this, device_id, callback=reorder_queue_callback](hailo_status status) {
+        if (HAILO_SUCCESS == status) {
+            auto scheduler = m_core_ops_scheduler.lock();
+            assert(scheduler);
+            scheduler->signal_frame_transferred(m_core_op_handle, name(), device_id, HAILO_D2H_STREAM);
+
+            if (buffer_mode() == StreamBufferMode::NOT_OWNING) {
+                // On OWNING mode this trace is called after read_impl is called.
+                TRACE(ReadFrameTrace, m_core_op_handle, name());
+            }
         }
-    }
-    auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
 
-    auto disable_status = core_ops_scheduler->disable_stream(core_op_handle, name());
-    if (HAILO_SUCCESS != disable_status) {
-        LOGGER__ERROR("Failed to disable stream in the core-op scheduler. (status: {})", disable_status);
-        status = disable_status;
-    }
+        callback(status);
+    };
 
+    assert(contains(m_streams, device_id));
+    auto status = m_streams.at(device_id).get().read_async(pending_buffer.release());
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("read_async on device {} failed with {}", device_id, status);
+        // The pending_buffer was already registered so we must call the callback to give the error back to the user.
+        reorder_queue_callback(status);
+    }
     return status;
 }
 
-hailo_status ScheduledOutputStreamBase::clear_abort()
+hailo_stream_interface_t ScheduledOutputStream::get_interface() const
 {
-    return clear_abort_impl(m_core_op_handle);
+    // All interface values of m_streams should be the same
+    return m_streams.begin()->second.get().get_interface();
 }
 
-hailo_status ScheduledOutputStreamBase::clear_abort_impl(scheduler_core_op_handle_t core_op_handle)
+Expected<std::unique_ptr<StreamBufferPool>> ScheduledOutputStream::allocate_buffer_pool()
 {
-    auto status = HAILO_SUCCESS; // Best effort
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto clear_abort_status = stream.get().clear_abort();
-        if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
-            LOGGER__ERROR("Failed to clear abort output stream. (status: {} device: {})", clear_abort_status, stream.get().get_dev_id());
-            status = clear_abort_status;
-        }
+    if (m_streams.size() == 1) {
+        // On a single device, we use the stream's allocate_buffer_pool for best optimization (the buffer can be a
+        // circular DMA buffer)
+        auto &async_stream = dynamic_cast<AsyncOutputStreamBase&>(m_streams.begin()->second.get());
+        return async_stream.allocate_buffer_pool();
+    } else {
+        auto queued_pool = QueuedStreamBufferPool::create(m_transfer_requests.max_size(), get_frame_size(),
+            BufferStorageParams::create_dma());
+        CHECK_EXPECTED(queued_pool);
+
+        return std::unique_ptr<StreamBufferPool>(queued_pool.release());
     }
-
-    auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
-
-    auto enable_status = core_ops_scheduler->enable_stream(core_op_handle, name());
-    if (HAILO_SUCCESS != enable_status) {
-        LOGGER__ERROR("Failed to enable stream in the core-op scheduler. (status: {})", enable_status);
-        status = enable_status;
-    }
-
-    return status;
 }
 
-hailo_status ScheduledOutputStream::read(MemoryView buffer)
+size_t ScheduledOutputStream::get_max_ongoing_transfers() const
 {
-    auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+    return m_transfer_requests.max_size();
+}
 
-    auto status = core_ops_scheduler->signal_frame_pending_to_read(m_core_op_handle, name());
-    CHECK_SUCCESS(status);
 
-    auto device_id = wait_for_read();
-    if (HAILO_STREAM_ABORTED_BY_USER == device_id.status()) {
-        LOGGER__INFO("Read from stream was aborted.");
-        return device_id.status();
-    }
-    CHECK_EXPECTED_AS_STATUS(device_id);
+hailo_status ScheduledOutputStream::read_async_impl(TransferRequest &&transfer_request)
+{
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE, "core_op_scheduler was destructed");
 
-    assert(contains(m_streams, device_id.value()));
-    status = m_streams.at(device_id.value()).get().read(buffer);
-    if (HAILO_SUCCESS != status) {
-        LOGGER__INFO("Read from stream has failed! status = {}", status);
+    auto status = m_transfer_requests.enqueue(std::move(transfer_request));
+    if (HAILO_QUEUE_IS_FULL == status) {
         return status;
     }
+    CHECK_SUCCESS(status);
 
-    status = core_ops_scheduler->signal_read_finish(m_core_op_handle, name(), device_id.value());
+    status = core_ops_scheduler->signal_frame_pending(m_core_op_handle, name(), HAILO_D2H_STREAM);
     if (HAILO_STREAM_ABORTED_BY_USER == status) {
         return status;
     }
@@ -337,25 +261,24 @@ hailo_status ScheduledOutputStream::read(MemoryView buffer)
     return HAILO_SUCCESS;
 }
 
-Expected<device_id_t> ScheduledOutputStream::wait_for_read()
+hailo_status ScheduledOutputStream::abort()
 {
     auto core_ops_scheduler = m_core_ops_scheduler.lock();
-    CHECK_AS_EXPECTED(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE, "core_op_scheduler was destructed");
 
-    auto status = core_ops_scheduler->wait_for_read(m_core_op_handle, name(), get_timeout(), [this]() {
-        std::lock_guard<std::mutex> lock(m_device_read_order_mutex);
-        return !m_device_read_order.empty();
-    });
-    if (HAILO_STREAM_ABORTED_BY_USER == status) {
-        LOGGER__INFO("Read from stream was aborted.");
-        return make_unexpected(status);
-    }
-    CHECK_SUCCESS_AS_EXPECTED(status);
+    core_ops_scheduler->disable_stream(m_core_op_handle, name());
+
+    return AsyncOutputStreamBase::abort();
+}
+
+hailo_status ScheduledOutputStream::clear_abort()
+{
+    auto core_ops_scheduler = m_core_ops_scheduler.lock();
+    CHECK(core_ops_scheduler, HAILO_INTERNAL_FAILURE);
+
+    core_ops_scheduler->enable_stream(m_core_op_handle, name());
 
-    std::lock_guard<std::mutex> lock(m_device_read_order_mutex);
-    auto device_id = m_device_read_order.front();
-    m_device_read_order.pop();
-    return device_id;
+    return AsyncOutputStreamBase::clear_abort();
 }
 
 } /* namespace hailort */
index 5aa530c6400acb2108104176c419d6d08b77c383..ce994d68f8d6d9cb1c38a1b348c269ec3c18c42a 100644 (file)
 #include "hailo/expected.hpp"
 
 #include "stream_common/stream_internal.hpp"
+#include "stream_common/async_stream_base.hpp"
 #include "vdevice/vdevice_internal.hpp"
-#include "vdevice/vdevice_stream.hpp"
 #include "vdevice/callback_reorder_queue.hpp"
+#include "vdevice/scheduler/scheduler.hpp"
+#include "stream_common/stream_buffer_pool.hpp"
+#include "stream_common/async_stream_base.hpp"
 #include "vdma/vdma_device.hpp"
 
 
 namespace hailort
 {
 
-
-class ScheduledInputStreamBase : public VDeviceInputStreamBase {
+class ScheduledInputStream : public AsyncInputStreamBase {
 public:
-    ScheduledInputStreamBase(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        const scheduler_core_op_handle_t &core_op_handle,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler,
-        hailo_status &status) :
-            VDeviceInputStreamBase(std::move(streams), std::move(core_op_activated_event), layer_info, status),
-            m_core_op_handle(core_op_handle),
-            m_core_ops_scheduler(core_ops_scheduler)
-    {}
-
-    virtual bool is_scheduled() override final { return true; };
-
-    virtual void notify_all() override
-    {
-        auto scheduler = m_core_ops_scheduler.lock();
-        if (nullptr == scheduler) {
-            LOGGER__CRITICAL("Failed to acquire scheduler");
-            return;
-        }
-        scheduler->notify_all();
-
-        for (const auto &pair : m_streams) {
-            auto &stream = pair.second;
-            stream.get().notify_all();
-        }
-    }
 
-    virtual hailo_status abort() override;
-    virtual hailo_status clear_abort() override;
-    virtual hailo_status flush() override;
-
-protected:
-    scheduler_core_op_handle_t m_core_op_handle;
-    CoreOpsSchedulerWeakPtr m_core_ops_scheduler;
-
-private:
-    hailo_status abort_impl(scheduler_core_op_handle_t core_op_handle);
-    hailo_status clear_abort_impl(scheduler_core_op_handle_t core_op_handle);
-};
-
-class ScheduledInputStream : public ScheduledInputStreamBase {
-public:
     static Expected<std::unique_ptr<ScheduledInputStream>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        const scheduler_core_op_handle_t &core_op_handle,
-        EventPtr &&core_op_activated_event,
+        std::map<device_id_t, std::reference_wrapper<InputStreamBase>> &&streams,
         const LayerInfo &layer_info,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler);
+        const scheduler_core_op_handle_t &core_op_handle,
+        CoreOpsSchedulerWeakPtr core_ops_scheduler,
+        EventPtr core_op_activated_event);
 
     ScheduledInputStream(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
+        std::map<device_id_t, std::reference_wrapper<InputStreamBase>> &&streams,
         const scheduler_core_op_handle_t &core_op_handle,
         EventPtr &&core_op_activated_event,
         const LayerInfo &layer_info,
         CoreOpsSchedulerWeakPtr core_ops_scheduler,
+        size_t max_queue_size,
         hailo_status &status) :
-            ScheduledInputStreamBase(std::move(streams), core_op_handle, std::move(core_op_activated_event), layer_info,
-                core_ops_scheduler, status)
-    {}
-
-protected:
-    virtual hailo_status write_impl(const MemoryView &buffer, const std::function<bool()> &should_cancel) override;
-};
-
-class TransferRequestsQueue final {
-public:
-    TransferRequestsQueue(size_t max_size) :
-        m_max_size(max_size)
+            AsyncInputStreamBase(layer_info, streams.begin()->second.get().get_interface(),
+                                   std::move(core_op_activated_event), status),
+            m_streams(std::move(streams)),
+            m_core_ops_scheduler(core_ops_scheduler),
+            m_core_op_handle(core_op_handle),
+            m_transfer_requests(max_queue_size),
+            m_callback_reorder_queue(max_queue_size) // TODO HRT-1058 - use reorder queue only when needed
     {}
 
-    ~TransferRequestsQueue()
-    {
-        while (!m_queue.empty()) {
-            auto &request = m_queue.front();
-            request.callback(HAILO_STREAM_ABORTED_BY_USER);
-            m_queue.pop();
-        }
-    }
-
-    TransferRequestsQueue(const TransferRequestsQueue &) = delete;
-    TransferRequestsQueue &operator=(const TransferRequestsQueue &) = delete;
+    virtual hailo_stream_interface_t get_interface() const override;
 
-    hailo_status wait_for_room(std::chrono::milliseconds timeout)
-    {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        auto result = m_dequeue_cv.wait_for(lock, timeout,
-            [&] {
-                return m_is_aborted || (m_queue.size() < m_max_size);
-            });
-        if (!result) {
-            return HAILO_TIMEOUT;
-        }
-        if (m_is_aborted) {
-            return HAILO_STREAM_ABORTED_BY_USER;
-        }
-        return HAILO_SUCCESS;
-    }
+    virtual Expected<std::unique_ptr<StreamBufferPool>> allocate_buffer_pool() override;
+    virtual size_t get_max_ongoing_transfers() const override;
+    virtual hailo_status write_async_impl(TransferRequest &&transfer_request) override;
 
-    hailo_status enqueue(TransferRequest &&transfer_request)
-    {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        if (m_is_aborted) {
-            return HAILO_STREAM_ABORTED_BY_USER;
-        }
-        CHECK(m_queue.size() < m_max_size, HAILO_QUEUE_IS_FULL, "No space left in stream queue");
-        m_queue.emplace(std::move(transfer_request));
-        return HAILO_SUCCESS;
-    }
-
-    Expected<TransferRequest> dequeue()
-    {
-        TransferRequest transfer_request{};
-        {
-            std::unique_lock<std::mutex> lock(m_mutex);
-            if (m_is_aborted) {
-                return make_unexpected(HAILO_STREAM_ABORTED_BY_USER);
-            }
-            CHECK_AS_EXPECTED(!m_queue.empty(), HAILO_INTERNAL_FAILURE, "Queue should not be empty");
-            transfer_request = m_queue.front();
-            m_queue.pop();
-        }
-        m_dequeue_cv.notify_one();
-        return transfer_request;
-    }
-
-    void abort()
-    {
-        {
-            std::unique_lock<std::mutex> lock(m_mutex);
-            m_is_aborted = true;
-        }
+    virtual hailo_status launch_transfer(const device_id_t &device_id) override;
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
 
-        m_dequeue_cv.notify_all();
-    }
+    virtual bool is_scheduled() override final { return true; };
 
-    void clear_abort()
+    // Returns the number of frames buffered on a single device.
+    virtual Expected<size_t> get_buffer_frames_size() const override
     {
-        std::unique_lock<std::mutex> lock(m_mutex);
-        m_is_aborted = false;
+        return m_streams.begin()->second.get().get_buffer_frames_size();
     }
 
-    size_t max_size() const { return m_max_size; }
-
 private:
-    // TODO: use SpscQueue (HRT-10554)
-    const size_t m_max_size;
-    std::mutex m_mutex;
-    bool m_is_aborted = false;
-    std::condition_variable m_dequeue_cv;
-    std::queue<TransferRequest> m_queue;
-};
-
-class ScheduledAsyncInputStream : public ScheduledInputStreamBase {
-public:
-
-    static Expected<std::unique_ptr<ScheduledAsyncInputStream>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        const scheduler_core_op_handle_t &core_op_handle,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler);
-
-    ScheduledAsyncInputStream(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        const scheduler_core_op_handle_t &core_op_handle,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler,
-        size_t max_queue_size,
-        hailo_status &status) :
-            ScheduledInputStreamBase(std::move(streams), core_op_handle, std::move(core_op_activated_event), layer_info,
-                core_ops_scheduler, status),
-            m_pending_buffers(max_queue_size),
-            m_callback_reorder_queue(max_queue_size) // TODO HRT-1058 - use reorder queue only when needed
-    {}
-
-    virtual hailo_status send_pending_buffer(const device_id_t &device_id) override;
-    virtual hailo_status wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout) override;
-    virtual hailo_status write_async(TransferRequest &&transfer_request) override;
-    virtual Expected<size_t> get_async_max_queue_size() const override;
-    virtual hailo_status abort() override;
-    virtual hailo_status clear_abort() override;
-
-protected:
-    virtual hailo_status write_impl(const MemoryView &, const std::function<bool()> &) override;
+    std::map<device_id_t, std::reference_wrapper<InputStreamBase>> m_streams;
+    CoreOpsSchedulerWeakPtr m_core_ops_scheduler;
+    scheduler_core_op_handle_t m_core_op_handle;
 
     // All buffers written by the user using write_async are first stored in this queue.
     // When the scheduler decides to activate the network on a specific device, send_pending_buffer is called, and
     // the buffers are sent to the underlying stream.
-    TransferRequestsQueue m_pending_buffers;
+    SafeQueue<TransferRequest> m_transfer_requests;
+
     CallbackReorderQueue m_callback_reorder_queue;
 };
 
-class ScheduledOutputStreamBase : public VDeviceOutputStreamBase {
+class ScheduledOutputStream : public AsyncOutputStreamBase {
 public:
-    ScheduledOutputStreamBase(
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
+    static Expected<std::unique_ptr<ScheduledOutputStream>> create(
+        std::map<device_id_t, std::reference_wrapper<OutputStreamBase>> &&streams,
+        const scheduler_core_op_handle_t &core_op_handle,
+        const LayerInfo &layer_info,
+        EventPtr core_op_activated_event,
+        CoreOpsSchedulerWeakPtr core_ops_scheduler);
+
+    ScheduledOutputStream(
+        std::map<device_id_t, std::reference_wrapper<OutputStreamBase>> &&streams,
         const scheduler_core_op_handle_t &core_op_handle,
         const LayerInfo &layer_info,
         EventPtr &&core_op_activated_event,
         CoreOpsSchedulerWeakPtr core_ops_scheduler,
+        size_t max_queue_size,
         hailo_status &status) :
-            VDeviceOutputStreamBase(std::move(streams), layer_info, std::move(core_op_activated_event), status),
+            AsyncOutputStreamBase(layer_info, streams.begin()->second.get().get_interface(),
+                                  std::move(core_op_activated_event), status),
+            m_streams(std::move(streams)),
+            m_core_ops_scheduler(core_ops_scheduler),
             m_core_op_handle(core_op_handle),
-            m_core_ops_scheduler(core_ops_scheduler)
+            m_transfer_requests(max_queue_size),
+            m_callback_reorder_queue(max_queue_size) // TODO HRT-1058 - use reorder queue only when needed
     {}
 
-    virtual bool is_scheduled() override { return true; };
+    virtual hailo_status launch_transfer(const device_id_t &device_id) override;
 
     virtual hailo_status abort() override;
     virtual hailo_status clear_abort() override;
 
-protected:
-
-    scheduler_core_op_handle_t m_core_op_handle;
-    CoreOpsSchedulerWeakPtr m_core_ops_scheduler;
-
-private:
-    hailo_status abort_impl(scheduler_core_op_handle_t core_op_handle);
-    hailo_status clear_abort_impl(scheduler_core_op_handle_t core_op_handle);
-};
-
+    virtual hailo_stream_interface_t get_interface() const override;
 
-class ScheduledOutputStream : public ScheduledOutputStreamBase {
-public:
-    static Expected<std::unique_ptr<ScheduledOutputStream>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
-        const scheduler_core_op_handle_t &core_op_handle,
-        const LayerInfo &layer_info,
-        EventPtr &&core_op_activated_event,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler);
+    virtual Expected<std::unique_ptr<StreamBufferPool>> allocate_buffer_pool() override;
+    virtual size_t get_max_ongoing_transfers() const override;
+    virtual hailo_status read_async_impl(TransferRequest &&transfer_request) override;
 
-    ScheduledOutputStream(
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
-        const scheduler_core_op_handle_t &core_op_handle,
-        const LayerInfo &layer_info,
-        EventPtr &&core_op_activated_event,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler,
-        hailo_status &status);
+    virtual bool is_scheduled() override final { return true; };
 
-    virtual hailo_status set_next_device_to_read(const device_id_t &device_id) override;
+    // Returns the number of frames buffered on a single device.
+    virtual Expected<size_t> get_buffer_frames_size() const override
+    {
+        return m_streams.begin()->second.get().get_buffer_frames_size();
+    }
 
-protected:
-    virtual hailo_status read(MemoryView buffer) override;
+    virtual hailo_status read_impl(MemoryView user_buffer) override
+    {
+        auto status = AsyncOutputStreamBase::read_impl(user_buffer);
+        if (HAILO_SUCCESS == status) {
+            TRACE(ReadFrameTrace, m_core_op_handle, name());
+        }
+        return status;
+    }
 
 private:
+    std::map<device_id_t, std::reference_wrapper<OutputStreamBase>> m_streams;
+    CoreOpsSchedulerWeakPtr m_core_ops_scheduler;
+    scheduler_core_op_handle_t m_core_op_handle;
 
-    // Returns device id to read from
-    Expected<device_id_t> wait_for_read();
+    // All buffers passed by the user using read_async are first stored in this queue.
+    // When the scheduler decides to activate the network on a specific device, launch_transfer is called, and
+    // the buffers are sent to the underlying stream.
+    SafeQueue<TransferRequest> m_transfer_requests;
 
-    std::queue<device_id_t> m_device_read_order;
-    std::mutex m_device_read_order_mutex;
+    CallbackReorderQueue m_callback_reorder_queue;
 };
 
 } /* namespace hailort */
index c14f49a5b3a9b74a81fa252cbc3da1a6d90ed0a6..1656d1eed60f5eb96312449977b83e1c7b459818 100644 (file)
@@ -15,7 +15,6 @@
 #include "vdevice/scheduler/scheduler_oracle.hpp"
 #include "vdevice/vdevice_stream_multiplexer_wrapper.hpp"
 #include "hef/hef_internal.hpp"
-#include "utils/profiler/tracer_macros.hpp"
 
 #include <fstream>
 
 namespace hailort
 {
 
-#define SINGLE_CONTEXT_BATCH_SIZE (1)
 #define DEFAULT_BURST_SIZE (1)
 
-// TODO: use device handles instead device count
-CoreOpsScheduler::CoreOpsScheduler(hailo_scheduling_algorithm_t algorithm, std::vector<std::string> &devices_ids, 
+CoreOpsScheduler::CoreOpsScheduler(hailo_scheduling_algorithm_t algorithm, std::vector<std::string> &devices_ids,
     std::vector<std::string> &devices_arch) :
     SchedulerBase(algorithm, devices_ids, devices_arch),
-    m_should_core_op_stop(),
-    m_before_read_write_mutex(),
-    m_core_ops_cvs(),
-    m_scheduler_cv()
-{
-    TRACE(SchedulerStartTrace, get_device_count());
-    for (const auto &pair : m_devices) {
-        auto &device_info = pair.second;
-        TRACE(AddDeviceTrace, device_info->device_id, device_info->device_arch);
-    }
-
-    m_is_running = true;
-    m_scheduler_thread = std::thread(&CoreOpsScheduler::worker_thread_main, this);
-    m_execute_worker_thread = true;
-}
+    m_scheduler_thread(*this)
+{}
 
 CoreOpsScheduler::~CoreOpsScheduler()
 {
-    for (const auto &pair : m_devices) {
-        auto &device_info = pair.second;
-        if (INVALID_CORE_OP_HANDLE != device_info->current_core_op_handle) {
-            auto current_core_op = m_scheduled_core_ops[device_info->current_core_op_handle]->get_core_op();
-            auto current_core_op_bundle = std::dynamic_pointer_cast<VDeviceCoreOp>(current_core_op);
-            assert(nullptr != current_core_op_bundle);
-            auto vdma_core_op = current_core_op_bundle->get_core_op_by_device_id(device_info->device_id);
-            if (!vdma_core_op) {
-                LOGGER__ERROR("Error retrieving core-op in scheduler destructor");
-            } else {
-                if (HAILO_SUCCESS != VdmaConfigManager::deactivate_core_op(vdma_core_op.value())) {
-                    LOGGER__ERROR("Error deactivating core-op when destroying scheduler");
-                }
-            }
-        }
-    }
-
-    // signal scheduler thread to stop and join
-    {
-        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-        m_is_running = false;
-        m_execute_worker_thread = true;
-    }
-    m_scheduler_cv.notify_one();
-    if (m_scheduler_thread.joinable()) {
-        m_scheduler_thread.join();
-    }
+    shutdown();
 }
 
 Expected<CoreOpsSchedulerPtr> CoreOpsScheduler::create_round_robin(std::vector<std::string> &devices_bdf_id, std::vector<std::string> &devices_arch)
@@ -85,129 +43,85 @@ Expected<CoreOpsSchedulerPtr> CoreOpsScheduler::create_round_robin(std::vector<s
     return ptr;
 }
 
-std::string CoreOpsScheduler::get_core_op_name(const scheduler_core_op_handle_t &core_op_handle)
-{
-    assert(m_scheduled_core_ops.size() > core_op_handle);
-    return m_scheduled_core_ops[core_op_handle]->get_core_op_name();
-}
-
-Expected<scheduler_core_op_handle_t > CoreOpsScheduler::add_core_op(std::shared_ptr<CoreOp> added_cng)
+hailo_status CoreOpsScheduler::add_core_op(scheduler_core_op_handle_t core_op_handle,
+     std::shared_ptr<CoreOp> added_cng)
 {
-    scheduler_core_op_handle_t core_op_handle = INVALID_CORE_OP_HANDLE;
-    {
-        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-        core_op_handle = static_cast<uint32_t>(m_scheduled_core_ops.size());
-
-        auto stream_infos = added_cng->get_all_stream_infos();
-        CHECK_EXPECTED(stream_infos);
+    std::unique_lock<std::shared_timed_mutex> lock(m_scheduler_mutex);
 
-        auto scheduled_core_op = ScheduledCoreOp::create(added_cng, stream_infos.value());
-        CHECK_EXPECTED(scheduled_core_op);
+    auto stream_infos = added_cng->get_all_stream_infos();
+    CHECK_EXPECTED_AS_STATUS(stream_infos);
 
-        bool is_nms = scheduled_core_op->get()->is_nms();
-        TRACE(AddCoreOpTrace, "", added_cng->name(), DEFAULT_SCHEDULER_TIMEOUT.count(), DEFAULT_SCHEDULER_MIN_THRESHOLD,
-            core_op_handle, is_nms);
-
-        m_scheduled_core_ops.emplace_back(scheduled_core_op.release());
+    auto scheduled_core_op = ScheduledCoreOp::create(added_cng, stream_infos.value());
+    CHECK_EXPECTED_AS_STATUS(scheduled_core_op);
 
+    m_scheduled_core_ops.emplace(core_op_handle, scheduled_core_op.release());
 
+    for (const auto &pair : m_devices) {
+        auto &device_info = pair.second;
         for (const auto &stream_info : stream_infos.value()) {
-            m_should_core_op_stop[core_op_handle][stream_info.name] = false;
-        }
-
-        for (const auto &pair : m_devices) {
-               auto &device_info = pair.second;
-            for (const auto &stream_info : stream_infos.value()) {
-                if (HAILO_H2D_STREAM == stream_info.direction) {
-                    device_info->current_cycle_requested_transferred_frames_h2d[core_op_handle][stream_info.name] = 0;
-                } else {
-                    device_info->current_cycle_finished_transferred_frames_d2h[core_op_handle][stream_info.name] = 0;
-                    device_info->pending_to_read_frames[core_op_handle][stream_info.name] = 0;
-                }
-            }
+            device_info->ongoing_frames[core_op_handle].insert(stream_info.name);
         }
+    }
 
-        auto network_cvs = ScheduledCoreOpCV::create(added_cng);
-        CHECK_EXPECTED(network_cvs);
-        m_core_ops_cvs[core_op_handle] = network_cvs.release();
-        m_core_op_priority[HAILO_SCHEDULER_PRIORITY_NORMAL].emplace_back(core_op_handle);
+    const core_op_priority_t normal_priority = HAILO_SCHEDULER_PRIORITY_NORMAL;
+    m_core_op_priority[normal_priority].emplace_back(core_op_handle);
+    if (!contains(m_next_core_op, normal_priority)) {
+        m_next_core_op[normal_priority] = 0;
     }
 
-    return core_op_handle;
+    return HAILO_SUCCESS;
 }
 
-bool CoreOpsScheduler::is_core_op_active(const scheduler_core_op_handle_t &core_op_handle)
+void CoreOpsScheduler::shutdown()
 {
+    // Locking shared_lock since we don't touch the internal scheduler structures.
+    std::shared_lock<std::shared_timed_mutex> lock(m_scheduler_mutex);
+    m_scheduler_thread.stop();
+
+    // After the scheduler thread have stopped, we can safely deactivate all core ops
     for (const auto &pair : m_devices) {
         auto &device_info = pair.second;
-        if (core_op_handle == device_info->current_core_op_handle) {
-            return true;
+        if (INVALID_CORE_OP_HANDLE != device_info->current_core_op_handle) {
+            auto current_core_op = m_scheduled_core_ops.at(device_info->current_core_op_handle)->get_core_op();
+            auto current_core_op_bundle = std::dynamic_pointer_cast<VDeviceCoreOp>(current_core_op);
+            assert(nullptr != current_core_op_bundle);
+            auto vdma_core_op = current_core_op_bundle->get_core_op_by_device_id(device_info->device_id);
+            if (!vdma_core_op) {
+                LOGGER__ERROR("Error retrieving core-op in scheduler destructor");
+            } else {
+                if (HAILO_SUCCESS != VdmaConfigManager::deactivate_core_op(vdma_core_op.value())) {
+                    LOGGER__ERROR("Error deactivating core-op when destroying scheduler");
+                }
+                device_info->current_core_op_handle = INVALID_CORE_OP_HANDLE;
+            }
         }
     }
-
-    return false;
-}
-
-bool CoreOpsScheduler::is_multi_device()
-{
-    return m_devices.size() > 1;
 }
 
-hailo_status CoreOpsScheduler::signal_frame_pending_to_send(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name)
+hailo_status CoreOpsScheduler::switch_core_op(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id)
 {
-    {
-        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-        assert(m_scheduled_core_ops.size() > core_op_handle);
-        auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
-
-        if (should_core_op_stop(core_op_handle)) {
-            return HAILO_STREAM_ABORTED_BY_USER;
-        }
-
-        TRACE(WriteFrameTrace, "", core_op_handle, stream_name);
-
-        m_scheduled_core_ops[core_op_handle]->mark_frame_sent();
-        scheduled_core_op->pending_to_send_frames().increase(stream_name);
-        m_execute_worker_thread = true;
-    }
-    m_scheduler_cv.notify_one();
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status CoreOpsScheduler::switch_core_op(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id, bool /*keep_nn_config*/)
-{
-    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+    auto scheduled_core_op = m_scheduled_core_ops.at(core_op_handle);
     assert(contains(m_devices, device_id));
+    assert(is_device_idle(device_id));
     auto curr_device_info = m_devices[device_id];
     curr_device_info->is_switching_core_op = false;
 
-    // initialize current cycle maps
-    for (const auto &name : scheduled_core_op->get_inputs_names()) {
-        curr_device_info->current_cycle_requested_transferred_frames_h2d[core_op_handle][name] = 0;
-    }
-
-    for (const auto &name : scheduled_core_op->get_outputs_names()) {
-        curr_device_info->current_cycle_finished_transferred_frames_d2h[core_op_handle][name] = 0;
-    }
-
-    uint16_t batch_size = std::min(scheduled_core_op->get_max_batch_size(), get_min_avail_buffers_count(core_op_handle, device_id));
-    uint16_t hw_batch_size = SINGLE_CONTEXT_BATCH_SIZE;
+    const auto burst_size = scheduled_core_op->get_burst_size();
 
-    if (scheduled_core_op->use_dynamic_batch_flow()) {
-        batch_size = std::min(static_cast<uint16_t>(scheduled_core_op->pending_to_send_frames_min_value()), batch_size);
-        hw_batch_size = batch_size;
-    }
+    auto frames_count = std::min(get_frames_ready_to_transfer(core_op_handle, device_id), burst_size);
+    auto hw_batch_size = scheduled_core_op->use_dynamic_batch_flow() ? frames_count : SINGLE_CONTEXT_BATCH_SIZE;
 
-    if (batch_size == 0) {
+    if (frames_count == 0) {
+        // TODO HRT-11753: don't allow this flow
         return HAILO_SUCCESS;
     }
 
-    bool has_same_hw_batch_size_as_previous = scheduled_core_op->use_dynamic_batch_flow() ? (curr_device_info->current_batch_size == batch_size) : true;
-    curr_device_info->current_batch_size = batch_size;
+    curr_device_info->frames_left_before_stop_streaming = burst_size;
+
+    bool has_same_hw_batch_size_as_previous = curr_device_info->current_batch_size == hw_batch_size;
+    curr_device_info->current_batch_size = hw_batch_size;
 
     if ((core_op_handle != curr_device_info->current_core_op_handle) || (!has_same_hw_batch_size_as_previous)) {
-        assert(m_scheduled_core_ops.size() > core_op_handle);
         auto next_active_cng = scheduled_core_op->get_core_op();
         auto next_active_cng_wrapper = std::dynamic_pointer_cast<VDeviceCoreOp>(next_active_cng);
         assert(nullptr != next_active_cng_wrapper);
@@ -216,34 +130,24 @@ hailo_status CoreOpsScheduler::switch_core_op(const scheduler_core_op_handle_t &
 
         std::shared_ptr<VdmaConfigCoreOp> current_active_vdma_cng = nullptr;
         if (curr_device_info->current_core_op_handle != INVALID_CORE_OP_HANDLE) {
-            auto current_active_cng = m_scheduled_core_ops[curr_device_info->current_core_op_handle]->get_core_op();
+            auto current_active_cng = m_scheduled_core_ops.at(curr_device_info->current_core_op_handle)->get_core_op();
             auto current_active_cng_bundle = std::dynamic_pointer_cast<VDeviceCoreOp>(current_active_cng);
             assert(nullptr != current_active_cng_bundle);
             auto current_active_cng_expected = current_active_cng_bundle->get_core_op_by_device_id(curr_device_info->device_id);
             CHECK_EXPECTED_AS_STATUS(current_active_cng_expected);
             current_active_vdma_cng = current_active_cng_expected.release();
-
-            // Flushing h2d channel in order to make sure we got all interrupts before switching the network.
-            for (auto &stream : current_active_vdma_cng->get_input_streams()) {
-                auto status = stream.get().flush();
-                if (HAILO_STREAM_ABORTED_BY_USER == status) {
-                    continue;
-                }
-                CHECK_SUCCESS(status);
-            }
         }
 
-        TRACE(SwitchCoreOpTrace, device_id, core_op_handle);
-        static const auto RESUME_PENDING_STREAM_TRANSFERS = true;
+        const bool is_batch_switch = (core_op_handle == curr_device_info->current_core_op_handle);
         auto status = VdmaConfigManager::switch_core_op(current_active_vdma_cng, next_active_cng_expected.value(), hw_batch_size,
-            RESUME_PENDING_STREAM_TRANSFERS);
+            is_batch_switch);
         CHECK_SUCCESS(status, "Failed switching core-op");
     }
 
     scheduled_core_op->set_last_run_timestamp(std::chrono::steady_clock::now()); // Mark timestamp on activation
     curr_device_info->current_core_op_handle = core_op_handle;
 
-    auto status = send_all_pending_buffers(core_op_handle, device_id, batch_size);
+    auto status = send_all_pending_buffers(core_op_handle, device_id, frames_count);
     if (HAILO_STREAM_ABORTED_BY_USER == status) {
         LOGGER__INFO("send_all_pending_buffers has failed with status=HAILO_STREAM_ABORTED_BY_USER");
         return status;
@@ -253,40 +157,6 @@ hailo_status CoreOpsScheduler::switch_core_op(const scheduler_core_op_handle_t &
     return HAILO_SUCCESS;
 }
 
-void CoreOpsScheduler::signal_read_finish_impl(const scheduler_core_op_handle_t &core_op_handle,
-    const std::string &stream_name, const device_id_t &device_id)
-{
-    TRACE(ReadFrameTrace, "", core_op_handle, stream_name);
-
-    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
-    scheduled_core_op->requested_read_frames().decrease(stream_name);
-    scheduled_core_op->finished_read_frames().increase(stream_name);
-    scheduled_core_op->d2h_finished_transferred_frames().decrease(stream_name);
-
-    if (m_devices[device_id]->pending_to_read_frames[core_op_handle][stream_name] > 0) {
-        m_devices[device_id]->pending_to_read_frames[core_op_handle][stream_name]--;
-    }
-
-    decrease_core_op_counters(core_op_handle);
-
-    auto has_drained_everything = has_core_op_drained_everything(core_op_handle, device_id);
-    if (scheduled_core_op->is_nms() && has_drained_everything) {
-        // In NMS networks there is possibility that next wasn't choosen yet
-        choose_next_core_op(device_id, true);
-
-        // If we didn't choose with threshold or timeout lets choose without threshold
-        if (!m_devices[device_id]->is_switching_core_op) {
-            choose_next_core_op(device_id, false);
-        }
-
-        if (has_drained_everything) {
-            TRACE(CoreOpIdleTrace, device_id, core_op_handle);
-        }
-    }
-
-    m_execute_worker_thread = true;
-}
-
 hailo_status CoreOpsScheduler::send_all_pending_buffers(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id, uint32_t burst_size)
 {
     auto current_device_info = m_devices[device_id];
@@ -294,78 +164,53 @@ hailo_status CoreOpsScheduler::send_all_pending_buffers(const scheduler_core_op_
         return HAILO_SUCCESS;
     }
 
-    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+    auto scheduled_core_op = m_scheduled_core_ops.at(core_op_handle);
 
     for (size_t i = 0; i < burst_size; i++) {
-        auto finished_send = false;
-        for (const auto &name : scheduled_core_op->get_inputs_names()) {
-            if (scheduled_core_op->pending_to_send_frames(name) == 0) {
-                finished_send = true;
-                break;
-            }
-        }
-        if (finished_send) {
-            break;
+        if (current_device_info->frames_left_before_stop_streaming > 0) {
+            current_device_info->frames_left_before_stop_streaming--;
         }
 
-        for (const auto &name : scheduled_core_op->get_outputs_names()) {
-            auto output_stream = scheduled_core_op->get_core_op()->get_output_stream_by_name(name);
-            CHECK_EXPECTED_AS_STATUS(output_stream);
+        for (auto &input_stream : scheduled_core_op->get_core_op()->get_input_streams()) {
+            const auto &stream_name = input_stream.get().name();
+            scheduled_core_op->pending_frames().decrease(stream_name);
+            current_device_info->ongoing_frames[core_op_handle].increase(stream_name);
 
-            auto &output_stream_base = static_cast<OutputStreamBase&>(output_stream->get());
-            auto status = output_stream_base.set_next_device_to_read(device_id);
+            // After launching the transfer, signal_frame_transferred may be called (and ongoing frames will be
+            // decreased).
+            auto &input_stream_base = static_cast<InputStreamBase&>(input_stream.get());
+            auto status = input_stream_base.launch_transfer(device_id);
+            if (HAILO_STREAM_ABORTED_BY_USER == status) {
+                LOGGER__INFO("launch_transfer has failed with status=HAILO_STREAM_ABORTED_BY_USER");
+                return status;
+            }
             CHECK_SUCCESS(status);
         }
 
-        for (const auto &name : scheduled_core_op->get_inputs_names()) {
-            auto status = send_pending_buffer(core_op_handle, name, device_id);
+        for (auto &output_stream : scheduled_core_op->get_core_op()->get_output_streams()) {
+            const auto &stream_name = output_stream.get().name();
+            scheduled_core_op->pending_frames().decrease(stream_name);
+            current_device_info->ongoing_frames[core_op_handle].increase(stream_name);
+
+            // After launching the transfer, signal_frame_transferred may be called (and ongoing frames will be
+            // decreased).
+            auto &output_stream_base = static_cast<OutputStreamBase&>(output_stream.get());
+            auto status = output_stream_base.launch_transfer(device_id);
             if (HAILO_STREAM_ABORTED_BY_USER == status) {
-                LOGGER__INFO("send_pending_buffer has failed with status=HAILO_STREAM_ABORTED_BY_USER");
+                LOGGER__INFO("launch_transfer has failed with status=HAILO_STREAM_ABORTED_BY_USER");
                 return status;
             }
             CHECK_SUCCESS(status);
         }
-        scheduled_core_op->set_last_device(device_id);
-    }
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status CoreOpsScheduler::send_pending_buffer(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
-    const device_id_t &device_id)
-{
-    assert(m_scheduled_core_ops.size() > core_op_handle);
-    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
 
-    auto current_cng = scheduled_core_op->get_core_op();
-    auto input_stream = current_cng->get_input_stream_by_name(stream_name);
-    CHECK_EXPECTED_AS_STATUS(input_stream);
-
-    auto &input_stream_base = static_cast<InputStreamBase&>(input_stream->get());
-    auto status = input_stream_base.send_pending_buffer(device_id);
-    if (HAILO_STREAM_ABORTED_BY_USER == status) {
-        LOGGER__INFO("send_pending_buffer has failed with status=HAILO_STREAM_ABORTED_BY_USER");
-        return status;
-    }
-    CHECK_SUCCESS(status);
-
-    TRACE(InputVdmaDequeueTrace, device_id, core_op_handle, stream_name);
-
-    m_devices[device_id]->current_cycle_requested_transferred_frames_h2d[core_op_handle][stream_name]++;
-    scheduled_core_op->pending_to_send_frames().decrease(stream_name);
-    // Notifying for flush
-    m_core_ops_cvs[core_op_handle]->notify_one(stream_name);
-
-    scheduled_core_op->h2d_finished_transferred_frames().increase(stream_name);
-
-    if (should_core_op_stop(core_op_handle)) {
-        return HAILO_STREAM_ABORTED_BY_USER;
+        scheduled_core_op->set_last_device(device_id);
     }
 
     return HAILO_SUCCESS;
 }
 
-CoreOpsScheduler::ReadyInfo CoreOpsScheduler::is_core_op_ready(const scheduler_core_op_handle_t &core_op_handle, bool check_threshold)
+CoreOpsScheduler::ReadyInfo CoreOpsScheduler::is_core_op_ready(const scheduler_core_op_handle_t &core_op_handle,
+    bool check_threshold, const device_id_t &device_id)
 {
     ReadyInfo result;
     result.is_ready = false;
@@ -375,12 +220,7 @@ CoreOpsScheduler::ReadyInfo CoreOpsScheduler::is_core_op_ready(const scheduler_c
         return result;
     }
 
-    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
-    // Check if there arent any write requests
-    const bool has_pending_writes = scheduled_core_op->pending_to_send_frames_min_value() > 0;
-
-    // Check for read request on all the output streams
-    const bool has_avail_pending_to_read_buffers = get_min_avail_output_buffers(core_op_handle) > 0;
+    auto scheduled_core_op = m_scheduled_core_ops.at(core_op_handle);
 
     std::vector<bool> over_threshold;
     over_threshold.reserve(scheduled_core_op->get_inputs_names().size());
@@ -403,7 +243,7 @@ CoreOpsScheduler::ReadyInfo CoreOpsScheduler::is_core_op_ready(const scheduler_c
             auto timeout = timeout_exp.release();
 
             // Check if there arent enough write requests to reach threshold and timeout didnt passed
-            uint32_t write_requests = scheduled_core_op->pending_to_send_frames(name);
+            const auto write_requests = scheduled_core_op->pending_frames()[name];
             auto stream_over_threshold = write_requests >= threshold;
             auto stream_over_timeout = timeout <= (std::chrono::steady_clock::now() - scheduled_core_op->get_last_run_timestamp());
             over_threshold.push_back(stream_over_threshold);
@@ -419,268 +259,138 @@ CoreOpsScheduler::ReadyInfo CoreOpsScheduler::is_core_op_ready(const scheduler_c
         result.over_timeout = std::all_of(over_timeout.begin(), over_timeout.end(), [](auto over) { return over; });
     }
 
-    result.is_ready = has_pending_writes && has_avail_pending_to_read_buffers;
+    result.is_ready = (get_frames_ready_to_transfer(core_op_handle, device_id) > 0);
 
     return result;
 }
 
-hailo_status CoreOpsScheduler::wait_for_read(const scheduler_core_op_handle_t &core_op_handle,
-    const std::string &stream_name, const std::chrono::milliseconds &timeout, const std::function<bool()> &predicate)
+hailo_status CoreOpsScheduler::signal_frame_pending(const scheduler_core_op_handle_t &core_op_handle,
+    const std::string &stream_name, hailo_stream_direction_t direction)
 {
-    std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
-    hailo_status status = HAILO_SUCCESS;
-    auto wait_res = m_core_ops_cvs[core_op_handle]->wait_for(stream_name, lock, timeout,
-        [this, core_op_handle, predicate, &stream_name, &status] {
-        if (m_should_core_op_stop[core_op_handle][stream_name]) {
-            status = HAILO_STREAM_ABORTED_BY_USER;
-            return true; // return true so that the wait will finish
-        }
+    std::shared_lock<std::shared_timed_mutex> lock(m_scheduler_mutex);
+    auto scheduled_core_op = m_scheduled_core_ops.at(core_op_handle);
 
-        return predicate();
-    });
-    CHECK(wait_res, HAILO_TIMEOUT, "{} (D2H) failed with status={}, timeout={}ms", stream_name, HAILO_TIMEOUT, timeout.count());
-    if (HAILO_STREAM_ABORTED_BY_USER == status) {
-        return status;
+    if (should_core_op_stop(core_op_handle)) {
+        return HAILO_STREAM_ABORTED_BY_USER;
     }
-    CHECK_SUCCESS(status);
-
-    return HAILO_SUCCESS;
-}
 
-hailo_status CoreOpsScheduler::signal_frame_pending_to_read(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name)
-{
-    {
-        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
-        auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
-        scheduled_core_op->requested_read_frames().increase(stream_name);
-        m_execute_worker_thread = true;
+    if (HAILO_H2D_STREAM == direction) {
+        TRACE(WriteFrameTrace, core_op_handle, stream_name);
+        scheduled_core_op->mark_frame_sent();
     }
-    m_scheduler_cv.notify_one();
+
+    scheduled_core_op->pending_frames().increase(stream_name);
+    m_scheduler_thread.signal();
 
     return HAILO_SUCCESS;
 }
 
-void CoreOpsScheduler::signal_frame_transferred_d2h(const scheduler_core_op_handle_t &core_op_handle,
-    const std::string &stream_name, const device_id_t &device_id)
+void CoreOpsScheduler::signal_frame_transferred(const scheduler_core_op_handle_t &core_op_handle,
+    const std::string &stream_name, const device_id_t &device_id, hailo_stream_direction_t stream_direction)
 {
-    {
-        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
-        auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
-        if (!scheduled_core_op->is_nms()) {
-            TRACE(OutputVdmaEnqueueTrace, "", core_op_handle, stream_name, 1);
-            // TODO: Remove d2h_finished_transferred_frames and use current_cycle_finished_transferred_frames_d2h instead
-            scheduled_core_op->d2h_finished_transferred_frames().increase(stream_name);
-            m_devices[device_id]->pending_to_read_frames[core_op_handle][stream_name] += 1;
-            m_devices[device_id]->current_cycle_finished_transferred_frames_d2h[core_op_handle][stream_name] += 1;
-        }
-
-        auto has_drained_everything = has_core_op_drained_everything(core_op_handle, device_id);
-
-        if (has_drained_everything) {
-            TRACE(CoreOpIdleTrace, device_id, core_op_handle);
-        }
-
-        // If ng finished and we didn't choose next lets choose without checking threshold
-        if (!m_devices[device_id]->is_switching_core_op && has_drained_everything) {
-            auto was_chosen  = choose_next_core_op(device_id, true);
-            if (!was_chosen) {
-                choose_next_core_op(device_id, false);
-            }
-        }
-
-        if (m_devices[device_id]->is_switching_core_op) {
-            m_execute_worker_thread = true;
-        }
-    }
+    std::shared_lock<std::shared_timed_mutex> lock(m_scheduler_mutex);
 
-    // Notify stream that new frame was accepted (wait_for read operation)
-    m_core_ops_cvs[core_op_handle]->notify_one(stream_name);
-    m_scheduler_cv.notify_one();
-}
+    auto scheduled_core_op = m_scheduled_core_ops.at(core_op_handle);
 
-hailo_status CoreOpsScheduler::signal_read_finish(const scheduler_core_op_handle_t &core_op_handle,
-    const std::string &stream_name, const device_id_t &device_id)
-{
-    {
-        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-        signal_read_finish_impl(core_op_handle, stream_name, device_id);
+    m_devices[device_id]->ongoing_frames[core_op_handle].decrease(stream_name);
+    if (HAILO_D2H_STREAM == stream_direction) {
+        TRACE(OutputVdmaEnqueueTrace, device_id, core_op_handle, stream_name);
     }
-    m_scheduler_cv.notify_one();
-    return HAILO_SUCCESS;
-}
 
-void CoreOpsScheduler::decrease_core_op_counters(const scheduler_core_op_handle_t &core_op_handle)
-{
-    return m_scheduled_core_ops[core_op_handle]->decrease_current_core_op_counters();
+    m_scheduler_thread.signal();
 }
 
-bool CoreOpsScheduler::has_core_op_drained_everything(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id)
+bool CoreOpsScheduler::is_device_idle(const device_id_t &device_id)
 {
-    if (core_op_all_streams_aborted(core_op_handle)) {
-        // We treat core-op as drained only if all streams are aborted - to make sure there aren't any ongoing transfers
-        return true;
-    }
-
+    const auto &device_info = m_devices[device_id];
+    auto core_op_handle = device_info->current_core_op_handle;
     if (INVALID_CORE_OP_HANDLE == core_op_handle) {
         // If no core-op is running, consider it as drained
         return true;
     }
 
-    if ((!m_scheduled_core_ops[core_op_handle]->is_nms()) && (is_multi_device() || m_scheduled_core_ops[core_op_handle]->use_dynamic_batch_flow())) {
-        auto current_device_info = m_devices[device_id];
-        auto max_transferred_h2d = get_max_value_of_unordered_map(current_device_info->current_cycle_requested_transferred_frames_h2d[core_op_handle]);
-        auto min_transferred_d2h = get_min_value_of_unordered_map(current_device_info->current_cycle_finished_transferred_frames_d2h[core_op_handle]);
-
-        return (max_transferred_h2d == min_transferred_d2h);
-    }
-
-    return m_scheduled_core_ops[core_op_handle]->has_core_op_drained_everything();
-}
-
-hailo_status CoreOpsScheduler::flush_pending_buffers(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
-    const std::chrono::milliseconds &timeout)
-{
-    std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
-    hailo_status status = HAILO_SUCCESS;
-    auto wait_res = m_core_ops_cvs[core_op_handle]->wait_for(stream_name, lock, timeout,
-        [this, core_op_handle, &stream_name, &status] {
-        if (should_core_op_stop(core_op_handle)) {
-            status = HAILO_STREAM_ABORTED_BY_USER;
-            return true; // return true so that the wait will finish
-        }
-
-        assert(m_scheduled_core_ops.size() > core_op_handle);
-        auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
-        auto pending = scheduled_core_op->pending_to_send_frames(stream_name).load();
-        return (pending == 0);
-    });
-    CHECK(wait_res, HAILO_TIMEOUT, "{} (H2D) failed with status={}, timeout={}ms", stream_name, HAILO_TIMEOUT, timeout.count());
-    if (HAILO_STREAM_ABORTED_BY_USER == status) {
-        LOGGER__INFO("flush pending buffers was aborted in stream ={}", stream_name);
-        return status;
+    if (m_scheduled_core_ops.at(core_op_handle)->all_stream_disabled()) {
+        // We treat core-op as drained only if all streams are aborted - to make sure there aren't any ongoing transfers
+        return true;
     }
-    CHECK_SUCCESS(status);
 
-    return HAILO_SUCCESS;
+    return m_devices[device_id]->is_idle();
 }
 
-hailo_status CoreOpsScheduler::enable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name)
+void CoreOpsScheduler::enable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name)
 {
-    {
-        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
-        if (!m_should_core_op_stop[core_op_handle][stream_name]) {
-            return HAILO_SUCCESS;
-        }
-
-        m_should_core_op_stop[core_op_handle][stream_name] = false;
-    }
-    m_core_ops_cvs[core_op_handle]->notify_all();
-
-    return HAILO_SUCCESS;
+    std::shared_lock<std::shared_timed_mutex> lock(m_scheduler_mutex);
+    m_scheduled_core_ops.at(core_op_handle)->enable_stream(stream_name);
 }
 
-hailo_status CoreOpsScheduler::disable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name)
+void CoreOpsScheduler::disable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name)
 {
-    {
-        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-
-        if (m_should_core_op_stop[core_op_handle][stream_name]) {
-            return HAILO_SUCCESS;
-        }
-
-        m_should_core_op_stop[core_op_handle][stream_name] = true;
-    }
-    m_core_ops_cvs[core_op_handle]->notify_all();
-
-    return HAILO_SUCCESS;
+    std::shared_lock<std::shared_timed_mutex> lock(m_scheduler_mutex);
+    m_scheduled_core_ops.at(core_op_handle)->disable_stream(stream_name);
 }
 
 hailo_status CoreOpsScheduler::set_timeout(const scheduler_core_op_handle_t &core_op_handle, const std::chrono::milliseconds &timeout, const std::string &/*network_name*/)
 {
+    std::shared_lock<std::shared_timed_mutex> lock(m_scheduler_mutex);
     // TODO: call in loop for set_timeout with the relevant stream-names (of the given network)
-    return m_scheduled_core_ops[core_op_handle]->set_timeout(timeout);
+    auto status = m_scheduled_core_ops.at(core_op_handle)->set_timeout(timeout);
+    if (HAILO_SUCCESS == status) {
+        TRACE(SetCoreOpTimeoutTrace, core_op_handle, timeout);
+    }
+    return status;
 }
 
 hailo_status CoreOpsScheduler::set_threshold(const scheduler_core_op_handle_t &core_op_handle, uint32_t threshold, const std::string &/*network_name*/)
 {
-    // TODO: call in loop for set_timeout with the relevant stream-names (of the given network)
-    return m_scheduled_core_ops[core_op_handle]->set_threshold(threshold);
+    std::shared_lock<std::shared_timed_mutex> lock(m_scheduler_mutex);
+    auto status = m_scheduled_core_ops.at(core_op_handle)->set_threshold(threshold);
+    if (HAILO_SUCCESS == status) {
+        TRACE(SetCoreOpThresholdTrace, core_op_handle, threshold);
+    }
+    return status;
 }
 
 hailo_status CoreOpsScheduler::set_priority(const scheduler_core_op_handle_t &core_op_handle, core_op_priority_t priority, const std::string &/*network_name*/)
 {
     CHECK(priority <= HAILO_SCHEDULER_PRIORITY_MAX, HAILO_INVALID_ARGUMENT);
-    std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-    auto old_priority = m_scheduled_core_ops[core_op_handle]->get_priority();
+
+    // Remove core op from the previous priority map
+    std::unique_lock<std::shared_timed_mutex> lock(m_scheduler_mutex);
+    auto old_priority = m_scheduled_core_ops.at(core_op_handle)->get_priority();
     auto &priority_vector = m_core_op_priority[old_priority];
     auto it = std::find(priority_vector.begin(), priority_vector.end(), core_op_handle);
     CHECK(it != priority_vector.end(), HAILO_INTERNAL_FAILURE);
-
     priority_vector.erase(it);
-    m_scheduled_core_ops[core_op_handle]->set_priority(priority);
+    m_next_core_op[old_priority] = 0; // Avoiding overflow by resetting next core op.
+
+    // Add it to the new priority map.
+    m_scheduled_core_ops.at(core_op_handle)->set_priority(priority);
     m_core_op_priority[priority].push_back(core_op_handle);
+    if (!contains(m_next_core_op, priority)) {
+        m_next_core_op[priority] = 0;
+    }
 
+    TRACE(SetCoreOpPriorityTrace, core_op_handle, priority);
     return HAILO_SUCCESS;
 }
 
-bool CoreOpsScheduler::choose_next_core_op(const device_id_t &device_id, bool check_threshold)
-{
-    if (!m_devices[device_id]->is_switching_core_op) {
-        return CoreOpsSchedulerOracle::choose_next_model(*this, m_devices[device_id]->device_id, check_threshold) != INVALID_CORE_OP_HANDLE;
-    }
-    return false;
-}
-
 bool CoreOpsScheduler::should_core_op_stop(const scheduler_core_op_handle_t &core_op_handle)
 {
-    for (const auto &name_flag_pair : m_should_core_op_stop[core_op_handle]) {
-        if (name_flag_pair.second) {
-            return true;
-        }
-    }
-
-    return false;
-}
-
-bool CoreOpsScheduler::core_op_all_streams_aborted(const scheduler_core_op_handle_t &core_op_handle)
-{
-    for (const auto &name_flag_pair : m_should_core_op_stop[core_op_handle]) {
-        if (!name_flag_pair.second) {
-            return false;
-        }
-    }
-    return true;
-}
-
-void CoreOpsScheduler::notify_all()
-{
-    {
-        // Acquire mutex to make sure the notify_all will wake the blocking threads on the cv
-        std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-    }
-    // TODO: consider notify only the relevant ng or stream
-    for (auto &cng_cvs : m_core_ops_cvs) {
-        cng_cvs.second->notify_all();
-    }
+    return m_scheduled_core_ops.at(core_op_handle)->any_stream_disabled();
 }
 
 hailo_status CoreOpsScheduler::optimize_streaming_if_enabled(const scheduler_core_op_handle_t &core_op_handle)
 {
-    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
+    auto scheduled_core_op = m_scheduled_core_ops.at(core_op_handle);
     if (!scheduled_core_op->use_dynamic_batch_flow()) {
         auto next_pair = m_devices.upper_bound(scheduled_core_op->get_last_device()); // Get last device and go to the next device in the map
-        if (m_devices.end() == next_pair){ // In case we reached to the end of the map - start from the beggining
+        if (m_devices.end() == next_pair){ // In case we reached to the end of the map - start from the beginning
             next_pair = m_devices.begin();
         }
         auto &device_info = next_pair->second;
         if (device_info->current_core_op_handle == core_op_handle && !device_info->is_switching_core_op &&
             !CoreOpsSchedulerOracle::should_stop_streaming(*this, scheduled_core_op->get_priority(), device_info->device_id) &&
-            (get_min_avail_buffers_count(core_op_handle, device_info->device_id) >= DEFAULT_BURST_SIZE)) {
+            (get_frames_ready_to_transfer(core_op_handle, device_info->device_id) >= DEFAULT_BURST_SIZE)) {
             auto status = send_all_pending_buffers(core_op_handle, device_info->device_id, DEFAULT_BURST_SIZE);
             if (HAILO_STREAM_ABORTED_BY_USER == status) {
                 LOGGER__INFO("send_all_pending_buffers has failed with status=HAILO_STREAM_ABORTED_BY_USER");
@@ -692,86 +402,96 @@ hailo_status CoreOpsScheduler::optimize_streaming_if_enabled(const scheduler_cor
     return HAILO_SUCCESS;
 }
 
-uint16_t CoreOpsScheduler::get_min_avail_buffers_count(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id)
+uint16_t CoreOpsScheduler::get_frames_ready_to_transfer(scheduler_core_op_handle_t core_op_handle,
+    const device_id_t &device_id) const
+{
+    auto scheduled_core_op = m_scheduled_core_ops.at(core_op_handle);
+    auto device_info = m_devices.at(device_id);
+
+    const auto max_ongoing_frames = scheduled_core_op->get_max_ongoing_frames_per_device();
+    const auto ongoing_frames = device_info->ongoing_frames[core_op_handle].get_max_value();
+    assert(ongoing_frames <= max_ongoing_frames);
+
+    const auto pending_frames = scheduled_core_op->pending_frames().get_min_value();
+
+    return static_cast<uint16_t>(std::min(pending_frames, max_ongoing_frames - ongoing_frames));
+}
+
+void CoreOpsScheduler::schedule()
 {
-    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
-    auto device_info = m_devices[device_id];
-
-    uint16_t avail_buffer_count = UINT16_MAX;
-    for (auto &output_stream : scheduled_core_op->get_core_op()->get_output_streams()) {
-        auto &vdevice_output = static_cast<OutputStreamBase&>(output_stream.get());
-        if (auto buffer_size_in_frames = vdevice_output.get_buffer_frames_size()) {
-            auto &pending_frames_in_buffer = device_info->pending_to_read_frames[core_op_handle][vdevice_output.name()];
-            auto ongoing_frames = get_max_value_of_unordered_map(device_info->current_cycle_requested_transferred_frames_h2d[core_op_handle]) -
-                device_info->current_cycle_finished_transferred_frames_d2h[core_op_handle][vdevice_output.name()];
-            assert(*buffer_size_in_frames >= (pending_frames_in_buffer + ongoing_frames));
-            avail_buffer_count = std::min(avail_buffer_count, static_cast<uint16_t>(*buffer_size_in_frames - pending_frames_in_buffer - ongoing_frames));
+    std::shared_lock<std::shared_timed_mutex> lock(m_scheduler_mutex);
+    m_scheduled_core_ops.for_each([this](const std::pair<vdevice_core_op_handle_t, ScheduledCoreOpPtr> &core_op_pair) {
+        auto status = optimize_streaming_if_enabled(core_op_pair.first);
+        if ((HAILO_SUCCESS != status) &&
+            (HAILO_STREAM_ABORTED_BY_USER != status)) {
+            LOGGER__ERROR("optimize_streaming_if_enabled thread failed with status={}", status);
+        }
+
+    });
+
+    auto oracle_decisions = CoreOpsSchedulerOracle::get_oracle_decisions(*this);
+
+    for (const auto &run_params : oracle_decisions) {
+        auto status = switch_core_op(run_params.core_op_handle, run_params.device_id);
+        if (HAILO_STREAM_ABORTED_BY_USER == status) {
+            continue;
         }
-    }
 
-    auto transferred_frames = get_max_value_of_unordered_map(device_info->current_cycle_requested_transferred_frames_h2d[core_op_handle]) -
-        get_min_value_of_unordered_map(device_info->current_cycle_finished_transferred_frames_d2h[core_op_handle]);
-    if (is_multi_device()) {
-        auto avail_input_buffer_count = static_cast<uint16_t>((scheduled_core_op->get_min_input_buffers_count()) - transferred_frames);
-        avail_buffer_count = std::min(avail_input_buffer_count, avail_buffer_count);
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Scheduler thread failed with status={}", status);
+            break;
+        }
     }
+}
+
+CoreOpsScheduler::SchedulerThread::SchedulerThread(CoreOpsScheduler &scheduler) :
+    m_scheduler(scheduler),
+    m_is_running(true),
+    m_execute_worker_thread(false),
+    m_thread([this]() { worker_thread_main(); })
+{}
 
-    return avail_buffer_count;
+CoreOpsScheduler::SchedulerThread::~SchedulerThread()
+{
+    stop();
 }
 
-uint16_t CoreOpsScheduler::get_min_avail_output_buffers(const scheduler_core_op_handle_t &core_op_handle)
+void CoreOpsScheduler::SchedulerThread::signal()
 {
-    auto scheduled_core_op = m_scheduled_core_ops[core_op_handle];
-    auto sent_frames = scheduled_core_op->h2d_finished_transferred_frames_max_value() -
-        scheduled_core_op->finished_read_frames_min_value();
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+        m_execute_worker_thread = true;
+    }
+    m_cv.notify_one();
+}
 
-    return static_cast<uint16_t>((scheduled_core_op->get_min_output_buffers_count()) - sent_frames);
+void CoreOpsScheduler::SchedulerThread::stop()
+{
+    if (m_thread.joinable()) {
+        m_is_running = false;
+        signal();
+        m_thread.join();
+    }
 }
 
-void CoreOpsScheduler::worker_thread_main()
+void CoreOpsScheduler::SchedulerThread::worker_thread_main()
 {
     OsUtils::set_current_thread_name("SCHEDULER");
-    std::unique_lock<std::mutex> lock(m_before_read_write_mutex);
-    while (m_is_running) {
 
-        m_scheduler_cv.wait(lock, [this]() {
-            return m_execute_worker_thread.load();
-        });
-        m_execute_worker_thread = false;
+    while (m_is_running) {
+        {
+            std::unique_lock<std::mutex> lock(m_mutex);
+            m_cv.wait(lock, [this]() {
+                return m_execute_worker_thread.load();
+            });
+            m_execute_worker_thread = false;
+        }
 
         if (!m_is_running) {
             break;
         }
 
-        for (uint32_t core_op_handle = 0; core_op_handle < m_scheduled_core_ops.size(); core_op_handle++) {
-            auto status = optimize_streaming_if_enabled(core_op_handle);
-            if (HAILO_STREAM_ABORTED_BY_USER == status) {
-                continue;
-            }
-
-            if (HAILO_SUCCESS != status) {
-                if (m_is_running) {
-                    LOGGER__ERROR("Scheduler thread failed with status={}", status);
-                }
-                break;
-            }
-        }
-
-        auto oracle_decisions = CoreOpsSchedulerOracle::get_oracle_decisions(*this);
-
-        for (const auto &run_params : oracle_decisions) {
-            auto status = switch_core_op(run_params.core_op_handle, run_params.device_id);
-            if (HAILO_STREAM_ABORTED_BY_USER == status) {
-                continue;
-            }
-
-            if (HAILO_SUCCESS != status) {
-                if (m_is_running) {
-                    LOGGER__ERROR("Scheduler thread failed with status={}", status);
-                }
-                break;
-            }
-        }
+        m_scheduler.schedule();
     }
 }
 
index c85c2160c51a358afaca20bec8c67da011297c57..94159f656066d307f89639e9e20b8e71b0f28fa9 100644 (file)
 #include "common/utils.hpp"
 #include "common/filesystem.hpp"
 
+#include "utils/thread_safe_map.hpp"
+
 #include "vdevice/scheduler/scheduled_core_op_state.hpp"
-#include "vdevice/scheduler/scheduled_core_op_cv.hpp"
 #include "vdevice/scheduler/scheduler_base.hpp"
 
 
 namespace hailort
 {
 
-#define INVALID_CORE_OP_HANDLE (UINT32_MAX)
-
 using scheduler_core_op_handle_t = uint32_t;
 using core_op_priority_t = uint8_t;
 
@@ -51,70 +50,77 @@ public:
     CoreOpsScheduler &operator=(CoreOpsScheduler &&other) = delete;
     CoreOpsScheduler(CoreOpsScheduler &&other) noexcept = delete;
 
-    Expected<scheduler_core_op_handle_t> add_core_op(std::shared_ptr<CoreOp> added_core_op);
+    hailo_status add_core_op(scheduler_core_op_handle_t core_op_handle, std::shared_ptr<CoreOp> added_core_op);
 
-    hailo_status signal_frame_pending_to_send(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name);
+    // Shuts down the scheduler: stops the interrupt thread and deactivates all core ops on all devices. This
+    // operation is not recoverable.
+    void shutdown();
 
-    hailo_status wait_for_read(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
-        const std::chrono::milliseconds &timeout, const std::function<bool()> &predicate);
+    hailo_status signal_frame_pending(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
+        hailo_stream_direction_t direction);
 
-    hailo_status signal_frame_pending_to_read(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name);
+    void signal_frame_transferred(const scheduler_core_op_handle_t &core_op_handle,
+        const std::string &stream_name, const device_id_t &device_id, hailo_stream_direction_t direction);
 
-    void signal_frame_transferred_d2h(const scheduler_core_op_handle_t &core_op_handle,
-        const std::string &stream_name, const device_id_t &device_id);
-    hailo_status signal_read_finish(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
-        const device_id_t &device_id);
-
-    hailo_status enable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name);
-    hailo_status disable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name);
+    void enable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name);
+    void disable_stream(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name);
 
     hailo_status set_timeout(const scheduler_core_op_handle_t &core_op_handle, const std::chrono::milliseconds &timeout, const std::string &network_name);
     hailo_status set_threshold(const scheduler_core_op_handle_t &core_op_handle, uint32_t threshold, const std::string &network_name);
     hailo_status set_priority(const scheduler_core_op_handle_t &core_op_handle, core_op_priority_t priority, const std::string &network_name);
 
-    virtual ReadyInfo is_core_op_ready(const scheduler_core_op_handle_t &core_op_handle, bool check_threshold) override;
-    virtual bool has_core_op_drained_everything(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id) override;
-    hailo_status flush_pending_buffers(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name, const std::chrono::milliseconds &timeout);
-
-    void notify_all();
-
-protected:
-    bool choose_next_core_op(const device_id_t &device_id, bool check_threshold);
-
-    std::unordered_map<scheduler_core_op_handle_t, std::map<stream_name_t, std::atomic_bool>> m_should_core_op_stop;
+    virtual ReadyInfo is_core_op_ready(const scheduler_core_op_handle_t &core_op_handle, bool check_threshold,
+        const device_id_t &device_id) override;
+    virtual bool is_device_idle(const device_id_t &device_id) override;
 
 private:
-    hailo_status switch_core_op(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id,
-        bool keep_nn_config = false);
-    // Needs to be called with m_before_read_write_mutex held.
-    void signal_read_finish_impl(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name,
-        const device_id_t &device_id);
+    hailo_status switch_core_op(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id);
 
     hailo_status send_all_pending_buffers(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id, uint32_t burst_size);
-    hailo_status send_pending_buffer(const scheduler_core_op_handle_t &core_op_handle, const std::string &stream_name, const device_id_t &device_id);
 
-    void decrease_core_op_counters(const scheduler_core_op_handle_t &core_op_handle);
     bool should_core_op_stop(const scheduler_core_op_handle_t &core_op_handle);
-    bool core_op_all_streams_aborted(const scheduler_core_op_handle_t &core_op_handle);
-
-    std::string get_core_op_name(const scheduler_core_op_handle_t &core_op_handle);
-    bool is_core_op_active(const scheduler_core_op_handle_t &core_op_handle);
-    bool is_multi_device();
 
     hailo_status optimize_streaming_if_enabled(const scheduler_core_op_handle_t &core_op_handle);
-    uint16_t get_min_avail_buffers_count(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id);
-    uint16_t get_min_avail_output_buffers(const scheduler_core_op_handle_t &core_op_handle);
 
-    void worker_thread_main();
+    uint16_t get_frames_ready_to_transfer(scheduler_core_op_handle_t core_op_handle, const device_id_t &device_id) const;
+
+    void schedule();
+
+    class SchedulerThread final {
+    public:
+        SchedulerThread(CoreOpsScheduler &scheduler);
+
+        ~SchedulerThread();
+
+        SchedulerThread(const SchedulerThread &) = delete;
+        SchedulerThread &operator=(const SchedulerThread &) = delete;
+
+        void signal();
+        void stop();
+
+    private:
+        void worker_thread_main();
+
+        CoreOpsScheduler &m_scheduler;
+        std::mutex m_mutex;
+        std::condition_variable m_cv;
+        std::atomic_bool m_is_running;
+        std::atomic_bool m_execute_worker_thread;
+        std::thread m_thread;
+    };
+
+    ThreadSafeMap<vdevice_core_op_handle_t, ScheduledCoreOpPtr> m_scheduled_core_ops;
 
-    std::vector<std::shared_ptr<ScheduledCoreOp>> m_scheduled_core_ops;
-    std::mutex m_before_read_write_mutex;
-    std::unordered_map<scheduler_core_op_handle_t, std::shared_ptr<ScheduledCoreOpCV>> m_core_ops_cvs;
+    // This shared mutex guards accessing the scheduler data structures including:
+    //   - m_scheduled_core_ops
+    //   - m_core_op_priority
+    //   - m_next_core_op
+    // Any function that is modifying these structures (for example by adding/removing items) must lock this mutex
+    // using unique_lock. Any function reading these structures (for example accessing
+    // m_scheduled_core_ops.at(core_op_handle)) can use shared_lock.
+    std::shared_timed_mutex m_scheduler_mutex;
 
-    std::atomic_bool m_is_running;
-    std::atomic_bool m_execute_worker_thread;
-    std::thread m_scheduler_thread;
-    std::condition_variable m_scheduler_cv;
+    SchedulerThread m_scheduler_thread;
 };
 } /* namespace hailort */
 
index a8575f17bc943e045c0610249d7e188cdcd90672..3c7731667e64290feac287e6b6250a7b31c93b7d 100644 (file)
@@ -17,6 +17,7 @@
 #include "common/filesystem.hpp"
 
 #include "stream_common/stream_internal.hpp"
+#include "vdevice/scheduler/scheduler_counter.hpp"
 
 #include <condition_variable>
 
@@ -27,7 +28,6 @@ namespace hailort
 #define DEFAULT_SCHEDULER_TIMEOUT (std::chrono::milliseconds(0))
 #define DEFAULT_SCHEDULER_MIN_THRESHOLD (0)
 
-#define INVALID_CORE_OP_HANDLE (UINT32_MAX)
 
 using scheduler_core_op_handle_t = uint32_t;
 using core_op_priority_t = uint8_t;
@@ -37,16 +37,39 @@ using stream_name_t = std::string;
 struct ActiveDeviceInfo {
     ActiveDeviceInfo(const device_id_t &device_id, const std::string &device_arch) : 
         current_core_op_handle(INVALID_CORE_OP_HANDLE), next_core_op_handle(INVALID_CORE_OP_HANDLE), is_switching_core_op(false), 
-        current_batch_size(0), current_cycle_requested_transferred_frames_h2d(), current_cycle_finished_transferred_frames_d2h(), 
-        pending_to_read_frames(), device_id(device_id), device_arch(device_arch)
+        current_batch_size(0),
+        frames_left_before_stop_streaming(0),
+        device_id(device_id), device_arch(device_arch)
     {}
+
+    uint32_t get_ongoing_frames() const
+    {
+        if (current_core_op_handle == INVALID_CORE_OP_HANDLE) {
+            // No ongoing frames
+            return 0;
+        }
+
+        return ongoing_frames.at(current_core_op_handle).get_max_value();
+    }
+
+    bool is_idle() const
+    {
+        return 0 == get_ongoing_frames();
+    }
+
     scheduler_core_op_handle_t current_core_op_handle;
     scheduler_core_op_handle_t next_core_op_handle;
     std::atomic_bool is_switching_core_op;
     std::atomic_uint32_t current_batch_size;
-    std::unordered_map<scheduler_core_op_handle_t, std::unordered_map<stream_name_t, std::atomic_uint32_t>> current_cycle_requested_transferred_frames_h2d;
-    std::unordered_map<scheduler_core_op_handle_t, std::unordered_map<stream_name_t, std::atomic_uint32_t>> current_cycle_finished_transferred_frames_d2h;
-    std::unordered_map<scheduler_core_op_handle_t, std::unordered_map<stream_name_t, std::atomic_uint32_t>> pending_to_read_frames;
+
+    // As long as this counter is greater than zero, we won't stop streaming on the current core op if we have
+    // ready frames (even if there is another core op ready).
+    size_t frames_left_before_stop_streaming;
+
+    // For each stream (both input and output) we store a counter for all ongoing frames. We increase the counter when
+    // launching transfer and decrease it when we get the transfer callback called.
+    std::unordered_map<scheduler_core_op_handle_t, SchedulerCounter> ongoing_frames;
+
     device_id_t device_id;
     std::string device_arch;
 };
@@ -66,8 +89,9 @@ public:
         bool is_ready = false;
     };
 
-    virtual ReadyInfo is_core_op_ready(const scheduler_core_op_handle_t &core_op_handle, bool check_threshold) = 0;
-    virtual bool has_core_op_drained_everything(const scheduler_core_op_handle_t &core_op_handle, const device_id_t &device_id) = 0;
+    virtual ReadyInfo is_core_op_ready(const scheduler_core_op_handle_t &core_op_handle, bool check_threshold,
+        const device_id_t &device_id) = 0;
+    virtual bool is_device_idle(const device_id_t &device_id) = 0;
 
     virtual uint32_t get_device_count() const
     {
@@ -89,17 +113,14 @@ public:
         return m_core_op_priority;
     }
 
-    virtual scheduler_core_op_handle_t get_next_core_op(core_op_priority_t priority)
+    virtual scheduler_core_op_handle_t get_next_core_op(core_op_priority_t priority) const
     {
-        if (!contains(m_next_core_op, priority)) {
-            m_next_core_op[priority] = 0;
-        }
-        return m_next_core_op[priority];
+        return m_next_core_op.at(priority);
     }
 
     virtual void set_next_core_op(const core_op_priority_t priority, const scheduler_core_op_handle_t &core_op_handle)
     {
-        m_next_core_op[priority] = core_op_handle;
+        m_next_core_op.at(priority) = core_op_handle;
     }
 
 protected:
diff --git a/hailort/libhailort/src/vdevice/scheduler/scheduler_counter.hpp b/hailort/libhailort/src/vdevice/scheduler/scheduler_counter.hpp
new file mode 100644 (file)
index 0000000..cf40f3d
--- /dev/null
@@ -0,0 +1,99 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file scheduler_counter.hpp
+ * @brief Counter object that wraps a single counter per stream.
+ **/
+
+#ifndef _HAILO_SCHEDULER_COUNTER_HPP_
+#define _HAILO_SCHEDULER_COUNTER_HPP_
+
+#include "common/utils.hpp"
+
+#include <unordered_map>
+#include <cassert>
+#include <atomic>
+
+namespace hailort
+{
+
+using stream_name_t = std::string;
+
+// Per-stream counter map. Each value is a std::atomic_uint32_t, so a single
+// increase/decrease is atomic, but aggregate queries (min/max/empty) iterate the
+// map without a lock -- presumably callers serialize map mutation (insert) against
+// readers externally; TODO confirm against the scheduler's locking scheme.
+class SchedulerCounter
+{
+public:
+    SchedulerCounter() : m_map()
+    {}
+
+    // Registers a new stream with a zeroed counter. The stream must not already exist.
+    void insert(const stream_name_t &name)
+    {
+        assert(!contains(m_map, name));
+        m_map[name] = 0;
+    }
+
+    // Returns the current counter value of a previously inserted stream.
+    uint32_t operator[](const stream_name_t &name) const
+    {
+        assert(contains(m_map, name));
+        return m_map.at(name);
+    }
+
+    // Atomically increments the counter of the given stream.
+    void increase(const stream_name_t &name)
+    {
+        assert(contains(m_map, name));
+        m_map[name]++;
+    }
+
+    // Atomically decrements the counter of the given stream. The value must be > 0.
+    void decrease(const stream_name_t &name)
+    {
+        assert(contains(m_map, name));
+        assert(m_map[name] > 0);
+        m_map[name]--;
+    }
+
+    // Minimum counter value across all registered streams.
+    uint32_t get_min_value() const
+    {
+        return get_min_value_of_unordered_map(m_map);
+    }
+
+    // Maximum counter value across all registered streams.
+    uint32_t get_max_value() const
+    {
+        return get_max_value_of_unordered_map(m_map);
+    }
+
+    // True if every stream's counter is >= value.
+    bool all_values_bigger_or_equal(uint32_t value) const
+    {
+        for (const auto &pair : m_map) {
+            if (value > pair.second) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    // True if all counters are zero (streams stay registered).
+    bool empty() const
+    {
+        for (const auto &pair : m_map) {
+            if (0 != pair.second) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    // Resets every counter to zero without removing registered streams.
+    void reset()
+    {
+        for (auto &pair : m_map) {
+            pair.second = 0;
+        }
+    }
+
+private:
+    std::unordered_map<stream_name_t, std::atomic_uint32_t> m_map;
+};
+
+
+} /* namespace hailort */
+
+#endif /* _HAILO_SCHEDULER_COUNTER_HPP_ */
index 1d97c581ac254af53bd6f54f6c9aabc44bf4ee6a..b6106e1e26955b536b358380f77c80f2163a72a5 100644 (file)
@@ -25,9 +25,11 @@ scheduler_core_op_handle_t CoreOpsSchedulerOracle::choose_next_model(SchedulerBa
             uint32_t index = scheduler.get_next_core_op(iter->first) + i;
             index %= static_cast<uint32_t>(priority_group_size);
             auto core_op_handle = iter->second[index];
-            auto ready_info = scheduler.is_core_op_ready(core_op_handle, check_threshold);
+            auto ready_info = scheduler.is_core_op_ready(core_op_handle, check_threshold, device_id);
             if (ready_info.is_ready) {
-                TRACE(ChooseCoreOpTrace, "", core_op_handle, ready_info.over_threshold, ready_info.over_timeout, iter->first);
+                // When the device is idle, the threshold check is not needed, therefore check_threshold is false.
+                bool switch_because_idle = !(check_threshold);
+                TRACE(OracleDecisionTrace, switch_because_idle, device_id, core_op_handle, ready_info.over_threshold, ready_info.over_timeout);
                 device_info->is_switching_core_op = true;
                 device_info->next_core_op_handle = core_op_handle;
                 // Set next to run as next in round-robin
@@ -43,6 +45,13 @@ scheduler_core_op_handle_t CoreOpsSchedulerOracle::choose_next_model(SchedulerBa
 
 bool CoreOpsSchedulerOracle::should_stop_streaming(SchedulerBase &scheduler, core_op_priority_t core_op_priority, const device_id_t &device_id)
 {
+    const auto device_info = scheduler.get_device_info(device_id);
+    if (device_info->frames_left_before_stop_streaming > 0) {
+        // Only when frames_left_before_stop_streaming we consider stop streaming
+        return false;
+    }
+
+    // Now check if there is another qualified core op.
     auto priority_map = scheduler.get_core_op_priority_map();
     for (auto iter = priority_map.rbegin(); (iter != priority_map.rend()) && (iter->first >= core_op_priority); ++iter) {
         auto priority_group_size = iter->second.size();
@@ -51,9 +60,7 @@ bool CoreOpsSchedulerOracle::should_stop_streaming(SchedulerBase &scheduler, cor
             uint32_t index = scheduler.get_next_core_op(iter->first) + i;
             index %= static_cast<uint32_t>(priority_group_size);
             auto core_op_handle = iter->second[index];
-            // We dont want to stay with the same network group if there is a other qualified network group
-            if ((!is_core_op_active(scheduler, core_op_handle)) && scheduler.is_core_op_ready(core_op_handle, true).is_ready &&
-                is_core_op_finished_batch(scheduler, device_id)) {
+            if (!is_core_op_active(scheduler, core_op_handle) && scheduler.is_core_op_ready(core_op_handle, true, device_id).is_ready) {
                 return true;
             }
         }
@@ -75,14 +82,6 @@ bool CoreOpsSchedulerOracle::is_core_op_active(SchedulerBase &scheduler, schedul
     return false;
 }
 
-bool CoreOpsSchedulerOracle::is_core_op_finished_batch(SchedulerBase &scheduler, const device_id_t &device_id)
-{
-    auto device_info = scheduler.get_device_info(device_id);
-    auto max_transferred_h2d = get_max_value_of_unordered_map(device_info->current_cycle_requested_transferred_frames_h2d[device_info->current_core_op_handle]);
-
-    return device_info->current_batch_size <= max_transferred_h2d;
-}
-
 std::vector<RunParams> CoreOpsSchedulerOracle::get_oracle_decisions(SchedulerBase &scheduler)
 {
     auto &devices = scheduler.get_device_infos();
@@ -97,10 +96,15 @@ std::vector<RunParams> CoreOpsSchedulerOracle::get_oracle_decisions(SchedulerBas
         }
 
         // Check if device is idle
-        if (!active_device_info->is_switching_core_op &&
-            scheduler.has_core_op_drained_everything(active_device_info->current_core_op_handle, active_device_info->device_id)) {
-            auto core_op_handle = choose_next_model(scheduler, active_device_info->device_id, false);
+        if (!active_device_info->is_switching_core_op && scheduler.is_device_idle(active_device_info->device_id)) {
+            const bool CHECK_THRESHOLD = true;
+            auto core_op_handle = choose_next_model(scheduler, active_device_info->device_id, CHECK_THRESHOLD);
+            if (core_op_handle == INVALID_CORE_OP_HANDLE) {
+                core_op_handle = choose_next_model(scheduler, active_device_info->device_id, !CHECK_THRESHOLD);
+            }
+
             if (core_op_handle != INVALID_CORE_OP_HANDLE) {
+                // We have a decision
                 oracle_decision.push_back({core_op_handle, active_device_info->device_id});
             }
         }
index fd09944c9a2c0e37296459520c8205ea562c76cb..666b9e5b5c87ba2cc66bfa9671e0f045e05be79c 100644 (file)
@@ -37,7 +37,6 @@ private:
     CoreOpsSchedulerOracle() {}
     // TODO: Consider returning a vector of devices (we can use this function in other places)
     static bool is_core_op_active(SchedulerBase &scheduler, scheduler_core_op_handle_t core_op_handle);
-    static bool is_core_op_finished_batch(SchedulerBase &scheduler, const device_id_t &device_id);
 };
 
 } /* namespace hailort */
index 310892b93aba9e874b00e8706ee0c2dba87a2e8d..8b5cd8b3705930ba4461e3de3cc261f6e1bcf85b 100644 (file)
@@ -12,6 +12,7 @@
 #include "hailo/hailort.h"
 #include "hailo/vdevice.hpp"
 #include "hailo/hailort_defaults.hpp"
+#include "utils/profiler/tracer_macros.hpp"
 
 #include "vdevice/vdevice_internal.hpp"
 #include "vdevice/vdevice_core_op.hpp"
@@ -20,6 +21,7 @@
 #include "vdma/integrated/integrated_device.hpp"
 #include "utils/shared_resource_manager.hpp"
 #include "network_group/network_group_internal.hpp"
+#include "net_flow/pipeline/infer_model_internal.hpp"
 #include "core_op/core_op.hpp"
 
 #ifdef HAILO_SUPPORT_MULTI_PROCESS
@@ -98,6 +100,21 @@ Expected<ConfigureNetworkParams> VDevice::create_configure_params(Hef &hef, cons
     return hef.create_configure_params(stream_interface.release(), network_group_name);
 }
 
+hailo_status VDevice::before_fork()
+{
+    return HAILO_SUCCESS;
+}
+
+hailo_status VDevice::after_fork_in_parent()
+{
+    return HAILO_SUCCESS;
+}
+
+hailo_status VDevice::after_fork_in_child()
+{
+    return HAILO_SUCCESS;
+}
+
 VDeviceHandle::VDeviceHandle(uint32_t handle) : m_handle(handle)
 {}
 
@@ -165,12 +182,31 @@ Expected<hailo_stream_interface_t> VDeviceHandle::get_default_streams_interface(
     return vdevice.value()->get_default_streams_interface();
 }
 
+Expected<InferModel> VDeviceHandle::create_infer_model(const std::string &hef_path)
+{
+    auto &manager = SharedResourceManager<std::string, VDeviceBase>::get_instance();
+    auto vdevice = manager.resource_lookup(m_handle);
+    CHECK_EXPECTED(vdevice);
+
+    return vdevice.value()->create_infer_model(hef_path);
+}
+
+bool VDevice::service_over_ip_mode()
+{
+#ifdef HAILO_SUPPORT_MULTI_PROCESS
+    // If service address is different than the default - we work at service over IP mode
+    return hailort::HAILORT_SERVICE_ADDRESS != HAILORT_SERVICE_DEFAULT_ADDR;
+#endif
+    return false; // no service -> no service over ip
+}
+
 #ifdef HAILO_SUPPORT_MULTI_PROCESS
 
-VDeviceClient::VDeviceClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle, std::vector<std::unique_ptr<Device>> &&devices)
-    : m_client(std::move(client))
-    , m_handle(handle)
-    , m_devices(std::move(devices))
+VDeviceClient::VDeviceClient(std::unique_ptr<HailoRtRpcClient> client, VDeviceIdentifier &&identifier,
+    std::vector<std::unique_ptr<Device>> &&devices) :
+        m_client(std::move(client)),
+        m_identifier(std::move(identifier)),
+        m_devices(std::move(devices))
 {}
 
 VDeviceClient::~VDeviceClient()
@@ -181,7 +217,8 @@ VDeviceClient::~VDeviceClient()
     // The vdevice in the service will destruct the ConfiguredNetworkGroupBase,
     // and then the ConfiguredNetworkGroupClient destructor will be called - causing double destruction on ConfiguredNetworkGroupBase.
     m_network_groups.clear();
-    auto reply = m_client->VDevice_release(m_handle, OsUtils::get_curr_pid());
+    auto pid = OsUtils::get_curr_pid();
+    auto reply = m_client->VDevice_release(m_identifier, pid);
     if (reply != HAILO_SUCCESS) {
         LOGGER__CRITICAL("VDevice_release failed!");
     }
@@ -198,7 +235,7 @@ hailo_status VDeviceClient::create_client()
 {
     grpc::ChannelArguments ch_args;
     ch_args.SetMaxReceiveMessageSize(-1);
-    auto channel = grpc::CreateCustomChannel(HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials(), ch_args);
+    auto channel = grpc::CreateCustomChannel(hailort::HAILORT_SERVICE_ADDRESS, grpc::InsecureChannelCredentials(), ch_args);
     CHECK_NOT_NULL(channel, HAILO_INTERNAL_FAILURE);
     auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
     CHECK_NOT_NULL(client, HAILO_INTERNAL_FAILURE);
@@ -215,11 +252,6 @@ hailo_status VDeviceClient::after_fork_in_parent()
 hailo_status VDeviceClient::after_fork_in_child()
 {
     HailoRtRpcClientUtils::get_instance().after_fork_in_child();
-    auto status = create_client();
-    CHECK_SUCCESS(status);
-    auto expected_dup_handle = m_client->VDevice_dup_handle(OsUtils::get_curr_pid(), m_handle);
-    CHECK_EXPECTED_AS_STATUS(expected_dup_handle);
-    m_handle = expected_dup_handle.value();
     return HAILO_SUCCESS;
 }
 
@@ -227,21 +259,24 @@ Expected<std::unique_ptr<VDevice>> VDeviceClient::create(const hailo_vdevice_par
 {
     grpc::ChannelArguments ch_args;
     ch_args.SetMaxReceiveMessageSize(-1);
-    auto channel = grpc::CreateCustomChannel(HAILORT_SERVICE_DEFAULT_ADDR, grpc::InsecureChannelCredentials(), ch_args);
+    auto channel = grpc::CreateCustomChannel(hailort::HAILORT_SERVICE_ADDRESS, grpc::InsecureChannelCredentials(), ch_args);
     CHECK_AS_EXPECTED(channel != nullptr, HAILO_INTERNAL_FAILURE);
 
     auto client = make_unique_nothrow<HailoRtRpcClient>(channel);
     CHECK_AS_EXPECTED(client != nullptr, HAILO_OUT_OF_HOST_MEMORY);
     auto init_status = HailoRtRpcClientUtils::get_instance().init_client_service_communication();
     CHECK_SUCCESS_AS_EXPECTED(init_status);
-    auto reply = client->VDevice_create(params, OsUtils::get_curr_pid());
+
+    auto pid = OsUtils::get_curr_pid();
+    auto reply = client->VDevice_create(params, pid);
     CHECK_EXPECTED(reply);
 
     auto handle = reply.value();
-    auto devices = client->VDevice_get_physical_devices(handle);
+    // When working with service over IP - no access to physical devices (returning empty vector)
+    auto devices = (VDevice::service_over_ip_mode()) ? std::vector<std::unique_ptr<Device>>() : client->VDevice_get_physical_devices(handle);
     CHECK_EXPECTED(devices);
 
-    auto client_vdevice = std::unique_ptr<VDeviceClient>(new VDeviceClient(std::move(client), handle, devices.release()));
+    auto client_vdevice = std::unique_ptr<VDeviceClient>(new VDeviceClient(std::move(client), VDeviceIdentifier(handle), devices.release()));
     CHECK_AS_EXPECTED(client_vdevice != nullptr, HAILO_OUT_OF_HOST_MEMORY);
 
     return std::unique_ptr<VDevice>(std::move(client_vdevice));
@@ -250,17 +285,17 @@ Expected<std::unique_ptr<VDevice>> VDeviceClient::create(const hailo_vdevice_par
 Expected<ConfiguredNetworkGroupVector> VDeviceClient::configure(Hef &hef,
     const NetworkGroupsParamsMap &configure_params)
 {
-    auto networks_handles = m_client->VDevice_configure(m_handle, hef, OsUtils::get_curr_pid(), configure_params);
+    auto networks_handles = m_client->VDevice_configure(m_identifier, hef, OsUtils::get_curr_pid(), configure_params);
     CHECK_EXPECTED(networks_handles);
 
     ConfiguredNetworkGroupVector networks;
     networks.reserve(networks_handles->size());
-    for (auto &handle : networks_handles.value()) {
+    for (auto &ng_handle : networks_handles.value()) {
         auto expected_client = HailoRtRpcClientUtils::create_client();
         CHECK_EXPECTED(expected_client);
 
         auto client = expected_client.release();
-        auto network_group = make_shared_nothrow<ConfiguredNetworkGroupClient>(std::move(client), handle);
+        auto network_group = make_shared_nothrow<ConfiguredNetworkGroupClient>(std::move(client), NetworkGroupIdentifier(m_identifier, ng_handle));
         CHECK_NOT_NULL_AS_EXPECTED(network_group, HAILO_OUT_OF_HOST_MEMORY);
 
         networks.emplace_back(network_group);
@@ -272,6 +307,7 @@ Expected<ConfiguredNetworkGroupVector> VDeviceClient::configure(Hef &hef,
 Expected<std::vector<std::reference_wrapper<Device>>> VDeviceClient::get_physical_devices() const
 {
     std::vector<std::reference_wrapper<Device>> devices_refs;
+    CHECK_AS_EXPECTED(0 < m_devices.size(), HAILO_INVALID_OPERATION, "get_physical_devices() usage is invalid when working with service over IP. In order to use a local service, unset env var {}", HAILORT_SERVICE_ADDRESS_ENV_VAR);
 
     for (auto &device : m_devices) {
         devices_refs.push_back(*device);
@@ -282,12 +318,18 @@ Expected<std::vector<std::reference_wrapper<Device>>> VDeviceClient::get_physica
 
 Expected<std::vector<std::string>> VDeviceClient::get_physical_devices_ids() const
 {
-    return m_client->VDevice_get_physical_devices_ids(m_handle);
+    return m_client->VDevice_get_physical_devices_ids(m_identifier);
 }
 
 Expected<hailo_stream_interface_t> VDeviceClient::get_default_streams_interface() const
 {
-    return m_client->VDevice_get_default_streams_interface(m_handle);
+    return m_client->VDevice_get_default_streams_interface(m_identifier);
+}
+
+Expected<InferModel> VDeviceClient::create_infer_model(const std::string &hef_path)
+{
+    (void)hef_path;
+    return make_unexpected(HAILO_NOT_IMPLEMENTED);
 }
 
 #endif // HAILO_SUPPORT_MULTI_PROCESS
@@ -360,6 +402,8 @@ hailo_status VDeviceBase::validate_params(const hailo_vdevice_params_t &params)
 
 Expected<std::unique_ptr<VDeviceBase>> VDeviceBase::create(const hailo_vdevice_params_t &params)
 {
+    TRACE(InitProfilerProtoTrace);
+
     auto devices_expected = create_devices(params);
     CHECK_EXPECTED(devices_expected);
     auto devices = devices_expected.release();
@@ -379,9 +423,11 @@ Expected<std::unique_ptr<VDeviceBase>> VDeviceBase::create(const hailo_vdevice_p
         auto device_arch_str = HailoRTCommon::get_device_arch_str(device_arch.value());
         device_archs.emplace_back(device_arch_str);
         vdevice_ids += " " + std::string(id_info_str);
+        TRACE(AddDeviceTrace, id_info_str, device_arch_str);
     }
     LOGGER__INFO("{}", vdevice_ids);
 
+    TRACE(MonitorStartTrace, uint32_t(device_ids.size()));
 
     CoreOpsSchedulerPtr scheduler_ptr;
     if (HAILO_SCHEDULING_ALGORITHM_NONE != params.scheduling_algorithm) {
@@ -401,6 +447,20 @@ Expected<std::unique_ptr<VDeviceBase>> VDeviceBase::create(const hailo_vdevice_p
     return vdevice;
 }
 
+VDeviceBase::~VDeviceBase()
+{
+    if (m_core_ops_scheduler) {
+        // The scheduler is held as weak/shared ptr, so it may not be freed by this destructor implicitly.
+        // The scheduler will be freed when the last reference is freed. If it will be freed inside some interrupt
+        // dispatcher thread (which holds a reference to the shared ptr) we will get stuck since the scheduler
+        // destructor will activate all core ops (and waits for the interrupt dispatcher).
+        // To solve it, we manually shutdown the scheduler here to make sure all devices have no activated core op and
+        // all interrupt dispatcher threads are idle.
+        m_core_ops_scheduler->shutdown();
+    }
+    TRACE(DumpProfilerState);
+}
+
 Expected<ConfiguredNetworkGroupVector> VDeviceBase::configure(Hef &hef,
     const NetworkGroupsParamsMap &configure_params)
 {
@@ -428,13 +488,9 @@ Expected<ConfiguredNetworkGroupVector> VDeviceBase::configure(Hef &hef,
         }
         std::shared_ptr<VDeviceCoreOp> vdevice_network_group = nullptr;
         if (identical_core_op) {
-            auto vdevice_network_group_exp = VDeviceCoreOp::duplicate(identical_core_op);
+            auto vdevice_network_group_exp = VDeviceCoreOp::duplicate(identical_core_op, network_params_pair.second);
             CHECK_EXPECTED(vdevice_network_group_exp);
-
             vdevice_network_group = vdevice_network_group_exp.release();
-            vdevice_network_group->set_core_op_handle(identical_core_op->core_op_handle());
-            auto status = vdevice_network_group->create_vdevice_streams_from_duplicate(identical_core_op);
-            CHECK_SUCCESS_AS_EXPECTED(status);
         } else {
             auto vdevice_network_group_expected = create_vdevice_network_group(hef, network_params_pair, use_multiplexer);
             CHECK_EXPECTED(vdevice_network_group_expected);
@@ -458,6 +514,40 @@ Expected<ConfiguredNetworkGroupVector> VDeviceBase::configure(Hef &hef,
     return added_network_groups;
 }
 
+Expected<InferModel> VDeviceBase::create_infer_model(const std::string &hef_path)
+{
+    auto hef_expected = Hef::create(hef_path);
+    CHECK_EXPECTED(hef_expected);
+    auto hef = hef_expected.release();
+
+    std::unordered_map<std::string, InferModel::InferStream> inputs;
+    std::unordered_map<std::string, InferModel::InferStream> outputs;
+
+    auto input_vstream_infos = hef.get_input_vstream_infos();
+    CHECK_EXPECTED(input_vstream_infos);
+
+    for (const auto &vstream_info : input_vstream_infos.value()) {
+        auto pimpl = make_shared_nothrow<InferModel::InferStream::Impl>(vstream_info);
+        CHECK_NOT_NULL_AS_EXPECTED(pimpl, HAILO_OUT_OF_HOST_MEMORY);
+
+        InferModel::InferStream stream(pimpl);
+        inputs.emplace(vstream_info.name, std::move(stream));
+    }
+
+    auto output_vstream_infos = hef.get_output_vstream_infos();
+    CHECK_EXPECTED(output_vstream_infos);
+
+    for (const auto &vstream_info : output_vstream_infos.value()) {
+        auto pimpl = make_shared_nothrow<InferModel::InferStream::Impl>(vstream_info);
+        CHECK_NOT_NULL_AS_EXPECTED(pimpl, HAILO_OUT_OF_HOST_MEMORY);
+
+        InferModel::InferStream stream(pimpl);
+        outputs.emplace(vstream_info.name, std::move(stream));
+    }
+
+    return InferModel(*this, std::move(hef), std::move(inputs), std::move(outputs));
+}
+
 Expected<hailo_stream_interface_t> VDeviceBase::get_default_streams_interface() const
 {
     auto stream_interface = m_devices.begin()->second.get()->get_default_streams_interface();
@@ -549,19 +639,25 @@ Expected<NetworkGroupsParamsMap> VDeviceBase::create_local_config_params(Hef &he
         local_config_params = config_params_exp.release();
     }
 
-    /* Validate batch size is identical for all networks in case scheduler is enabled */
-    if (m_core_ops_scheduler) {
-        uint16_t ref_batch_size = UINT16_MAX;
-        for (const auto &ng_params_pair : local_config_params) {
+    for (auto &ng_params_pair : local_config_params) {
+        if (m_core_ops_scheduler) {
+            // Validate batch size is identical for all networks in case scheduler is enabled.
+            uint16_t ref_batch_size = UINT16_MAX;
             for (const auto &network_params_pair : ng_params_pair.second.network_params_by_name) {
                 if (UINT16_MAX == ref_batch_size) {
                     ref_batch_size = network_params_pair.second.batch_size;
                 }
                 CHECK_AS_EXPECTED(ref_batch_size == network_params_pair.second.batch_size, HAILO_INVALID_OPERATION,
-                    "When scheduler is enabled, all networks should have the same batch_size. configure_params contains {} and {}. "
-                    "To disable scheduler, set HAILO_SCHEDULING_ALGORITHM_NONE in VDevice creation.", ref_batch_size, network_params_pair.second.batch_size);
+                    "When scheduler is enabled, all networks should have the same batch_size. "
+                    "configure_params contains {} and {}. "
+                    "To disable scheduler, set HAILO_SCHEDULING_ALGORITHM_NONE in VDevice creation.", ref_batch_size,
+                    network_params_pair.second.batch_size);
             }
         }
+
+        // Validate batch size (network group batch size vs network batch size).
+        auto status = Hef::Impl::update_network_batch_size(ng_params_pair.second);
+        CHECK_SUCCESS_AS_EXPECTED(status);
     }
 
     return local_config_params;
@@ -570,12 +666,21 @@ Expected<NetworkGroupsParamsMap> VDeviceBase::create_local_config_params(Hef &he
 Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceBase::create_vdevice_network_group(Hef &hef,
     const std::pair<const std::string, ConfigureNetworkParams> &params, bool use_multiplexer)
 {
-    std::map<device_id_t, std::vector<std::shared_ptr<CoreOp>>> core_ops_bundle;
+    std::map<device_id_t, std::shared_ptr<CoreOp>> core_ops;
 
     // configure all the devices to this ng and then push the core ops to bundle vector
        for (const auto &pair : m_devices) {
         auto &device = pair.second;
-        auto ng_vector = device->configure(hef, { std::make_pair(params.first, params.second) });
+
+        ConfigureNetworkParams low_level_params = params.second;
+        if (m_core_ops_scheduler) {
+            // When the scheduler is enabled, all low level streams must be async (even if the user uses sync API).
+            for (auto &stream_params : low_level_params.stream_params_by_name) {
+                stream_params.second.flags |= HAILO_STREAM_FLAGS_ASYNC;
+            }
+        }
+
+        auto ng_vector = device->configure(hef, { std::make_pair(params.first, low_level_params) });
         CHECK_EXPECTED(ng_vector);
 
         assert(1 == ng_vector->size());
@@ -590,36 +695,50 @@ Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceBase::create_vdevice_network_gro
         }
 
         auto ng_core_ops = network_group_base->get_core_ops();
-        auto &core_ops_vector = core_ops_bundle.emplace(device->get_dev_id(), std::vector<std::shared_ptr<CoreOp>>{}).first->second;
+        // To support several ng_core_ops, one should return vector of VDeviceCoreOp.
+        CHECK_AS_EXPECTED(ng_core_ops.size() == 1, HAILO_NOT_IMPLEMENTED,
+            "Only one core op for network group is supported");
+
+        core_ops.emplace(device->get_dev_id(), ng_core_ops[0]);
+    }
 
-        core_ops_vector.insert(core_ops_vector.begin(), ng_core_ops.begin(), ng_core_ops.end());
+    std::shared_ptr<PipelineMultiplexer> multiplexer = nullptr;
+    if (use_multiplexer) {
+        multiplexer = make_shared_nothrow<PipelineMultiplexer>();
+        CHECK_NOT_NULL_AS_EXPECTED(multiplexer, HAILO_OUT_OF_HOST_MEMORY);
     }
 
+    auto core_op_handle = allocate_core_op_handle();
 
-    auto vdevice_network_group_exp = VDeviceCoreOp::create(core_ops_bundle, m_core_ops_scheduler, hef.hash());
+    auto vdevice_network_group_exp = VDeviceCoreOp::create(m_active_core_op_holder, params.second, core_ops,
+        m_core_ops_scheduler, core_op_handle, multiplexer, hef.hash());
     CHECK_EXPECTED(vdevice_network_group_exp);
     auto vdevice_network_group = vdevice_network_group_exp.release();
 
-    auto ng_handle = INVALID_CORE_OP_HANDLE;
     if (m_core_ops_scheduler) {
-        auto core_op_handle_exp = m_core_ops_scheduler->add_core_op(vdevice_network_group);
-        CHECK_EXPECTED(core_op_handle_exp);
-        ng_handle = core_op_handle_exp.release();
-    }
-    vdevice_network_group->set_core_op_handle(ng_handle);
+        auto status = m_core_ops_scheduler->add_core_op(core_op_handle, vdevice_network_group);
+        CHECK_SUCCESS_AS_EXPECTED(status);
 
-    std::shared_ptr<PipelineMultiplexer> multiplexer = nullptr;
-    if (use_multiplexer) {
-        multiplexer = make_shared_nothrow<PipelineMultiplexer>();
-        CHECK_NOT_NULL_AS_EXPECTED(multiplexer, HAILO_OUT_OF_HOST_MEMORY);
-    }
+        // On scheduler, the streams are always activated
+        for (auto &input : vdevice_network_group->get_input_streams()) {
+            status = dynamic_cast<InputStreamBase&>(input.get()).activate_stream();
+            CHECK_SUCCESS_AS_EXPECTED(status);
+        }
 
-    auto status = vdevice_network_group->create_vdevice_streams_from_config_params(multiplexer, ng_handle);
-    CHECK_SUCCESS_AS_EXPECTED(status);
+        for (auto &output : vdevice_network_group->get_output_streams()) {
+            status = dynamic_cast<OutputStreamBase&>(output.get()).activate_stream();
+            CHECK_SUCCESS_AS_EXPECTED(status);
+        }
+    }
 
     return vdevice_network_group;
 }
 
+vdevice_core_op_handle_t VDeviceBase::allocate_core_op_handle()
+{
+    return m_next_core_op_handle++;
+}
+
 bool VDeviceBase::should_use_multiplexer(const ConfigureNetworkParams &network_params)
 {
     const auto &stream_params_by_name = network_params.stream_params_by_name;
index 18e3715bf576c3ac3a38f66bc5a9f0015d19b951..100aee45cc1306448cddca5a9880e194e42d7257 100644 (file)
@@ -8,94 +8,53 @@
  **/
 
 #include "vdevice/vdevice_core_op.hpp"
-#include "vdevice/vdevice_stream.hpp"
+#include "vdevice/scheduler/scheduled_stream.hpp"
+#include "vdevice/vdevice_native_stream.hpp"
 #include "vdevice/vdevice_stream_multiplexer_wrapper.hpp"
 #include "net_flow/pipeline/vstream_internal.hpp"
-#include "utils/profiler/tracer_macros.hpp"
+
+#define INVALID_BATCH_SIZE (-1)
 
 
 namespace hailort
 {
 
-Expected<std::unique_ptr<ActivatedNetworkGroup>> VDeviceActivatedCoreOp::create(
-    std::map<device_id_t, std::vector<std::shared_ptr<CoreOp>>> &core_ops,
-    std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-    std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
-    const hailo_activate_network_group_params_t &network_group_params,
-    EventPtr core_op_activated_event, uint16_t dynamic_batch_size,
-    AccumulatorPtr deactivation_time_accumulator,
-    bool resume_pending_stream_transfers)
+Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceCoreOp::create(ActiveCoreOpHolder &active_core_op_holder,
+    const ConfigureNetworkParams &configure_params,
+    const std::map<device_id_t, std::shared_ptr<CoreOp>> &core_ops,
+    CoreOpsSchedulerWeakPtr core_ops_scheduler, vdevice_core_op_handle_t core_op_handle,
+    std::shared_ptr<PipelineMultiplexer> multiplexer,
+    const std::string &hef_hash)
 {
     auto status = HAILO_UNINITIALIZED;
-    std::vector<std::unique_ptr<ActivatedNetworkGroup>> activated_network_groups;
-    activated_network_groups.reserve(core_ops.size());
-    for (const auto &pair : core_ops) {
-        auto &core_op_vector = pair.second;
-        for (auto &core_op : core_op_vector) {
-            auto ang = core_op->create_activated_network_group(network_group_params, dynamic_batch_size,
-                resume_pending_stream_transfers);
-            CHECK_EXPECTED(ang);
-            activated_network_groups.emplace_back(ang.release());
+
+    for (auto &core_op : core_ops)
+    {
+        core_op.second->set_vdevice_core_op_handle(core_op_handle);
+        for (auto &stream : core_op.second->get_input_streams())
+        {
+            auto &native_stream = static_cast<VDeviceNativeInputStream&>(stream.get());
+            native_stream.set_vdevice_core_op_handle(core_op_handle);
         }
     }
-    auto ang = VDeviceActivatedCoreOp(std::move(activated_network_groups), input_streams, output_streams,
-        network_group_params, core_op_activated_event, deactivation_time_accumulator, status);
 
+    VDeviceCoreOp object(active_core_op_holder, configure_params, std::move(core_ops), core_ops_scheduler,
+        core_op_handle, multiplexer, hef_hash, status);
     CHECK_SUCCESS_AS_EXPECTED(status);
-    std::unique_ptr<ActivatedNetworkGroup> activated_net_group_ptr =
-        make_unique_nothrow<VDeviceActivatedCoreOp>(std::move(ang));
-    CHECK_AS_EXPECTED(nullptr != activated_net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
-    status = core_op_activated_event->signal();
-    CHECK_SUCCESS_AS_EXPECTED(status, "Failed to signal network activation event");
-
-    return activated_net_group_ptr;
-}
-
-const std::string &VDeviceActivatedCoreOp::get_network_group_name() const
-{
-    // network_group_name is same across all NGs
-    return m_activated_network_groups[0]->get_network_group_name();
-}
-
-Expected<Buffer> VDeviceActivatedCoreOp::get_intermediate_buffer(const IntermediateBufferKey &key)
-{
-    CHECK_AS_EXPECTED(1 == m_activated_network_groups.size(), HAILO_INVALID_OPERATION, "getting intermediate buffer is supported only over single device");
-    return m_activated_network_groups[0]->get_intermediate_buffer(key);
-}
 
-hailo_status VDeviceActivatedCoreOp::set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset)
-{
-    for (auto &activated_network_group : m_activated_network_groups) {
-        auto status = activated_network_group->set_keep_nn_config_during_reset(keep_nn_config_during_reset);
-        CHECK_SUCCESS(status);
+    int batch_size = INVALID_BATCH_SIZE;
+    bool batch_size_equals = std::all_of(configure_params.network_params_by_name.begin(),
+        configure_params.network_params_by_name.end(), [&](std::pair<std::string, hailo_network_parameters_t> n_param_map) {
+        return n_param_map.second.batch_size == configure_params.network_params_by_name.begin()->second.batch_size;
+    });
+    if (batch_size_equals) {
+        batch_size = configure_params.network_params_by_name.begin()->second.batch_size;
     }
-    return HAILO_SUCCESS;
-}
-
-VDeviceActivatedCoreOp::VDeviceActivatedCoreOp(std::vector<std::unique_ptr<ActivatedNetworkGroup>> &&activated_network_groups,
-    std::map<std::string, std::shared_ptr<InputStream>> &input_streams, std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
-    const hailo_activate_network_group_params_t &network_group_params, EventPtr core_op_activated_event, AccumulatorPtr deactivation_time_accumulator, hailo_status &status)
-    : ActivatedCoreOp(network_group_params, input_streams, output_streams, std::move(core_op_activated_event), status),
-        m_activated_network_groups(std::move(activated_network_groups)), m_should_reset_core_op(true), m_deactivation_time_accumulator(deactivation_time_accumulator)
-{
-}
 
-VDeviceActivatedCoreOp::VDeviceActivatedCoreOp(VDeviceActivatedCoreOp &&other) noexcept :
-    ActivatedCoreOp(std::move(other)),
-    m_activated_network_groups(std::move(other.m_activated_network_groups)),
-    m_should_reset_core_op(std::exchange(other.m_should_reset_core_op, false)),
-    m_deactivation_time_accumulator(std::move(other.m_deactivation_time_accumulator))
-{
-}
-
-
-Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceCoreOp::create(const std::map<device_id_t, std::vector<std::shared_ptr<CoreOp>>> &core_ops,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler, const std::string &hef_hash)
-{
-    auto status = HAILO_UNINITIALIZED;
-
-    VDeviceCoreOp object(std::move(core_ops), core_ops_scheduler, hef_hash, status);
+    // TODO HRT-11373: remove is_nms from monitor
+    TRACE(AddCoreOpTrace, "", object.name(), DEFAULT_SCHEDULER_TIMEOUT.count(), DEFAULT_SCHEDULER_MIN_THRESHOLD,
+        core_op_handle, object.is_nms(), batch_size);
+    status = object.create_vdevice_streams_from_config_params();
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     auto obj_ptr = make_shared_nothrow<VDeviceCoreOp>(std::move(object));
@@ -104,12 +63,17 @@ Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceCoreOp::create(const std::map<de
     return obj_ptr;
 }
 
-Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceCoreOp::duplicate(std::shared_ptr<VDeviceCoreOp> other)
+Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceCoreOp::duplicate(std::shared_ptr<VDeviceCoreOp> other,
+    const ConfigureNetworkParams &configure_params)
 {
     auto status = HAILO_UNINITIALIZED;
     auto copy = other->m_core_ops;
 
-    VDeviceCoreOp object(std::move(copy), other->m_core_ops_scheduler, other->m_hef_hash, status);
+    VDeviceCoreOp object(other->m_active_core_op_holder, configure_params, std::move(copy), other->m_core_ops_scheduler,
+        other->m_core_op_handle, other->m_multiplexer, other->m_hef_hash, status);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    status = object.create_vdevice_streams_from_duplicate(other);
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     auto obj_ptr = make_shared_nothrow<VDeviceCoreOp>(std::move(object));
@@ -119,59 +83,57 @@ Expected<std::shared_ptr<VDeviceCoreOp>> VDeviceCoreOp::duplicate(std::shared_pt
 }
 
 
-VDeviceCoreOp::VDeviceCoreOp(const std::map<device_id_t, std::vector<std::shared_ptr<CoreOp>>> &core_ops,
-    CoreOpsSchedulerWeakPtr core_ops_scheduler, const std::string &hef_hash, hailo_status &status) :
-        CoreOp((core_ops.begin()->second)[0]->m_config_params, (core_ops.begin()->second)[0]->m_metadata, status),
+VDeviceCoreOp::VDeviceCoreOp(ActiveCoreOpHolder &active_core_op_holder,
+    const ConfigureNetworkParams &configure_params,
+    const std::map<device_id_t, std::shared_ptr<CoreOp>> &core_ops,
+    CoreOpsSchedulerWeakPtr core_ops_scheduler, vdevice_core_op_handle_t core_op_handle,
+    std::shared_ptr<PipelineMultiplexer> multiplexer, const std::string &hef_hash, hailo_status &status) :
+        CoreOp(configure_params, core_ops.begin()->second->m_metadata, active_core_op_holder, status),
         m_core_ops(std::move(core_ops)),
         m_core_ops_scheduler(core_ops_scheduler),
-        m_scheduler_handle(INVALID_CORE_OP_HANDLE),
+        m_core_op_handle(core_op_handle),
+        m_multiplexer(multiplexer),
         m_multiplexer_handle(0),
-        m_multiplexer(),
         m_hef_hash(hef_hash)
 {}
 
 Expected<hailo_stream_interface_t> VDeviceCoreOp::get_default_streams_interface()
 {
-    auto first_streams_interface = (m_core_ops.begin()->second)[0]->get_default_streams_interface();
+    auto first_streams_interface = m_core_ops.begin()->second->get_default_streams_interface();
     CHECK_EXPECTED(first_streams_interface);
 #ifndef NDEBUG
     // Check that all physical devices has the same interface
     for (const auto &pair : m_core_ops) {
-        auto &core_op_vector = pair.second;
-        for (auto &core_op : core_op_vector) {
-            auto iface = core_op->get_default_streams_interface();
-            CHECK_EXPECTED(iface);
-            CHECK_AS_EXPECTED(iface.value() == first_streams_interface.value(), HAILO_INTERNAL_FAILURE,
-                "Not all default stream interfaces are the same");
-        }
+        auto &core_op = pair.second;
+        auto iface = core_op->get_default_streams_interface();
+        CHECK_EXPECTED(iface);
+        CHECK_AS_EXPECTED(iface.value() == first_streams_interface.value(), HAILO_INTERNAL_FAILURE,
+            "Not all default stream interfaces are the same");
     }
 #endif
     return first_streams_interface;
 }
 
-hailo_status VDeviceCoreOp::create_vdevice_streams_from_config_params(std::shared_ptr<PipelineMultiplexer> multiplexer,
-    scheduler_core_op_handle_t scheduler_handle)
+hailo_status VDeviceCoreOp::create_vdevice_streams_from_config_params()
 {
     // TODO - HRT-6931 - raise error on this case
     if (((m_config_params.latency & HAILO_LATENCY_MEASURE) == HAILO_LATENCY_MEASURE) && (1 < m_core_ops.size())) {
         LOGGER__WARNING("Latency measurement is not supported on more than 1 physical device.");
     }
 
-    m_multiplexer = multiplexer;
-
     for (const auto &stream_parameters_pair : m_config_params.stream_params_by_name) {
         switch (stream_parameters_pair.second.direction) {
             case HAILO_H2D_STREAM:
                 {
                     auto status = create_input_vdevice_stream_from_config_params(stream_parameters_pair.second,
-                        stream_parameters_pair.first, multiplexer, scheduler_handle);
+                        stream_parameters_pair.first);
                     CHECK_SUCCESS(status);
                 }
                 break;
             case HAILO_D2H_STREAM:
                 {
                     auto status = create_output_vdevice_stream_from_config_params(stream_parameters_pair.second,
-                        stream_parameters_pair.first, multiplexer, scheduler_handle);
+                        stream_parameters_pair.first);
                     CHECK_SUCCESS(status);
                 }
                 break;
@@ -182,24 +144,18 @@ hailo_status VDeviceCoreOp::create_vdevice_streams_from_config_params(std::share
     }
 
     for (const auto &input_stream : m_input_streams) {
-        if (HAILO_STREAM_INTERFACE_ETH == static_cast<InputStreamBase&>(*input_stream.second).get_interface()) {
+        if (HAILO_STREAM_INTERFACE_ETH == input_stream.second->get_interface()) {
             continue;
         }
-        auto expected_queue_size = static_cast<InputStreamBase&>(*input_stream.second).get_buffer_frames_size();
+        auto expected_queue_size = input_stream.second->get_buffer_frames_size();
         CHECK_EXPECTED_AS_STATUS(expected_queue_size);
-        TRACE(CreateCoreOpInputStreamsTrace, "", name(), input_stream.first, (uint32_t)expected_queue_size.value());
     }
     for (const auto &output_stream : m_output_streams) {
-        if (hailo_format_order_t::HAILO_FORMAT_ORDER_HAILO_NMS == (static_cast<OutputStreamBase&>(*output_stream.second).get_layer_info().format.order)) {
-            TRACE(CreateCoreOpOutputStreamsTrace, "", name(), output_stream.first, SCHEDULER_MON_NAN_VAL);
-            continue;
-        }
-        if (HAILO_STREAM_INTERFACE_ETH == static_cast<OutputStreamBase&>(*output_stream.second).get_interface()) {
+        if (HAILO_STREAM_INTERFACE_ETH == output_stream.second->get_interface()) {
             continue;
         }
-        auto expected_queue_size = static_cast<OutputStreamBase&>(*output_stream.second).get_buffer_frames_size();
+        auto expected_queue_size = output_stream.second->get_buffer_frames_size();
         CHECK_EXPECTED_AS_STATUS(expected_queue_size);
-        TRACE(CreateCoreOpOutputStreamsTrace, "", name(), output_stream.first, (uint32_t)expected_queue_size.value());
     }
 
     if (m_multiplexer) {
@@ -211,109 +167,149 @@ hailo_status VDeviceCoreOp::create_vdevice_streams_from_config_params(std::share
 }
 
 hailo_status VDeviceCoreOp::create_input_vdevice_stream_from_config_params(const hailo_stream_parameters_t &stream_params,
-    const std::string &stream_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t scheduler_handle)
+    const std::string &stream_name)
 {
     auto edge_layer = get_layer_info(stream_name);
     CHECK_EXPECTED_AS_STATUS(edge_layer);
 
-    if (HailoRTCommon::is_vdma_stream_interface(stream_params.stream_interface)) {
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> low_level_streams;
-        for (const auto &pair : m_core_ops) {
-            auto &device_id = pair.first;
-            auto &core_op_vector = pair.second;
-            for (auto &core_op : core_op_vector) {
-                auto stream = core_op->get_input_stream_by_name(stream_name);
-                CHECK(stream, HAILO_INTERNAL_FAILURE);
-                low_level_streams.emplace(device_id, dynamic_cast<VdmaInputStreamBase&>(stream.release().get()));
-            }
-        }
-        auto input_stream = VDeviceInputStreamBase::create(std::move(low_level_streams), stream_params, 
-            edge_layer.value(), scheduler_handle, m_core_op_activated_event, m_core_ops_scheduler);
-        CHECK_EXPECTED_AS_STATUS(input_stream);
-
-        if (multiplexer) {
-            auto input_stream_wrapper = VDeviceInputStreamMultiplexerWrapper::create(input_stream.release(), edge_layer->network_name, multiplexer, scheduler_handle);
-            CHECK_EXPECTED_AS_STATUS(input_stream_wrapper);
-            m_input_streams.insert(make_pair(stream_name, input_stream_wrapper.release()));
+    std::map<device_id_t, std::reference_wrapper<InputStreamBase>> low_level_streams;
+    for (const auto &pair : m_core_ops) {
+        auto &device_id = pair.first;
+        auto &core_op = pair.second;
+        auto stream = core_op->get_input_stream_by_name(stream_name);
+        CHECK(stream, HAILO_INTERNAL_FAILURE);
+        TRACE(CreateCoreOpInputStreamsTrace, device_id, name(), stream_name, (uint32_t)stream->get().get_buffer_frames_size().value(),
+            core_op->vdevice_core_op_handle());
+        low_level_streams.emplace(device_id, stream.release());
+    }
+
+    std::shared_ptr<InputStreamBase> input_stream = nullptr;
+
+    if (m_core_ops_scheduler.lock()) {
+        auto scheduled_stream = ScheduledInputStream::create(std::move(low_level_streams),
+            edge_layer.value(), m_core_op_handle, m_core_ops_scheduler, m_core_op_activated_event);
+        CHECK_EXPECTED_AS_STATUS(scheduled_stream);
+
+        if (m_multiplexer) {
+            auto multiplexer_stream = VDeviceInputStreamMultiplexerWrapper::create(scheduled_stream.release(),
+                edge_layer->network_name, m_multiplexer);
+            CHECK_EXPECTED_AS_STATUS(multiplexer_stream);
+
+            input_stream = multiplexer_stream.release();
         } else {
-            m_input_streams.insert(make_pair(stream_name, input_stream.release()));
+            input_stream = scheduled_stream.release();
         }
 
     } else {
-        assert(1 == m_core_ops.size());
-        auto stream = (m_core_ops.begin()->second)[0]->get_input_stream_by_name(stream_name);
-        CHECK(stream, HAILO_INTERNAL_FAILURE);
-        assert(1 == m_core_ops.size());
-        assert(contains((m_core_ops.begin()->second)[0]->m_input_streams, stream_name));
-        m_input_streams.insert(make_pair(stream_name, m_core_ops.begin()->second[0]->m_input_streams.at(stream_name)));
+        auto max_batch_size = get_stream_batch_size(stream_name);
+        CHECK_EXPECTED_AS_STATUS(max_batch_size);
+
+        auto native_stream = VDeviceNativeInputStream::create(std::move(low_level_streams),
+            m_core_op_activated_event, edge_layer.value(), max_batch_size.release(), m_core_op_handle);
+        CHECK_EXPECTED_AS_STATUS(native_stream);
+
+        input_stream = native_stream.release();
     }
 
+    auto status = add_input_stream(std::move(input_stream), stream_params);
+    CHECK_SUCCESS(status);
+
     return HAILO_SUCCESS;
 }
 
 hailo_status VDeviceCoreOp::create_output_vdevice_stream_from_config_params(const hailo_stream_parameters_t &stream_params,
-    const std::string &stream_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t scheduler_handle)
+    const std::string &stream_name)
 {
     auto edge_layer = get_layer_info(stream_name);
     CHECK_EXPECTED_AS_STATUS(edge_layer);
 
-    if (HailoRTCommon::is_vdma_stream_interface(stream_params.stream_interface)) {
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> low_level_streams;
-        for (const auto &pair : m_core_ops) {
-            auto &device_id = pair.first;
-            auto &core_op_vector = pair.second;
-            for (auto &core_op : core_op_vector) {
-                auto stream = core_op->get_output_stream_by_name(stream_name);
-                CHECK(stream, HAILO_INTERNAL_FAILURE);
-                low_level_streams.emplace(device_id, dynamic_cast<VdmaOutputStreamBase&>(stream.release().get()));
-            }
-        }
-        auto output_stream = VDeviceOutputStreamBase::create(std::move(low_level_streams), stream_params,
-            edge_layer.value(), scheduler_handle, m_core_op_activated_event, m_core_ops_scheduler);
-        CHECK_EXPECTED_AS_STATUS(output_stream);
-
-        if (multiplexer) {
-            // We allow multiplexer only on scheduled streams.
-            auto output_stream_wrapper = VDeviceOutputStreamMultiplexerWrapper::create(output_stream.release(), edge_layer->network_name, multiplexer, scheduler_handle);
-            CHECK_EXPECTED_AS_STATUS(output_stream_wrapper);
-            m_output_streams.insert(make_pair(stream_name, output_stream_wrapper.release()));
+    std::map<device_id_t, std::reference_wrapper<OutputStreamBase>> low_level_streams;
+    for (const auto &pair : m_core_ops) {
+        auto &device_id = pair.first;
+        auto &core_op = pair.second;
+        auto stream = core_op->get_output_stream_by_name(stream_name);
+        CHECK(stream, HAILO_INTERNAL_FAILURE);
+        TRACE(CreateCoreOpOutputStreamsTrace, device_id, name(), stream_name, (uint32_t)stream->get().get_buffer_frames_size().value(),
+            core_op->vdevice_core_op_handle());
+        low_level_streams.emplace(device_id, stream.release());
+    }
+
+    std::shared_ptr<OutputStreamBase> output_stream = nullptr;
+
+    if (m_core_ops_scheduler.lock()) {
+        auto scheduled_stream = ScheduledOutputStream::create(std::move(low_level_streams), m_core_op_handle,
+            edge_layer.value(), m_core_op_activated_event, m_core_ops_scheduler);
+        CHECK_EXPECTED_AS_STATUS(scheduled_stream);
+
+        if (m_multiplexer) {
+            auto multiplexer_stream = VDeviceOutputStreamMultiplexerWrapper::create(scheduled_stream.release(),
+                edge_layer->network_name, m_multiplexer);
+            CHECK_EXPECTED_AS_STATUS(multiplexer_stream);
+
+            output_stream = multiplexer_stream.release();
         } else {
-            m_output_streams.insert(make_pair(stream_name, output_stream.release()));
+            output_stream = scheduled_stream.release();
         }
+
     } else {
-        assert(1 == m_core_ops.size());
-        assert(contains((m_core_ops.begin()->second)[0]->m_output_streams, stream_name));
-        m_output_streams.insert(make_pair(stream_name, (m_core_ops.begin()->second)[0]->m_output_streams.at(stream_name)));
+        auto max_batch_size = get_stream_batch_size(stream_name);
+        CHECK_EXPECTED_AS_STATUS(max_batch_size);
+
+        auto native_stream = VDeviceNativeOutputStream::create(std::move(low_level_streams),
+            m_core_op_activated_event, edge_layer.value(), max_batch_size.release(), m_core_op_handle);
+        CHECK_EXPECTED_AS_STATUS(native_stream);
+
+        output_stream = native_stream.release();
     }
 
+    auto status = add_output_stream(std::move(output_stream), stream_params);
+    CHECK_SUCCESS(status);
+
     return HAILO_SUCCESS;
 }
 
 hailo_status VDeviceCoreOp::create_vdevice_streams_from_duplicate(std::shared_ptr<VDeviceCoreOp> other)
 {
-    // TODO - HRT-6931 - raise error on this case 
+    // TODO - HRT-6931 - raise error on this case
     if (((m_config_params.latency & HAILO_LATENCY_MEASURE) == HAILO_LATENCY_MEASURE) && (1 < m_core_ops.size())) {
         LOGGER__WARNING("Latency measurement is not supported on more than 1 physical device.");
     }
 
     assert(other->m_multiplexer != nullptr);
-    m_multiplexer = other->m_multiplexer;
     m_multiplexer_handle = other->multiplexer_duplicates_count() + 1;
 
-    for (auto &name_stream_pair : other->m_input_streams) {
-        auto input_stream = static_cast<VDeviceInputStreamMultiplexerWrapper*>(name_stream_pair.second.get());
-        auto copy = input_stream->clone(m_multiplexer_handle);
-        CHECK_EXPECTED_AS_STATUS(copy);
-
-        m_input_streams.insert(make_pair(name_stream_pair.first, copy.release()));
-    }
-
-    for (auto &name_stream_pair : other->m_output_streams) {
-        auto output_stream = static_cast<VDeviceOutputStreamMultiplexerWrapper*>(name_stream_pair.second.get());
-        auto copy = output_stream->clone(m_multiplexer_handle);
-        CHECK_EXPECTED_AS_STATUS(copy);
-
-        m_output_streams.insert(make_pair(name_stream_pair.first, copy.release()));
+    for (const auto &stream_parameters_pair : m_config_params.stream_params_by_name) {
+        switch (stream_parameters_pair.second.direction) {
+        case HAILO_H2D_STREAM:
+        {
+            auto other_stream = other->get_input_stream_by_name(stream_parameters_pair.first);
+            CHECK_EXPECTED_AS_STATUS(other_stream);
+            auto &other_stream_wrapper = dynamic_cast<VDeviceInputStreamMultiplexerWrapper&>(other_stream->get());
+
+            auto copy = other_stream_wrapper.clone(m_multiplexer_handle);
+            CHECK_EXPECTED_AS_STATUS(copy);
+
+            auto status = add_input_stream(copy.release(), stream_parameters_pair.second);
+            CHECK_SUCCESS(status);
+            break;
+        }
+        case HAILO_D2H_STREAM:
+        {
+            auto other_stream = other->get_output_stream_by_name(stream_parameters_pair.first);
+            CHECK_EXPECTED_AS_STATUS(other_stream);
+            auto &other_stream_wrapper = dynamic_cast<VDeviceOutputStreamMultiplexerWrapper&>(other_stream->get());
+
+            auto copy = other_stream_wrapper.clone(m_multiplexer_handle);
+            CHECK_EXPECTED_AS_STATUS(copy);
+
+            auto status = add_output_stream(copy.release(), stream_parameters_pair.second);
+            CHECK_SUCCESS(status);
+            break;
+        }
+        default:
+            LOGGER__ERROR("stream name {} direction is invalid.", stream_parameters_pair.first);
+            return HAILO_INVALID_ARGUMENT;
+        }
     }
 
     auto status = other->m_multiplexer->add_core_op_instance(m_multiplexer_handle, *this);
@@ -322,14 +318,9 @@ hailo_status VDeviceCoreOp::create_vdevice_streams_from_duplicate(std::shared_pt
     return HAILO_SUCCESS;
 }
 
-void VDeviceCoreOp::set_core_op_handle(scheduler_core_op_handle_t handle)
-{
-    m_scheduler_handle = handle;
-}
-
-scheduler_core_op_handle_t VDeviceCoreOp::core_op_handle() const
+vdevice_core_op_handle_t VDeviceCoreOp::core_op_handle() const
 {
-    return m_scheduler_handle;
+    return m_core_op_handle;
 }
 
 bool VDeviceCoreOp::is_scheduled() const
@@ -345,7 +336,7 @@ hailo_status VDeviceCoreOp::set_scheduler_timeout(const std::chrono::millisecond
     if (network_name != HailoRTDefaults::get_network_name(name())) {
         CHECK(network_name.empty(), HAILO_NOT_IMPLEMENTED, "Setting scheduler timeout for a specific network is currently not supported");
     }
-    auto status = core_ops_scheduler->set_timeout(m_scheduler_handle, timeout, network_name);
+    auto status = core_ops_scheduler->set_timeout(m_core_op_handle, timeout, network_name);
     CHECK_SUCCESS(status);
     return HAILO_SUCCESS;
 }
@@ -358,7 +349,7 @@ hailo_status VDeviceCoreOp::set_scheduler_threshold(uint32_t threshold, const st
     if (network_name != HailoRTDefaults::get_network_name(name())) {
         CHECK(network_name.empty(), HAILO_NOT_IMPLEMENTED, "Setting scheduler threshold for a specific network is currently not supported");
     }
-    auto status = core_ops_scheduler->set_threshold(m_scheduler_handle, threshold, network_name);
+    auto status = core_ops_scheduler->set_threshold(m_core_op_handle, threshold, network_name);
     CHECK_SUCCESS(status);
     return HAILO_SUCCESS;
 }
@@ -371,14 +362,14 @@ hailo_status VDeviceCoreOp::set_scheduler_priority(uint8_t priority, const std::
     if (network_name != HailoRTDefaults::get_network_name(name())) {
         CHECK(network_name.empty(), HAILO_NOT_IMPLEMENTED, "Setting scheduler priority for a specific network is currently not supported");
     }
-    auto status = core_ops_scheduler->set_priority(m_scheduler_handle, priority, network_name);
+    auto status = core_ops_scheduler->set_priority(m_core_op_handle, priority, network_name);
     CHECK_SUCCESS(status);
     return HAILO_SUCCESS;
 }
 
 Expected<std::shared_ptr<LatencyMetersMap>> VDeviceCoreOp::get_latency_meters()
 {
-    return m_core_ops.begin()->second[0]->get_latency_meters();
+    return m_core_ops.begin()->second->get_latency_meters();
 }
 
 Expected<vdma::BoundaryChannelPtr> VDeviceCoreOp::get_boundary_vdma_channel_by_stream_name(const std::string &stream_name)
@@ -386,7 +377,7 @@ Expected<vdma::BoundaryChannelPtr> VDeviceCoreOp::get_boundary_vdma_channel_by_s
     CHECK_AS_EXPECTED(1 == m_core_ops.size(), HAILO_INVALID_OPERATION,
         "get_boundary_vdma_channel_by_stream_name function is not supported on more than 1 physical device.");
 
-    return m_core_ops.begin()->second[0]->get_boundary_vdma_channel_by_stream_name(stream_name);
+    return m_core_ops.begin()->second->get_boundary_vdma_channel_by_stream_name(stream_name);
 }
 
 void VDeviceCoreOp::set_vstreams_multiplexer_callbacks(std::vector<OutputVStream> &output_vstreams)
@@ -407,42 +398,64 @@ void VDeviceCoreOp::set_vstreams_multiplexer_callbacks(std::vector<OutputVStream
     }
 }
 
-Expected<std::shared_ptr<VdmaConfigCoreOp>> VDeviceCoreOp::get_core_op_by_device_id(const device_id_t &device_id)
+hailo_status VDeviceCoreOp::activate_impl(uint16_t dynamic_batch_size)
 {
-    CHECK_AS_EXPECTED(m_core_ops.count(device_id), HAILO_INVALID_ARGUMENT);
-    auto core_op = std::dynamic_pointer_cast<VdmaConfigCoreOp>(m_core_ops[device_id][0]);
-    CHECK_NOT_NULL_AS_EXPECTED(core_op, HAILO_INTERNAL_FAILURE);
-    return core_op;
+    assert(!m_core_ops_scheduler.lock());
+
+    // Activate all physical device core ops.
+    for (const auto &pair : m_core_ops) {
+        auto &core_op = pair.second;
+        auto status = core_op->activate(dynamic_batch_size);
+        CHECK_SUCCESS(status);
+    }
+
+    // Activate low level streams
+    return activate_low_level_streams();
 }
 
-Expected<std::unique_ptr<ActivatedNetworkGroup>> VDeviceCoreOp::create_activated_network_group(
-    const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size,
-    bool resume_pending_stream_transfers)
+hailo_status VDeviceCoreOp::deactivate_impl()
 {
-    auto start_time = std::chrono::steady_clock::now();
+    auto status = HAILO_SUCCESS; // Success oriented
 
-    CHECK_AS_EXPECTED(!m_core_ops_scheduler.lock(), HAILO_INVALID_OPERATION,
-        "Manually activating a core-op is not allowed when the core-op scheduler is active!");
+    auto deactivate_status = deactivate_low_level_streams();
+    if (HAILO_SUCCESS != deactivate_status) {
+        LOGGER__ERROR("Failed to deactivate low level streams with {}", deactivate_status);
+        status = deactivate_status; // continue on failure
+    }
 
-    auto res = VDeviceActivatedCoreOp::create(m_core_ops, m_input_streams, m_output_streams,
-        network_group_params, m_core_op_activated_event, dynamic_batch_size, m_deactivation_time_accumulator,
-        resume_pending_stream_transfers);
-    const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
-        std::chrono::steady_clock::now() - start_time).count();
-    CHECK_EXPECTED(res);
+    for (const auto &pair : m_core_ops) {
+        auto &core_op = pair.second;
+        deactivate_status = core_op->deactivate();
+        if (HAILO_SUCCESS != deactivate_status) {
+            LOGGER__ERROR("Failed to deactivate low level streams with {}", deactivate_status);
+            status = deactivate_status; // continue on failure
 
-    LOGGER__INFO("Activating {} on VDevice took {} milliseconds. Note that the function is asynchronous and"
-        " thus the network is not fully activated yet.", name(), elapsed_time_ms);
-    m_activation_time_accumulator->add_data_point(elapsed_time_ms);
+        }
+    }
 
-    return res;
+    return status;
+}
+
+Expected<std::shared_ptr<VdmaConfigCoreOp>> VDeviceCoreOp::get_core_op_by_device_id(const device_id_t &device_id)
+{
+    CHECK_AS_EXPECTED(m_core_ops.count(device_id), HAILO_INVALID_ARGUMENT);
+    auto core_op = std::dynamic_pointer_cast<VdmaConfigCoreOp>(m_core_ops[device_id]);
+    CHECK_NOT_NULL_AS_EXPECTED(core_op, HAILO_INTERNAL_FAILURE);
+    return core_op;
 }
 
 Expected<HwInferResults> VDeviceCoreOp::run_hw_infer_estimator()
 {
     CHECK_AS_EXPECTED(1 == m_core_ops.size(), HAILO_INVALID_OPERATION,
         "run_hw_infer_estimator function is not supported on more than 1 physical device.");
-    return m_core_ops.begin()->second[0]->run_hw_infer_estimator();
+    return m_core_ops.begin()->second->run_hw_infer_estimator();
+}
+
+Expected<Buffer> VDeviceCoreOp::get_intermediate_buffer(const IntermediateBufferKey &key)
+{
+    CHECK_AS_EXPECTED(1 == m_core_ops.size(), HAILO_INVALID_OPERATION,
+        "get_intermediate_buffer function is not supported on more than 1 physical device.");
+    return m_core_ops.begin()->second->get_intermediate_buffer(key);
 }
 
 } /* namespace hailort */
index e93c5e8688b1f003f24fcc268a59a50a6edb8100..f0b1dd6e85034e50d343e4268a10d6109c0444d9 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "vdevice/scheduler/scheduler.hpp"
 #include "vdevice/pipeline_multiplexer.hpp"
+#include "utils/profiler/tracer_macros.hpp"
 
 #include <cstdint>
 
 namespace hailort
 {
 
-class VDeviceActivatedCoreOp : public ActivatedCoreOp
-{
-public:
-    static Expected<std::unique_ptr<ActivatedNetworkGroup>> create(std::map<device_id_t, std::vector<std::shared_ptr<CoreOp>>> &core_ops,
-        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
-        const hailo_activate_network_group_params_t &network_group_params, EventPtr core_op_activated_event,
-        uint16_t dynamic_batch_size, AccumulatorPtr deactivation_time_accumulator,
-        bool resume_pending_stream_transfers);
-
-    virtual ~VDeviceActivatedCoreOp()
-    {
-        if (!m_should_reset_core_op) {
-            return;
-        }
-        const auto start_time = std::chrono::steady_clock::now();
-
-        m_core_op_activated_event->reset();
-        m_activated_network_groups.clear();
-
-        const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
-            std::chrono::steady_clock::now() - start_time).count();
-        LOGGER__INFO("Deactivating took {} ms", elapsed_time_ms);
-        m_deactivation_time_accumulator->add_data_point(elapsed_time_ms);
-    }
-
-    VDeviceActivatedCoreOp(const VDeviceActivatedCoreOp &other) = delete;
-    VDeviceActivatedCoreOp &operator=(const VDeviceActivatedCoreOp &other) = delete;
-    VDeviceActivatedCoreOp &operator=(VDeviceActivatedCoreOp &&other) = delete;
-    VDeviceActivatedCoreOp(VDeviceActivatedCoreOp &&other) noexcept;
-
-    virtual const std::string &get_network_group_name() const override;
-    virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key) override;
-    virtual hailo_status set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset) override;
-
-private:
-    VDeviceActivatedCoreOp(
-        std::vector<std::unique_ptr<ActivatedNetworkGroup>> &&activated_network_groups,
-        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
-        const hailo_activate_network_group_params_t &network_group_params, EventPtr core_op_activated_event,
-        AccumulatorPtr deactivation_time_accumulator, hailo_status &status);
-
-    std::vector<std::unique_ptr<ActivatedNetworkGroup>> m_activated_network_groups;
-    bool m_should_reset_core_op;
-    AccumulatorPtr m_deactivation_time_accumulator;
-};
 
 class VDeviceCoreOp : public CoreOp
 {
 public:
-    static Expected<std::shared_ptr<VDeviceCoreOp>> create(const std::map<device_id_t, std::vector<std::shared_ptr<CoreOp>>> &core_ops,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler, const std::string &hef_hash);
+    static Expected<std::shared_ptr<VDeviceCoreOp>> create(
+        ActiveCoreOpHolder &active_core_op_holder,
+        const ConfigureNetworkParams &configure_params,
+        const std::map<device_id_t, std::shared_ptr<CoreOp>> &core_ops,
+        CoreOpsSchedulerWeakPtr core_ops_scheduler, vdevice_core_op_handle_t core_op_handle,
+        std::shared_ptr<PipelineMultiplexer> multiplexer, const std::string &hef_hash);
 
-    static Expected<std::shared_ptr<VDeviceCoreOp>> duplicate(std::shared_ptr<VDeviceCoreOp> other);
+    static Expected<std::shared_ptr<VDeviceCoreOp>> duplicate(std::shared_ptr<VDeviceCoreOp> other,
+        const ConfigureNetworkParams &configure_params);
 
     virtual ~VDeviceCoreOp() = default;
     VDeviceCoreOp(const VDeviceCoreOp &other) = delete;
@@ -87,17 +46,6 @@ public:
     VDeviceCoreOp &operator=(VDeviceCoreOp &&other) = delete;
     VDeviceCoreOp(VDeviceCoreOp &&other) = default;
 
-    hailo_status create_vdevice_streams_from_config_params(std::shared_ptr<PipelineMultiplexer> multiplexer,
-        scheduler_core_op_handle_t scheduler_handle);
-    hailo_status create_input_vdevice_stream_from_config_params(
-        const hailo_stream_parameters_t &stream_params, const std::string &stream_name,
-        std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t scheduler_handle);
-    hailo_status create_output_vdevice_stream_from_config_params(
-        const hailo_stream_parameters_t &stream_params, const std::string &stream_name,
-        std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t scheduler_handle);
-
-    hailo_status create_vdevice_streams_from_duplicate(std::shared_ptr<VDeviceCoreOp> other);
-
     bool equals(const Hef &hef, const std::pair<const std::string, ConfigureNetworkParams> &params_pair)
     {
         if ((params_pair.first == name()) && (hef.hash() == m_hef_hash)) {
@@ -133,8 +81,7 @@ public:
     virtual Expected<vdma::BoundaryChannelPtr> get_boundary_vdma_channel_by_stream_name(
         const std::string &stream_name) override;
 
-    void set_core_op_handle(scheduler_core_op_handle_t handle);
-    scheduler_core_op_handle_t core_op_handle() const;
+    vdevice_core_op_handle_t core_op_handle() const;
     virtual bool is_scheduled() const override;
     virtual hailo_status set_scheduler_timeout(const std::chrono::milliseconds &timeout, const std::string &network_name) override;
     virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
@@ -150,33 +97,35 @@ public:
         return m_core_op_activated_event->wait(timeout);
     }
 
-    virtual hailo_status activate_impl(uint16_t /*dynamic_batch_size*/, bool /* resume_pending_stream_transfers */) override
-    {
-        return HAILO_INTERNAL_FAILURE;
-    }
-
-    virtual hailo_status deactivate_impl(bool /* keep_nn_config_during_reset */) override
-    {
-        return HAILO_INTERNAL_FAILURE;
-    }
-
-    virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
-        const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size,
-        bool resume_pending_stream_transfers) override;
+    virtual hailo_status activate_impl(uint16_t dynamic_batch_size) override;
+    virtual hailo_status deactivate_impl() override;
 
     Expected<std::shared_ptr<VdmaConfigCoreOp>> get_core_op_by_device_id(const device_id_t &device_bdf_id);
 
     virtual Expected<HwInferResults> run_hw_infer_estimator() override;
+    virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &) override;
 
 private:
-    VDeviceCoreOp(const std::map<device_id_t, std::vector<std::shared_ptr<CoreOp>>> &core_ops, CoreOpsSchedulerWeakPtr core_ops_scheduler,
+    VDeviceCoreOp(ActiveCoreOpHolder &active_core_op_holder,
+        const ConfigureNetworkParams &configure_params,
+        const std::map<device_id_t, std::shared_ptr<CoreOp>> &core_ops,
+        CoreOpsSchedulerWeakPtr core_ops_scheduler, scheduler_core_op_handle_t core_op_handle,
+        std::shared_ptr<PipelineMultiplexer> multiplexer, // TODO: multiplexer handle
         const std::string &hef_hash, hailo_status &status);
 
-    std::map<device_id_t, std::vector<std::shared_ptr<CoreOp>>> m_core_ops;
+    hailo_status create_vdevice_streams_from_config_params();
+    hailo_status create_input_vdevice_stream_from_config_params(
+        const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
+    hailo_status create_output_vdevice_stream_from_config_params(
+        const hailo_stream_parameters_t &stream_params, const std::string &stream_name);
+
+    hailo_status create_vdevice_streams_from_duplicate(std::shared_ptr<VDeviceCoreOp> other);
+
+    std::map<device_id_t, std::shared_ptr<CoreOp>> m_core_ops;
     CoreOpsSchedulerWeakPtr m_core_ops_scheduler;
-    scheduler_core_op_handle_t m_scheduler_handle;
-    multiplexer_core_op_handle_t m_multiplexer_handle;
+    const vdevice_core_op_handle_t m_core_op_handle;
     std::shared_ptr<PipelineMultiplexer> m_multiplexer;
+    multiplexer_core_op_handle_t m_multiplexer_handle;
     std::string m_hef_hash;
 };
 
index a2ba6985ca9f0c6e4caad8084dc61f00d9560cd5..6a059dd108a939ed365c37e72b330b978e74b87c 100644 (file)
@@ -46,7 +46,7 @@ public:
     VDeviceBase(const VDeviceBase &) = delete;
     VDeviceBase &operator=(VDeviceBase &&) = delete;
     VDeviceBase &operator=(const VDeviceBase &) = delete;
-    virtual ~VDeviceBase() = default;
+    virtual ~VDeviceBase();
 
     virtual Expected<ConfiguredNetworkGroupVector> configure(Hef &hef,
         const NetworkGroupsParamsMap &configure_params={}) override;
@@ -78,6 +78,8 @@ public:
         return m_core_ops_scheduler;
     }
 
+    virtual Expected<InferModel> create_infer_model(const std::string &hef_path) override;
+
     // Currently only homogeneous vDevice is allow (= all devices are from the same type)
     virtual Expected<hailo_stream_interface_t> get_default_streams_interface() const override;
 
@@ -85,7 +87,7 @@ public:
 
 private:
     VDeviceBase(std::map<device_id_t, std::unique_ptr<Device>> &&devices, CoreOpsSchedulerPtr core_ops_scheduler) :
-        m_devices(std::move(devices)), m_core_ops_scheduler(core_ops_scheduler)
+        m_devices(std::move(devices)), m_core_ops_scheduler(core_ops_scheduler), m_next_core_op_handle(0)
         {}
 
     static Expected<std::map<device_id_t, std::unique_ptr<Device>>> create_devices(const hailo_vdevice_params_t &params);
@@ -94,12 +96,14 @@ private:
     Expected<std::shared_ptr<VDeviceCoreOp>> create_vdevice_network_group(Hef &hef,
         const std::pair<const std::string, ConfigureNetworkParams> &params, bool use_multiplexer);
     bool should_use_multiplexer(const ConfigureNetworkParams &params);
+    vdevice_core_op_handle_t allocate_core_op_handle();
 
     std::map<device_id_t, std::unique_ptr<Device>> m_devices;
     CoreOpsSchedulerPtr m_core_ops_scheduler;
     std::vector<std::shared_ptr<VDeviceCoreOp>> m_vdevice_core_ops;
     std::vector<std::shared_ptr<ConfiguredNetworkGroup>> m_network_groups; // TODO: HRT-9547 - Remove when ConfiguredNetworkGroup will be kept in global context
-
+    ActiveCoreOpHolder m_active_core_op_holder;
+    vdevice_core_op_handle_t m_next_core_op_handle;
     std::mutex m_mutex;
 };
 
@@ -122,18 +126,19 @@ public:
 
     Expected<std::vector<std::string>> get_physical_devices_ids() const override;
     Expected<hailo_stream_interface_t> get_default_streams_interface() const override;
+    virtual Expected<InferModel> create_infer_model(const std::string &hef_path) override;
 
     virtual hailo_status before_fork() override;
     virtual hailo_status after_fork_in_parent() override;
     virtual hailo_status after_fork_in_child() override;
 
 private:
-    VDeviceClient(std::unique_ptr<HailoRtRpcClient> client, uint32_t handle, std::vector<std::unique_ptr<hailort::Device>> &&devices);
+    VDeviceClient(std::unique_ptr<HailoRtRpcClient> client, VDeviceIdentifier &&identifier, std::vector<std::unique_ptr<hailort::Device>> &&devices);
 
     hailo_status create_client();
 
     std::unique_ptr<HailoRtRpcClient> m_client;
-    uint32_t m_handle;
+    VDeviceIdentifier m_identifier;
     std::vector<std::unique_ptr<Device>> m_devices;
     std::vector<std::shared_ptr<ConfiguredNetworkGroup>> m_network_groups;
 };
@@ -157,6 +162,7 @@ public:
     Expected<std::vector<std::reference_wrapper<Device>>> get_physical_devices() const override;
     Expected<std::vector<std::string>> get_physical_devices_ids() const override;
     Expected<hailo_stream_interface_t> get_default_streams_interface() const override;
+    Expected<InferModel> create_infer_model(const std::string &hef_path) override;
 
 private:
     VDeviceHandle(uint32_t handle);
index 2022c1848fee9eb4040bed9aaffc42234fa0dc7f..05b00b9ac0b3ef0b146e54d3d9fa48cf76f189d3 100644 (file)
 
 namespace hailort {
 
+
 /** Input stream **/
-hailo_status VDeviceNativeInputStreamBase::abort()
+Expected<std::unique_ptr<VDeviceNativeInputStream>> VDeviceNativeInputStream::create(
+    std::map<device_id_t, std::reference_wrapper<InputStreamBase>> &&streams,
+    EventPtr core_op_activated_event,
+    const LayerInfo &layer_info,
+    uint16_t batch_size,
+    vdevice_core_op_handle_t core_op_handle)
+{
+    std::unique_ptr<CallbackReorderQueue> reorder_queue = nullptr;
+    if (auto max_queue_size_per_stream = streams.begin()->second.get().get_buffer_frames_size()) {
+        const auto max_queue_size = max_queue_size_per_stream.value() * streams.size();
+        reorder_queue = make_unique_nothrow<CallbackReorderQueue>(max_queue_size);
+        CHECK_NOT_NULL_AS_EXPECTED(reorder_queue, HAILO_OUT_OF_HOST_MEMORY);
+    }
+
+    auto status = HAILO_UNINITIALIZED;
+    auto stream = make_unique_nothrow<VDeviceNativeInputStream>(std::move(streams),
+        std::move(core_op_activated_event), layer_info, batch_size, core_op_handle, std::move(reorder_queue), status);
+    CHECK_AS_EXPECTED((nullptr != stream), HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    return stream;
+}
+
+hailo_status VDeviceNativeInputStream::set_buffer_mode(StreamBufferMode buffer_mode)
+{
+    // The buffer is not owned by this class so we just forward the mode to base streams
+    for (const auto &pair : m_streams) {
+        auto &stream = pair.second.get();
+        auto status = stream.set_buffer_mode(buffer_mode);
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status VDeviceNativeInputStream::activate_stream()
+{
+    // m_streams should be activated when the specific core op is activated.
+    return HAILO_SUCCESS;
+}
+
+hailo_status VDeviceNativeInputStream::deactivate_stream()
+{
+    // m_streams should be deactivated when the specific core op is deactivated.
+    return HAILO_SUCCESS;
+}
+
+hailo_status VDeviceNativeInputStream::abort()
 {
     auto status = HAILO_SUCCESS; // Best effort
     for (auto &pair: m_streams){
+        const auto &device_id = pair.first;
         auto &stream = pair.second;
         auto abort_status = stream.get().abort();
         if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to abort input stream. (status: {} device: {})", status, stream.get().get_dev_id());
+            LOGGER__ERROR("Failed to abort input stream. (status: {} device: {})", status, device_id);
             status = abort_status;
         }
     }
     return status;
 }
 
-hailo_status VDeviceNativeInputStreamBase::clear_abort()
+hailo_status VDeviceNativeInputStream::clear_abort()
 {
     auto status = HAILO_SUCCESS; // Best effort
     for (auto &pair: m_streams){
+        const auto &device_id = pair.first;
         auto &stream = pair.second;
         auto clear_abort_status = stream.get().clear_abort();
         if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
-            LOGGER__ERROR("Failed to clear abort input stream. (status: {} device: {})", clear_abort_status, stream.get().get_dev_id());
+            LOGGER__ERROR("Failed to clear abort input stream. (status: {} device: {})", clear_abort_status, device_id);
             status = clear_abort_status;
         }
     }
@@ -42,82 +91,117 @@ hailo_status VDeviceNativeInputStreamBase::clear_abort()
     return status;
 }
 
-Expected<std::unique_ptr<VDeviceNativeInputStream>> VDeviceNativeInputStream::create(
-    std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-    EventPtr &&core_op_activated_event,
-    const LayerInfo &layer_info)
+std::chrono::milliseconds VDeviceNativeInputStream::get_timeout() const
 {
-    auto status = HAILO_UNINITIALIZED;
-    auto stream = make_unique_nothrow<VDeviceNativeInputStream>(std::move(streams),
-        std::move(core_op_activated_event), layer_info, status);
-    CHECK_AS_EXPECTED((nullptr != stream), HAILO_OUT_OF_HOST_MEMORY);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return stream;
+    // All timeout values of m_streams should be the same
+    return m_streams.begin()->second.get().get_timeout();
 }
 
-hailo_status VDeviceNativeInputStream::write_impl(const MemoryView &buffer, const std::function<bool()> &should_cancel)
+hailo_status VDeviceNativeInputStream::set_timeout(std::chrono::milliseconds timeout)
 {
-    if (should_cancel()) {
-        return HAILO_STREAM_ABORTED_BY_USER;
-    }
-    auto status = m_streams.at(m_next_transfer_stream).get().write_impl(buffer);
-    if (HAILO_SUCCESS != status) {
-        LOGGER__INFO("Write to stream has failed! status = {}", status);
-        return status;
+    for (const auto &pair : m_streams) {
+        const auto &device_id = pair.first;
+        auto &stream = pair.second;
+        auto status = stream.get().set_timeout(timeout);
+        CHECK_SUCCESS(status, "Failed to set timeout to input stream. (device: {})", device_id);
     }
+    return HAILO_SUCCESS;
+}
 
-    // Update m_next_transfer_stream only if 'batch' frames has been transferred
-    if (0 == (++m_acc_frames % m_streams.begin()->second.get().get_dynamic_batch_size())) {
-        auto it = m_streams.upper_bound(m_next_transfer_stream);
-        if (m_streams.end() == it) {
-            it = m_streams.begin();
+hailo_stream_interface_t VDeviceNativeInputStream::get_interface() const
+{
+    // All interface values of m_streams should be the same
+    return m_streams.begin()->second.get().get_interface();
+}
+
+Expected<size_t> VDeviceNativeInputStream::get_buffer_frames_size() const
+{
+    // All get_buffer_frames_size values of m_streams should be the same
+    return m_streams.begin()->second.get().get_buffer_frames_size();
+}
+
+hailo_status VDeviceNativeInputStream::flush()
+{
+    auto status = HAILO_SUCCESS; // Best effort
+    for (const auto &pair : m_streams) {
+        const auto &device_id = pair.first;
+        auto &stream = pair.second;
+        auto flush_status = stream.get().flush();
+        if (HAILO_SUCCESS != status) {
+            LOGGER__ERROR("Failed to flush input stream. (status: {} device: {})", status, device_id);
+            status = flush_status;
         }
-        m_next_transfer_stream = it->first;
-        m_acc_frames = 0;
     }
-    return HAILO_SUCCESS;
+    return status;
 }
 
-Expected<std::unique_ptr<VDeviceNativeAsyncInputStream>> VDeviceNativeAsyncInputStream::create(
-    std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-    EventPtr &&core_op_activated_event,
-    const LayerInfo &layer_info)
+hailo_status VDeviceNativeInputStream::write_impl(const MemoryView &buffer)
 {
-    auto max_queue_size_per_stream = streams.begin()->second.get().get_buffer_frames_size();
-    CHECK_EXPECTED(max_queue_size_per_stream);
-    const auto max_queue_size = max_queue_size_per_stream.value() * streams.size();
+    TRACE(WriteFrameTrace, m_core_op_handle, name());
 
-    auto status = HAILO_UNINITIALIZED;
-    auto stream = make_unique_nothrow<VDeviceNativeAsyncInputStream>(std::move(streams),
-        std::move(core_op_activated_event), layer_info, max_queue_size, status);
-    CHECK_AS_EXPECTED((nullptr != stream), HAILO_OUT_OF_HOST_MEMORY);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return stream;
+    auto status = next_stream().write_impl(buffer);
+    if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)){
+        LOGGER__INFO("Failed write to stream {} (device: {}) with status={}", name(), m_next_transfer_stream, status);
+        return status;
+    }
+    CHECK_SUCCESS(status, "Failed write to stream (device: {})", m_next_transfer_stream);
+
+    advance_stream();
+    return HAILO_SUCCESS;
 }
 
-hailo_status VDeviceNativeAsyncInputStream::wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout)
+hailo_status VDeviceNativeInputStream::wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout)
 {
     return m_streams.at(m_next_transfer_stream).get().wait_for_async_ready(transfer_size, timeout);
 }
 
-Expected<size_t> VDeviceNativeAsyncInputStream::get_async_max_queue_size() const
+Expected<size_t> VDeviceNativeInputStream::get_async_max_queue_size() const
 {
-    return Expected<size_t>(m_max_queue_size);
+    // The actual max queue size can't be just max_queue_per_stream * m_streams.size(),
+    // since we transfer an entire batch to each device at a time (so even if there is room
+    // to transfer in other streams, we first finish the current batch).
+    // To overcome this problem, we check how many "batches" we can transfer at a time (batch_count_queued)
+    // and make sure the queue for each stream contains whole batches. We could potentially transfer
+    // the residue of the batch from the last device, but then we would have problems with non-batch-aligned
+    // transfers.
+    auto &first_stream = m_streams.begin()->second.get();
+    const auto max_queue_per_stream = first_stream.get_async_max_queue_size();
+    CHECK_EXPECTED(max_queue_per_stream);
+
+    assert(*max_queue_per_stream >= m_batch_size);
+
+    const auto batch_count_queued = *max_queue_per_stream / m_batch_size;
+    const auto actual_queue_per_stream = m_batch_size * batch_count_queued;
+
+    return actual_queue_per_stream * m_streams.size();
 }
 
-hailo_status VDeviceNativeAsyncInputStream::write_async(TransferRequest &&transfer_request)
+hailo_status VDeviceNativeInputStream::write_async(TransferRequest &&transfer_request)
 {
     // TODO HRT-10583 - allow option to remove reorder queue
-    transfer_request.callback = m_callback_reorder_queue.wrap_callback(transfer_request.callback);
+    CHECK(m_callback_reorder_queue, HAILO_INVALID_OPERATION, "Stream does not support async api");
+    transfer_request.callback = m_callback_reorder_queue->wrap_callback(transfer_request.callback);
 
-    auto status = m_streams.at(m_next_transfer_stream).get().write_async(std::move(transfer_request));
+    TRACE(WriteFrameTrace, m_core_op_handle, name());
+
+    auto status = next_stream().write_async(std::move(transfer_request));
     if (HAILO_SUCCESS != status) {
-        m_callback_reorder_queue.cancel_last_callback();
+        m_callback_reorder_queue->cancel_last_callback();
         return status;
     }
 
-    // Update m_next_transfer_stream_index only if 'batch' frames has been transferred
-    if (0 == (++m_acc_frames % m_streams.begin()->second.get().get_dynamic_batch_size())) {
+    advance_stream();
+    return HAILO_SUCCESS;
+}
+
+InputStreamBase &VDeviceNativeInputStream::next_stream()
+{
+    return m_streams.at(m_next_transfer_stream).get();
+}
+
+void VDeviceNativeInputStream::advance_stream()
+{
+    if (0 == (++m_acc_frames % m_batch_size)) {
         auto it = m_streams.upper_bound(m_next_transfer_stream);
         if (m_streams.end() == it) {
             it = m_streams.begin();
@@ -125,24 +209,64 @@ hailo_status VDeviceNativeAsyncInputStream::write_async(TransferRequest &&transf
         m_next_transfer_stream = it->first;
         m_acc_frames = 0;
     }
+}
+
+/** Output stream **/
+Expected<std::unique_ptr<VDeviceNativeOutputStream>> VDeviceNativeOutputStream::create(
+    std::map<device_id_t, std::reference_wrapper<OutputStreamBase>> &&streams,
+    EventPtr core_op_activated_event,
+    const LayerInfo &layer_info,
+    uint16_t batch_size,
+    vdevice_core_op_handle_t core_op_handle)
+{
+    std::unique_ptr<CallbackReorderQueue> reorder_queue = nullptr;
+    if (auto max_queue_size_per_stream = streams.begin()->second.get().get_buffer_frames_size()) {
+        const auto max_queue_size = max_queue_size_per_stream.value() * streams.size();
+        reorder_queue = make_unique_nothrow<CallbackReorderQueue>(max_queue_size);
+        CHECK_NOT_NULL_AS_EXPECTED(reorder_queue, HAILO_OUT_OF_HOST_MEMORY);
+    }
+
+    auto status = HAILO_UNINITIALIZED;
+    auto stream = make_unique_nothrow<VDeviceNativeOutputStream>(std::move(streams),
+        std::move(core_op_activated_event), layer_info, batch_size, core_op_handle, std::move(reorder_queue), status);
+    CHECK_AS_EXPECTED((nullptr != stream), HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    return stream;
+}
+
+hailo_status VDeviceNativeOutputStream::set_buffer_mode(StreamBufferMode buffer_mode)
+{
+    // The buffer is not owned by this class so we just forward the mode to base streams
+    for (const auto &pair : m_streams) {
+        auto &stream = pair.second.get();
+        auto status = stream.set_buffer_mode(buffer_mode);
+        CHECK_SUCCESS(status);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status VDeviceNativeOutputStream::activate_stream()
+{
+    // m_streams should be activated when the specific core op is activated.
     return HAILO_SUCCESS;
 }
 
-hailo_status VDeviceNativeAsyncInputStream::write_impl(const MemoryView &, const std::function<bool()> &)
+hailo_status VDeviceNativeOutputStream::deactivate_stream()
 {
-    LOGGER__ERROR("Sync write is not supported by async streams");
-    return HAILO_INVALID_OPERATION;
+    // m_streams should be deactivated when the specific core op is deactivated.
+    return HAILO_SUCCESS;
 }
 
-/** Output stream **/
-hailo_status VDeviceNativeOutputStreamBase::abort()
+hailo_status VDeviceNativeOutputStream::abort()
 {
     auto status = HAILO_SUCCESS; // Best effort
     for (const auto &pair : m_streams) {
+        const auto &device_id = pair.first;
         auto &stream = pair.second;
         auto abort_status = stream.get().abort();
         if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to abort output stream. (status: {} device: {})", status, stream.get().get_dev_id());
+            LOGGER__ERROR("Failed to abort output stream. (status: {} device: {})", status, device_id);
             status = abort_status;
         }
     }
@@ -150,14 +274,15 @@ hailo_status VDeviceNativeOutputStreamBase::abort()
     return status;
 }
 
-hailo_status VDeviceNativeOutputStreamBase::clear_abort()
+hailo_status VDeviceNativeOutputStream::clear_abort()
 {
     auto status = HAILO_SUCCESS; // Best effort
     for (const auto &pair : m_streams) {
+        const auto &device_id = pair.first;
         auto &stream = pair.second;
         auto clear_abort_status = stream.get().clear_abort();
         if ((HAILO_SUCCESS != clear_abort_status) && (HAILO_STREAM_NOT_ACTIVATED != clear_abort_status)) {
-            LOGGER__ERROR("Failed to clear abort output stream. (status: {} device: {})", clear_abort_status, stream.get().get_dev_id());
+            LOGGER__ERROR("Failed to clear abort output stream. (status: {} device: {})", clear_abort_status, device_id);
             status = clear_abort_status;
         }
     }
@@ -165,80 +290,111 @@ hailo_status VDeviceNativeOutputStreamBase::clear_abort()
     return status;
 }
 
-Expected<std::unique_ptr<VDeviceNativeOutputStream>> VDeviceNativeOutputStream::create(
-    std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
-    EventPtr &&core_op_activated_event,
-    const LayerInfo &layer_info)
+std::chrono::milliseconds VDeviceNativeOutputStream::get_timeout() const
 {
-    auto status = HAILO_UNINITIALIZED;
-    auto stream = make_unique_nothrow<VDeviceNativeOutputStream>(std::move(streams),
-        std::move(core_op_activated_event), layer_info, status);
-    CHECK_AS_EXPECTED((nullptr != stream), HAILO_OUT_OF_HOST_MEMORY);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return stream;
+    // All timeout values of m_streams should be the same
+    return m_streams.begin()->second.get().get_timeout();
 }
 
-hailo_status VDeviceNativeOutputStream::read(MemoryView buffer)
+hailo_status VDeviceNativeOutputStream::set_timeout(std::chrono::milliseconds timeout)
 {
-    auto status = m_streams.at(m_next_transfer_stream).get().read(buffer);
-    if (HAILO_STREAM_ABORTED_BY_USER == status) {
-      // In case of aborted by user, don't show it as infer error
-      LOGGER__INFO("Stream aborted by user (device: {})", m_streams.at(m_next_transfer_stream).get().get_dev_id());
-      return status;
+    for (const auto &pair : m_streams) {
+        const auto &device_id = pair.first;
+        auto &stream = pair.second;
+        auto status = stream.get().set_timeout(timeout);
+        CHECK_SUCCESS(status, "Failed to set timeout to output stream. (device: {})", device_id);
     }
-    CHECK_SUCCESS(status, "Read from stream has failed! status = {}", status);
+    return HAILO_SUCCESS;
+}
 
-    // Update m_next_transfer_stream_index only if 'batch' frames has been transferred
-    if (0 == (++m_acc_frames % m_streams.begin()->second.get().get_dynamic_batch_size())) {
-        auto it = m_streams.upper_bound(m_next_transfer_stream);
-        if (m_streams.end() == it) {
-            it = m_streams.begin();
-        }
-        m_next_transfer_stream = it->first;
-        m_acc_frames = 0;
-    }
+hailo_stream_interface_t VDeviceNativeOutputStream::get_interface() const
+{
+    // All interface values of m_streams should be the same
+    return m_streams.begin()->second.get().get_interface();
+}
 
-    return HAILO_SUCCESS;
+Expected<size_t> VDeviceNativeOutputStream::get_buffer_frames_size() const
+{
+    // All get_buffer_frames_size values of m_streams should be the same
+    return m_streams.begin()->second.get().get_buffer_frames_size();
 }
 
-Expected<std::unique_ptr<VDeviceNativeAsyncOutputStream>> VDeviceNativeAsyncOutputStream::create(
-    std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
-    EventPtr &&core_op_activated_event,
-    const LayerInfo &layer_info)
+hailo_status VDeviceNativeOutputStream::read_impl(MemoryView buffer)
 {
-    auto max_queue_size_per_stream = streams.begin()->second.get().get_buffer_frames_size();
-    CHECK_EXPECTED(max_queue_size_per_stream);
-    const auto max_queue_size = max_queue_size_per_stream.value() * streams.size();
+    auto status = next_stream().read_impl(buffer);
+    if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)){
+      LOGGER__INFO("Failed read from stream {} (device: {})", status, m_next_transfer_stream);
+      return status;
+    }
+    CHECK_SUCCESS(status, "Failed read from stream (device: {})", m_next_transfer_stream);
 
-    auto status = HAILO_UNINITIALIZED;
-    auto stream = make_unique_nothrow<VDeviceNativeAsyncOutputStream>(std::move(streams),
-        std::move(core_op_activated_event), layer_info, max_queue_size, status);
-    CHECK_AS_EXPECTED((nullptr != stream), HAILO_OUT_OF_HOST_MEMORY);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    return stream;
+    TRACE(ReadFrameTrace, m_core_op_handle, name());
+
+    advance_stream();
+    return HAILO_SUCCESS;
 }
 
-hailo_status VDeviceNativeAsyncOutputStream::wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout)
+hailo_status VDeviceNativeOutputStream::wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout)
 {
-    return m_streams.at(m_next_transfer_stream).get().wait_for_async_ready(transfer_size, timeout);
+    return next_stream().wait_for_async_ready(transfer_size, timeout);
 }
 
-Expected<size_t> VDeviceNativeAsyncOutputStream::get_async_max_queue_size() const
+Expected<size_t> VDeviceNativeOutputStream::get_async_max_queue_size() const
 {
-    return Expected<size_t>(m_max_queue_size);
+    // The actual max queue size can't be just max_queue_per_stream * m_streams.size(),
+    // since we transfer an entire batch for each device at a time (so even if we have room
+    // to transfer in other streams, we first finish the batch).
+    // To overcome this problem, we check how many "batches" we can transfer at a time (batch_count_queued)
+    // and make sure the queue for each stream contains a specific batch. We could potentially transfer
+    // the residue of the batch from the last device, but then we would have problems with non-batch aligned
+    // transfers.
+    auto &first_stream = m_streams.begin()->second.get();
+    const auto max_queue_per_stream = first_stream.get_async_max_queue_size();
+    CHECK_EXPECTED(max_queue_per_stream);
+
+    assert(*max_queue_per_stream >= m_batch_size);
+
+    const auto batch_count_queued = *max_queue_per_stream / m_batch_size;
+    const auto actual_queue_per_stream = m_batch_size * batch_count_queued;
+
+    return actual_queue_per_stream * m_streams.size();
 }
 
-hailo_status VDeviceNativeAsyncOutputStream::read_async(TransferRequest &&transfer_request)
+hailo_status VDeviceNativeOutputStream::read_async(TransferRequest &&transfer_request)
 {
     // TODO HRT-10583 - allow option to remove reorder queue
-    transfer_request.callback = m_callback_reorder_queue.wrap_callback(transfer_request.callback);
-    auto status = m_streams.at(m_next_transfer_stream).get().read_async(std::move(transfer_request));
+    CHECK(m_callback_reorder_queue, HAILO_INVALID_OPERATION, "Stream does not support async api");
+
+
+    auto reorder_queue_callback = m_callback_reorder_queue->wrap_callback(transfer_request.callback);
+
+    transfer_request.callback = [this, callback=reorder_queue_callback](hailo_status status) {
+        if (HAILO_SUCCESS == status) {
+            TRACE(ReadFrameTrace, m_core_op_handle, name());
+        }
+
+        callback(status);
+    };
+
+    auto status = next_stream().read_async(std::move(transfer_request));
     if (HAILO_SUCCESS != status) {
-        m_callback_reorder_queue.cancel_last_callback();
+        m_callback_reorder_queue->cancel_last_callback();
         return status;
     }
+
     // Update m_next_transfer_stream_index only if 'batch' frames has been transferred
-    if (0 == (++m_acc_frames % m_streams.begin()->second.get().get_dynamic_batch_size())) {
+    advance_stream();
+    return HAILO_SUCCESS;
+}
+
+OutputStreamBase &VDeviceNativeOutputStream::next_stream()
+{
+    return m_streams.at(m_next_transfer_stream).get();
+}
+
+void VDeviceNativeOutputStream::advance_stream()
+{
+    if (0 == (++m_acc_frames % m_batch_size)) {
         auto it = m_streams.upper_bound(m_next_transfer_stream);
         if (m_streams.end() == it) {
             it = m_streams.begin();
@@ -246,14 +402,6 @@ hailo_status VDeviceNativeAsyncOutputStream::read_async(TransferRequest &&transf
         m_next_transfer_stream = it->first;
         m_acc_frames = 0;
     }
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status VDeviceNativeAsyncOutputStream::read(MemoryView)
-{
-    LOGGER__ERROR("The read function is not supported by async streams");
-    return HAILO_INVALID_OPERATION;
 }
 
 } /* namespace hailort */
\ No newline at end of file
index 61ce42c9a2aad3e0d4c61c8916067101b2f29862..452095c024be38fcca3509e962ef16505aacf878 100644 (file)
 #include "hailo/expected.hpp"
 
 #include "stream_common/stream_internal.hpp"
-#include "vdevice_stream.hpp"
 #include "vdevice/callback_reorder_queue.hpp"
+#include "vdevice/vdevice_core_op.hpp"
 
 
 namespace hailort
 {
 
 
-class VDeviceNativeInputStreamBase : public VDeviceInputStreamBase {
+class VDeviceNativeInputStream : public InputStreamBase {
 public:
-    static Expected<std::unique_ptr<VDeviceNativeInputStreamBase>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info);
+    static Expected<std::unique_ptr<VDeviceNativeInputStream>> create(
+        std::map<device_id_t, std::reference_wrapper<InputStreamBase>> &&streams,
+        EventPtr core_op_activated_event,
+        const LayerInfo &layer_info,
+        uint16_t batch_size,
+        vdevice_core_op_handle_t core_op_handle);
 
-    VDeviceNativeInputStreamBase(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
+    VDeviceNativeInputStream(
+        std::map<device_id_t, std::reference_wrapper<InputStreamBase>> &&streams,
         EventPtr &&core_op_activated_event,
         const LayerInfo &layer_info,
+        uint16_t batch_size,
+        vdevice_core_op_handle_t core_op_handle,
+        std::unique_ptr<CallbackReorderQueue> &&callback_reorder_queue,
         hailo_status &status) :
-            VDeviceInputStreamBase(std::move(streams), std::move(core_op_activated_event), layer_info, status)
+            InputStreamBase(layer_info, streams.begin()->second.get().get_interface(),
+                            std::move(core_op_activated_event), status),
+            m_streams(std::move(streams)),
+            m_next_transfer_stream(m_streams.begin()->first),
+            m_acc_frames(0),
+            m_batch_size(batch_size),
+            m_callback_reorder_queue(std::move(callback_reorder_queue)),
+            m_core_op_handle(core_op_handle)
     {}
 
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override;
+    virtual hailo_status activate_stream() override;
+    virtual hailo_status deactivate_stream() override;
     virtual hailo_status abort() override;
     virtual hailo_status clear_abort() override;
     virtual bool is_scheduled() override { return false; };
-};
-
-class VDeviceNativeInputStream : public VDeviceNativeInputStreamBase {
-public:
-    static Expected<std::unique_ptr<VDeviceNativeInputStream>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info);
 
-    using VDeviceNativeInputStreamBase::VDeviceNativeInputStreamBase;
+    virtual hailo_stream_interface_t get_interface() const override;
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
 
-protected:
-    virtual hailo_status write_impl(const MemoryView &buffer, const std::function<bool()> &should_cancel) override;\
-};
-
-class VDeviceNativeAsyncInputStream : public VDeviceNativeInputStreamBase {
-public:
-    static Expected<std::unique_ptr<VDeviceNativeAsyncInputStream>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info);
-
-    VDeviceNativeAsyncInputStream(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info,
-        size_t max_queue_size,
-        hailo_status &status) :
-            VDeviceNativeInputStreamBase(std::move(streams), std::move(core_op_activated_event), layer_info, status),
-            m_callback_reorder_queue(max_queue_size), // TODO HRT-1058 - use reorder queue only when needed
-            m_max_queue_size(max_queue_size)
-    {}
+    virtual Expected<size_t> get_buffer_frames_size() const override;
+    virtual hailo_status flush() override;
 
+    virtual hailo_status write_impl(const MemoryView &buffer) override;
     virtual hailo_status wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout) override;
     virtual hailo_status write_async(TransferRequest &&transfer_request) override;
     virtual Expected<size_t> get_async_max_queue_size() const override;
 
 protected:
-    virtual hailo_status write_impl(const MemoryView &buffer, const std::function<bool()> &should_cancel) override;
 
-private:
-    CallbackReorderQueue m_callback_reorder_queue;
-    const size_t m_max_queue_size;
-};
+    InputStreamBase &next_stream();
+    void advance_stream();
 
-class VDeviceNativeOutputStreamBase : public VDeviceOutputStreamBase {
-public:
-    VDeviceNativeOutputStreamBase(
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info,
-        hailo_status &status) :
-            VDeviceOutputStreamBase(std::move(streams), layer_info, std::move(core_op_activated_event), status)
-    {}
+    std::map<device_id_t, std::reference_wrapper<InputStreamBase>> m_streams;
+    device_id_t m_next_transfer_stream;
+    uint32_t m_acc_frames;
+    const uint16_t m_batch_size;
+    std::unique_ptr<CallbackReorderQueue> m_callback_reorder_queue;
+    vdevice_core_op_handle_t m_core_op_handle;
 
-    virtual hailo_status abort() override;
-    virtual hailo_status clear_abort() override;
-    virtual bool is_scheduled() override { return false; };
 };
 
-class VDeviceNativeOutputStream : public VDeviceNativeOutputStreamBase {
+class VDeviceNativeOutputStream : public OutputStreamBase {
 public:
     static Expected<std::unique_ptr<VDeviceNativeOutputStream>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
-        EventPtr &&core_op_activated_event, const LayerInfo &layer_info);
+        std::map<device_id_t, std::reference_wrapper<OutputStreamBase>> &&streams,
+        EventPtr core_op_activated_event, const LayerInfo &layer_info, uint16_t batch_size,
+        vdevice_core_op_handle_t core_op_handle);
 
-    using VDeviceNativeOutputStreamBase::VDeviceNativeOutputStreamBase;
-    virtual hailo_status read(MemoryView buffer) override;
-};
-
-class VDeviceNativeAsyncOutputStream : public VDeviceNativeOutputStreamBase {
-public:
-    static Expected<std::unique_ptr<VDeviceNativeAsyncOutputStream>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
-        EventPtr &&core_op_activated_event, const LayerInfo &layer_info);
-
-    VDeviceNativeAsyncOutputStream(
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
+    VDeviceNativeOutputStream(
+        std::map<device_id_t, std::reference_wrapper<OutputStreamBase>> &&streams,
         EventPtr &&core_op_activated_event,
         const LayerInfo &layer_info,
-        size_t max_queue_size,
+        uint16_t batch_size,
+        vdevice_core_op_handle_t core_op_handle,
+        std::unique_ptr<CallbackReorderQueue> &&callback_reorder_queue,
         hailo_status &status) :
-            VDeviceNativeOutputStreamBase(std::move(streams), std::move(core_op_activated_event), layer_info, status),
-            m_callback_reorder_queue(max_queue_size), // TODO HRT-1058 - use reorder queue only when needed
-            m_max_queue_size(max_queue_size)
-    {}
+            OutputStreamBase(layer_info, streams.begin()->second.get().get_interface(),
+                                    std::move(core_op_activated_event), status),
+            m_streams(std::move(streams)),
+            m_next_transfer_stream(m_streams.begin()->first),
+            m_acc_frames(0),
+            m_batch_size(batch_size),
+            m_core_op_handle(core_op_handle),
+            m_callback_reorder_queue(std::move(callback_reorder_queue))
+    {
+        for (auto &output_stream :  m_streams) {
+            if (HAILO_STREAM_INTERFACE_ETH != output_stream.second.get().get_interface()) {
+                auto register_status = output_stream.second.get().register_interrupt_callback(
+                    [core_op_handle=m_core_op_handle, name=name(), device_id=output_stream.first]() {
+                        TRACE(OutputVdmaEnqueueTrace, device_id, core_op_handle, name);
+                    }
+                );
+                if (HAILO_SUCCESS != register_status) {
+                    LOGGER__ERROR("Failing register interrupt callback {}", register_status);
+                }
+            }
+        }
+    }
+
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override;
+    virtual hailo_status activate_stream() override;
+    virtual hailo_status deactivate_stream() override;
+    virtual hailo_status abort() override;
+    virtual hailo_status clear_abort() override;
+    virtual bool is_scheduled() override { return false; };
+    virtual hailo_stream_interface_t get_interface() const override;
+    virtual std::chrono::milliseconds get_timeout() const override;
+    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
+    virtual Expected<size_t> get_buffer_frames_size() const override;
+
+    virtual hailo_status read_impl(MemoryView buffer) override;
 
     virtual hailo_status wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout) override;
     virtual hailo_status read_async(TransferRequest &&transfer_request) override;
     virtual Expected<size_t> get_async_max_queue_size() const override;
-    virtual hailo_status read(MemoryView buffer) override;
 
 private:
-    CallbackReorderQueue m_callback_reorder_queue;
-    const size_t m_max_queue_size;
- };
+    OutputStreamBase &next_stream();
+    void advance_stream();
+
+    std::map<device_id_t, std::reference_wrapper<OutputStreamBase>> m_streams;
+    device_id_t m_next_transfer_stream;
+    uint32_t m_acc_frames;
+    const uint16_t m_batch_size;
+    vdevice_core_op_handle_t m_core_op_handle;
+    std::unique_ptr<CallbackReorderQueue> m_callback_reorder_queue;
+};
 
 } /* namespace hailort */
 
diff --git a/hailort/libhailort/src/vdevice/vdevice_stream.cpp b/hailort/libhailort/src/vdevice/vdevice_stream.cpp
deleted file mode 100644 (file)
index 6123597..0000000
+++ /dev/null
@@ -1,303 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdevice_stream.cpp
- * @brief TODO: brief
- *
- * TODO: doc
- **/
-
-#include "hailo/hailort.h"
-#include "hailo/stream.hpp"
-#include "hailo/hef.hpp"
-#include "hailo/hailort_common.hpp"
-
-#include "common/utils.hpp"
-
-#include "vdevice/vdevice_stream.hpp"
-#include "vdevice/vdevice_native_stream.hpp"
-#include "vdevice/scheduler/multi_device_scheduled_stream.hpp"
-#include "vdevice/scheduler/scheduled_stream.hpp"
-#include "core_op/resource_manager/resource_manager.hpp"
-
-#include <new>
-
-
-namespace hailort
-{
-
-/** Input stream **/
-VDeviceInputStreamBase::~VDeviceInputStreamBase()
-{
-    // We want to stop the vdma channel before closing the stream in the firmware
-    // because sending data to a closed stream may terminate the dma engine
-    if (m_is_stream_activated) {
-        (void)deactivate_stream();
-    }
-}
-
-hailo_status VDeviceInputStreamBase::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
-{
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto status = stream.get().activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
-        if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to activate input stream. (device: {})", stream.get().get_dev_id());
-            deactivate_stream();
-            return status;
-        }
-    }
-    m_is_stream_activated = true;
-    return HAILO_SUCCESS;
-}
-
-hailo_status VDeviceInputStreamBase::deactivate_stream()
-{
-    auto status = HAILO_SUCCESS; // Best effort
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto deactivate_status = stream.get().deactivate_stream();
-        if (HAILO_SUCCESS != deactivate_status) {
-            LOGGER__ERROR("Failed to deactivate input stream. (status: {} device: {})", deactivate_status, stream.get().get_dev_id());
-            status = deactivate_status;
-        }
-    }
-    m_is_stream_activated = false;
-    return status;
-}
-
-hailo_status VDeviceInputStreamBase::send_pending_buffer(const device_id_t &device_id)
-{
-    assert(1 == m_streams.size());
-    auto &vdma_input = dynamic_cast<VdmaInputStreamBase&>(m_streams.at(m_next_transfer_stream).get());
-    return vdma_input.send_pending_buffer(device_id);
-}
-
-Expected<size_t> VDeviceInputStreamBase::get_buffer_frames_size() const
-{
-    return m_streams.begin()->second.get().get_buffer_frames_size();
-}
-
-Expected<size_t> VDeviceInputStreamBase::get_pending_frames_count() const
-{
-    size_t total_pending_frames_count = 0;
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto stream_pending_frames_count = stream.get().get_pending_frames_count();
-        CHECK_EXPECTED(stream_pending_frames_count);
-        total_pending_frames_count += stream_pending_frames_count.value();
-    }
-    return total_pending_frames_count;
-}
-
-Expected<std::unique_ptr<VDeviceInputStreamBase>> VDeviceInputStreamBase::create(
-    std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&low_level_streams,
-    const hailo_stream_parameters_t &stream_params, const LayerInfo &edge_layer,
-    const scheduler_core_op_handle_t &core_op_handle, EventPtr core_op_activated_event,
-    CoreOpsSchedulerWeakPtr core_ops_scheduler)
-{
-    assert(0 < low_level_streams.size());
-
-    if (core_ops_scheduler.lock()) {
-        if ((stream_params.flags & HAILO_STREAM_FLAGS_ASYNC) != 0) {
-            auto stream = ScheduledAsyncInputStream::create(std::move(low_level_streams),
-                core_op_handle, std::move(core_op_activated_event), edge_layer,
-                core_ops_scheduler);
-            CHECK_EXPECTED(stream);
-            return std::unique_ptr<VDeviceInputStreamBase>(stream.release());
-        } else {
-            if (1 < low_level_streams.size()) {
-                auto stream = MultiDeviceScheduledInputStream::create(std::move(low_level_streams),
-                    core_op_handle, std::move(core_op_activated_event), edge_layer,
-                    core_ops_scheduler);
-                CHECK_EXPECTED(stream);
-                return std::unique_ptr<VDeviceInputStreamBase>(stream.release());
-            } else {
-                auto stream = ScheduledInputStream::create(std::move(low_level_streams),
-                    core_op_handle, std::move(core_op_activated_event), edge_layer,
-                    core_ops_scheduler);
-                CHECK_EXPECTED(stream);
-                return std::unique_ptr<VDeviceInputStreamBase>(stream.release());
-            }
-        }
-    } else {
-        if ((stream_params.flags & HAILO_STREAM_FLAGS_ASYNC) != 0) {
-            auto stream = VDeviceNativeAsyncInputStream::create(std::move(low_level_streams),
-                std::move(core_op_activated_event), edge_layer);
-            CHECK_EXPECTED(stream);
-            return std::unique_ptr<VDeviceInputStreamBase>(stream.release());
-        } else {
-            auto stream = VDeviceNativeInputStream::create(std::move(low_level_streams),
-                std::move(core_op_activated_event), edge_layer);
-            CHECK_EXPECTED(stream);
-            return std::unique_ptr<VDeviceInputStreamBase>(stream.release());
-        }
-
-    }
-}
-
-hailo_status VDeviceInputStreamBase::set_timeout(std::chrono::milliseconds timeout)
-{
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto status = stream.get().set_timeout(timeout);
-        CHECK_SUCCESS(status, "Failed to set timeout to input stream. (device: {})", stream.get().get_dev_id());
-    }
-    return HAILO_SUCCESS;
-}
-
-std::chrono::milliseconds VDeviceInputStreamBase::get_timeout() const
-{
-    // All timeout values of m_streams should be the same
-    return m_streams.begin()->second.get().get_timeout();
-}
-
-hailo_stream_interface_t VDeviceInputStreamBase::get_interface() const
-{
-    // All interface values of m_streams should be the same
-    return m_streams.begin()->second.get().get_interface();
-}
-
-hailo_status VDeviceInputStreamBase::flush()
-{
-    auto status = HAILO_SUCCESS; // Best effort
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto flush_status = stream.get().flush();
-        if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to flush input stream. (status: {} device: {})", status, stream.get().get_dev_id());
-            status = flush_status;
-        }
-    }
-    return status;
-}
-
-hailo_status VDeviceInputStreamBase::write_impl(const MemoryView &buffer)
-{
-    return write_impl(buffer, []() { return false; });
-}
-
-/** Output stream **/
-hailo_status VDeviceOutputStreamBase::deactivate_stream()
-{
-    auto status = HAILO_SUCCESS; // Best effort
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto deactivate_status = stream.get().deactivate_stream();
-        if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to deactivate output stream. (status: {} device: {})", status, stream.get().get_dev_id());
-            status = deactivate_status;
-        }
-    }
-    m_is_stream_activated = false;
-    return status;
-}
-
-VDeviceOutputStreamBase::~VDeviceOutputStreamBase()
-{
-    // We want to stop the vdma channel before closing the stream in the firmware
-    // because sending data to a closed stream may terminate the dma engine
-    if (m_is_stream_activated) {
-        (void)deactivate_stream();
-    }
-}
-
-hailo_status VDeviceOutputStreamBase::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
-{
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto status = stream.get().activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
-        if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to activate output stream. (device: {})", stream.get().get_dev_id());
-            deactivate_stream();
-            return status;
-        }
-    }
-    m_is_stream_activated = true;
-    return HAILO_SUCCESS;
-}
-
-hailo_status VDeviceOutputStreamBase::read_impl(MemoryView &/*buffer*/)
-{
-    LOGGER__ERROR("read_impl should not be called in vdevice flow");
-    return HAILO_INTERNAL_FAILURE;
-}
-
-Expected<std::unique_ptr<VDeviceOutputStreamBase>> VDeviceOutputStreamBase::create(
-    std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&low_level_streams,
-    const hailo_stream_parameters_t &stream_params, const LayerInfo &edge_layer,
-    const scheduler_core_op_handle_t &core_op_handle, EventPtr core_op_activated_event,
-    CoreOpsSchedulerWeakPtr core_ops_scheduler)
-{
-    assert(0 < low_level_streams.size());
-
-    if (core_ops_scheduler.lock()) {
-        if ((stream_params.flags & HAILO_STREAM_FLAGS_ASYNC) != 0) {
-            LOGGER__ERROR("Async output streams are not supported with scheduler");
-            return make_unexpected(HAILO_NOT_IMPLEMENTED);
-        } else {
-            auto stream = ScheduledOutputStream::create(std::move(low_level_streams), core_op_handle,
-                edge_layer, std::move(core_op_activated_event), core_ops_scheduler);
-            CHECK_EXPECTED(stream);
-            return std::unique_ptr<VDeviceOutputStreamBase>(stream.release());
-        }
-    } else {
-        if ((stream_params.flags & HAILO_STREAM_FLAGS_ASYNC) != 0) {
-            auto stream = VDeviceNativeAsyncOutputStream::create(std::move(low_level_streams),
-                std::move(core_op_activated_event), edge_layer);
-            CHECK_EXPECTED(stream);
-            return std::unique_ptr<VDeviceOutputStreamBase>(stream.release());
-        } else {
-            auto stream = VDeviceNativeOutputStream::create(std::move(low_level_streams),
-                std::move(core_op_activated_event), edge_layer);
-            CHECK_EXPECTED(stream);
-            return std::unique_ptr<VDeviceOutputStreamBase>(stream.release());
-        }
-    }
-}
-
-hailo_status VDeviceOutputStreamBase::set_timeout(std::chrono::milliseconds timeout)
-{
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto status = stream.get().set_timeout(timeout);
-        CHECK_SUCCESS(status, "Failed to set timeout to output stream. (device: {})", stream.get().get_dev_id());
-    }
-    return HAILO_SUCCESS;
-}
-
-std::chrono::milliseconds VDeviceOutputStreamBase::get_timeout() const
-{
-    // All timeout values of m_streams should be the same
-    return m_streams.begin()->second.get().get_timeout();
-}
-
-hailo_stream_interface_t VDeviceOutputStreamBase::get_interface() const
-{
-    // All interface values of m_streams should be the same
-    return m_streams.begin()->second.get().get_interface();
-}
-
-Expected<size_t> VDeviceOutputStreamBase::get_buffer_frames_size() const
-{
-    return m_streams.begin()->second.get().get_buffer_frames_size();
-}
-
-Expected<size_t> VDeviceOutputStreamBase::get_pending_frames_count() const
-{
-    size_t total_pending_frames_count = 0;
-    for (const auto &pair : m_streams) {
-        auto &stream = pair.second;
-        auto stream_pending_frames_count = stream.get().get_pending_frames_count();
-        if (HAILO_NOT_AVAILABLE == stream_pending_frames_count.status()) {
-            return make_unexpected(HAILO_NOT_AVAILABLE);
-        }
-            CHECK_EXPECTED(stream_pending_frames_count);
-            total_pending_frames_count += stream_pending_frames_count.value();
-    }
-    return total_pending_frames_count;
-}
-
-} /* namespace hailort */
diff --git a/hailort/libhailort/src/vdevice/vdevice_stream.hpp b/hailort/libhailort/src/vdevice/vdevice_stream.hpp
deleted file mode 100644 (file)
index c5cb88a..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdevice_stream.hpp
- * @brief Internal stream implementation for VDevice
- *
- * InputStream                                      (External "interface")
- * |-- InputStreamBase                              (Base class)
- *     |-- VDeviceInputStreamBase                   (Base class for vdevice streams)
- *     |   |-- VDeviceNativeInputStreamBase
- *     |   |    |-- VDeviceNativeInputStream        (Sync api)
- *     |   |    |-- VDeviceNativeAsyncInputStream   (Async api)
- *     |   |-- ScheduledInputStreamBase
- *     |   |    |-- ScheduledInputStream            (Sync api)
- *     |   |    |-- ScheduledAsyncInputStream       (Async api)
- *
- * OutputStream                                     (External "interface")
- * |-- OutputStreamBase                             (Base class)
- *     |-- VDeviceOutputStreamBase                  (Base class for vdevice streams)
- *     |   |-- VDeviceNativeOutputStreamBase
- *     |   |    |-- VDeviceNativeOutputStream       (Sync api)
- *     |   |    |-- VDeviceNativeAsyncOutputStream  (Async api)
- *     |   |-- ScheduledOutputStreamBase
- *     |   |    |-- ScheduledOutputStream           (Sync api)
- *     |   |    |-- ScheduledAsyncOutputStream      (Async api)
- **/
-
-#ifndef HAILO_VDEVICE_STREAM_HPP_
-#define HAILO_VDEVICE_STREAM_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-
-#include "vdevice/vdevice_internal.hpp"
-#include "vdma/vdma_device.hpp"
-#include "vdma/vdma_stream.hpp"
-#include "stream_common/stream_internal.hpp"
-
-
-namespace hailort
-{
-
-class VDeviceInputStreamBase : public InputStreamBase {
-
-public:
-    static Expected<std::unique_ptr<VDeviceInputStreamBase>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&low_level_streams,
-        const hailo_stream_parameters_t &stream_params, const LayerInfo &edge_layer,
-        const scheduler_core_op_handle_t &core_op_handle, EventPtr core_op_activated_event,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler);
-
-    virtual ~VDeviceInputStreamBase();
-
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
-    virtual hailo_status deactivate_stream() override;
-    virtual hailo_stream_interface_t get_interface() const override;
-    virtual std::chrono::milliseconds get_timeout() const override;
-    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
-
-    virtual hailo_status send_pending_buffer(const device_id_t &device_id) override;
-    virtual Expected<size_t> get_buffer_frames_size() const override;
-    virtual Expected<size_t> get_pending_frames_count() const override;
-    virtual bool is_scheduled() override = 0;
-    virtual hailo_status abort() override = 0;
-    virtual hailo_status clear_abort() override = 0;
-    virtual hailo_status flush() override;
-
-    virtual void notify_all()
-    {
-        // Overriden in scheduled_stream
-        return;
-    }
-
-protected:
-    virtual hailo_status write_impl(const MemoryView &buffer) final override;
-    virtual hailo_status write_impl(const MemoryView &buffer, const std::function<bool()> &should_cancel) = 0;
-
-    VDeviceInputStreamBase(
-        std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> &&streams,
-        EventPtr &&core_op_activated_event,
-        const LayerInfo &layer_info,
-        hailo_status &status) :
-            InputStreamBase(layer_info, streams.begin()->second.get().get_interface(), std::move(core_op_activated_event), status),
-            m_streams(std::move(streams)),
-            m_is_stream_activated(false),
-            m_next_transfer_stream(m_streams.begin()->first),
-            m_acc_frames(0)
-    {}
-
-    std::map<device_id_t, std::reference_wrapper<VdmaInputStreamBase>> m_streams;
-    bool m_is_stream_activated;
-    device_id_t m_next_transfer_stream;
-    uint32_t m_acc_frames;
-
-private:
-    friend class VDeviceInputStreamMultiplexerWrapper;
-};
-
-class VDeviceOutputStreamBase : public OutputStreamBase {
-public:
-    virtual ~VDeviceOutputStreamBase();
-
-    static Expected<std::unique_ptr<VDeviceOutputStreamBase>> create(
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&low_level_streams,
-        const hailo_stream_parameters_t &stream_params, const LayerInfo &edge_layer,
-        const scheduler_core_op_handle_t &core_op_handle, EventPtr core_op_activated_event,
-        CoreOpsSchedulerWeakPtr core_ops_scheduler);
-
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
-    virtual hailo_status deactivate_stream() override;
-    virtual hailo_stream_interface_t get_interface() const override;
-    virtual std::chrono::milliseconds get_timeout() const override;
-    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
-    virtual Expected<size_t> get_buffer_frames_size() const override;
-    virtual Expected<size_t> get_pending_frames_count() const override; // Returns the accumulated pending frames
-    virtual hailo_status abort() override = 0;
-    virtual hailo_status clear_abort() override = 0;
-    virtual bool is_scheduled() override = 0;
-
-protected:
-    VDeviceOutputStreamBase(
-        std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> &&streams,
-        const LayerInfo &layer_info,
-        EventPtr &&core_op_activated_event,
-        hailo_status &status) :
-            OutputStreamBase(layer_info, streams.begin()->second.get().get_interface(),
-                std::move(core_op_activated_event), status),
-            m_streams(std::move(streams)),
-            m_is_stream_activated(false),
-            m_next_transfer_stream(m_streams.begin()->first),
-            m_acc_frames(0)
-    {}
-
-    virtual hailo_status read_impl(MemoryView &buffer) override final;
-
-    std::map<device_id_t, std::reference_wrapper<VdmaOutputStreamBase>> m_streams;
-    bool m_is_stream_activated;
-    device_id_t m_next_transfer_stream;
-    uint32_t m_acc_frames;
-
-private:
-    friend class VDeviceOutputStreamMultiplexerWrapper;
-};
-
-} /* namespace hailort */
-
-#endif /* HAILO_VDEVICE_STREAM_HPP_ */
index 1b7b0a1c01c25db95dbfa7b0cb39dacdc6a6dbf2..7aeec0de98dca1d501697d3b1a9101424c819bd8 100644 (file)
@@ -3,34 +3,40 @@
 namespace hailort
 {
 
+hailo_status VDeviceInputStreamMultiplexerWrapper::set_buffer_mode(StreamBufferMode buffer_mode)
+{
+    // Buffer is not owned by this class, so we just forward the request to base stream.
+    return m_base_stream->set_buffer_mode(buffer_mode);
+}
+
 const hailo_stream_info_t &VDeviceInputStreamMultiplexerWrapper::get_info() const
 {
-    return m_vdevice_input_stream->get_info();
+    return m_base_stream->get_info();
 }
 
 const CONTROL_PROTOCOL__nn_stream_config_t &VDeviceInputStreamMultiplexerWrapper::get_nn_stream_config()
 {
-    return m_vdevice_input_stream->get_nn_stream_config();
+    return m_base_stream->get_nn_stream_config();
 }
 
-hailo_status VDeviceInputStreamMultiplexerWrapper::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+hailo_status VDeviceInputStreamMultiplexerWrapper::activate_stream()
 {
-    return m_vdevice_input_stream->activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
+    return m_base_stream->activate_stream();
 }
 
 hailo_status VDeviceInputStreamMultiplexerWrapper::deactivate_stream()
 {
-    return m_vdevice_input_stream->deactivate_stream();
+    return m_base_stream->deactivate_stream();
 }
 
 hailo_stream_interface_t VDeviceInputStreamMultiplexerWrapper::get_interface() const
 {
-    return m_vdevice_input_stream->get_interface();
+    return m_base_stream->get_interface();
 }
 
 std::chrono::milliseconds VDeviceInputStreamMultiplexerWrapper::get_timeout() const
 {
-    return m_vdevice_input_stream->get_timeout();
+    return m_base_stream->get_timeout();
 }
 
 hailo_status VDeviceInputStreamMultiplexerWrapper::abort()
@@ -43,7 +49,7 @@ hailo_status VDeviceInputStreamMultiplexerWrapper::abort()
     auto status = m_multiplexer->disable_stream(m_core_op_multiplexer_handle, name());
     CHECK_SUCCESS(status);
 
-    m_vdevice_input_stream->notify_all();
+    m_base_stream->notify_all();
 
     status = m_multiplexer->run_once_for_stream(name(), INPUT_RUN_ONCE_HANDLE__ABORT, m_core_op_multiplexer_handle);
     CHECK_SUCCESS(status);
@@ -64,7 +70,7 @@ hailo_status VDeviceInputStreamMultiplexerWrapper::clear_abort()
     status = m_multiplexer->run_once_for_stream(name(), INPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, m_core_op_multiplexer_handle);
     CHECK_SUCCESS(status);
 
-    m_vdevice_input_stream->notify_all();
+    m_base_stream->notify_all();
 
     return HAILO_SUCCESS;
 }
@@ -72,23 +78,18 @@ hailo_status VDeviceInputStreamMultiplexerWrapper::clear_abort()
 bool VDeviceInputStreamMultiplexerWrapper::is_scheduled()
 {
     // Multiplexer can only work with scheduler
-    assert(m_vdevice_input_stream->is_scheduled());
+    assert(m_base_stream->is_scheduled());
     return true;
 }
 
-hailo_status VDeviceInputStreamMultiplexerWrapper::send_pending_buffer(const device_id_t &device_id)
+hailo_status VDeviceInputStreamMultiplexerWrapper::launch_transfer(const device_id_t &device_id)
 {
-    return m_vdevice_input_stream->send_pending_buffer(device_id);
+    return m_base_stream->launch_transfer(device_id);
 }
 
 Expected<size_t> VDeviceInputStreamMultiplexerWrapper::get_buffer_frames_size() const
 {
-    return m_vdevice_input_stream->get_buffer_frames_size();
-}
-
-Expected<size_t> VDeviceInputStreamMultiplexerWrapper::get_pending_frames_count() const
-{
-    return m_vdevice_input_stream->get_pending_frames_count();
+    return m_base_stream->get_buffer_frames_size();
 }
 
 hailo_status VDeviceInputStreamMultiplexerWrapper::write_impl(const MemoryView &buffer)
@@ -99,7 +100,7 @@ hailo_status VDeviceInputStreamMultiplexerWrapper::write_impl(const MemoryView &
     }
     CHECK_SUCCESS(status);
 
-    auto write_status = m_vdevice_input_stream->write_impl(buffer, [this]() { return m_is_aborted->load(); });
+    auto write_status = m_base_stream->write_impl(buffer, [this]() { return m_is_aborted->load(); });
     status = m_multiplexer->signal_write_finish(m_core_op_multiplexer_handle, write_status != HAILO_SUCCESS);
     CHECK_SUCCESS(status);
     if (HAILO_STREAM_ABORTED_BY_USER == write_status) {
@@ -112,7 +113,7 @@ hailo_status VDeviceInputStreamMultiplexerWrapper::write_impl(const MemoryView &
 
 hailo_status VDeviceInputStreamMultiplexerWrapper::set_timeout(std::chrono::milliseconds timeout)
 {
-    return m_vdevice_input_stream->set_timeout(timeout);
+    return m_base_stream->set_timeout(timeout);
 }
 
 hailo_status VDeviceInputStreamMultiplexerWrapper::flush()
@@ -120,67 +121,75 @@ hailo_status VDeviceInputStreamMultiplexerWrapper::flush()
     return m_multiplexer->run_once_for_stream(name(), INPUT_RUN_ONCE_HANDLE__FLUSH, m_core_op_multiplexer_handle);
 }
 
-Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> VDeviceInputStreamMultiplexerWrapper::create(std::shared_ptr<VDeviceInputStreamBase> vdevice_input_stream,
-    std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> VDeviceInputStreamMultiplexerWrapper::create(
+    std::shared_ptr<ScheduledInputStream> base_stream,
+    std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer,
     multiplexer_core_op_handle_t core_op_multiplexer_handle)
 {
-    assert(vdevice_input_stream->is_scheduled());
+    assert(base_stream->is_scheduled());
     hailo_status status = HAILO_UNINITIALIZED;
-    std::unique_ptr<VDeviceInputStreamMultiplexerWrapper> wrapper(new (std::nothrow) VDeviceInputStreamMultiplexerWrapper(vdevice_input_stream, network_name, multiplexer,
-        core_ops_scheduler_handle, core_op_multiplexer_handle, status));
+    std::unique_ptr<VDeviceInputStreamMultiplexerWrapper> wrapper(
+        new (std::nothrow) VDeviceInputStreamMultiplexerWrapper(base_stream, network_name, multiplexer,
+            core_op_multiplexer_handle, status));
     CHECK_NOT_NULL_AS_EXPECTED(wrapper, HAILO_OUT_OF_HOST_MEMORY);
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     return wrapper;
 }
 
-Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> VDeviceInputStreamMultiplexerWrapper::clone(multiplexer_core_op_handle_t core_op_multiplexer_handle)
+Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> VDeviceInputStreamMultiplexerWrapper::clone(
+    multiplexer_core_op_handle_t core_op_multiplexer_handle)
 {
-    auto wrapper = create(m_vdevice_input_stream, m_network_name, m_multiplexer, m_core_ops_scheduler_handle, core_op_multiplexer_handle);
+    auto wrapper = create(m_base_stream, m_network_name, m_multiplexer, core_op_multiplexer_handle);
     CHECK_EXPECTED(wrapper);
 
     return wrapper;
 }
 
-VDeviceInputStreamMultiplexerWrapper::VDeviceInputStreamMultiplexerWrapper(std::shared_ptr<VDeviceInputStreamBase> &vdevice_input_stream,
-    std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+VDeviceInputStreamMultiplexerWrapper::VDeviceInputStreamMultiplexerWrapper(
+    std::shared_ptr<ScheduledInputStream> base_stream,
+    std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer,
     multiplexer_core_op_handle_t core_op_multiplexer_handle, hailo_status &status) :
-    InputStreamBase(vdevice_input_stream->get_info(),
-        vdevice_input_stream->m_nn_stream_config, vdevice_input_stream->get_core_op_activated_event()),
-    m_vdevice_input_stream(vdevice_input_stream),
+    InputStreamBase(base_stream->get_layer_info(), base_stream->get_interface(),
+        base_stream->get_core_op_activated_event(), status),
+    m_base_stream(base_stream),
     m_multiplexer(multiplexer),
-    m_core_ops_scheduler_handle(core_ops_scheduler_handle),
     m_core_op_multiplexer_handle(core_op_multiplexer_handle),
     m_network_name(network_name),
     m_is_aborted()
 {
+    if (HAILO_SUCCESS != status) {
+        // Parent returned error
+        return;
+    }
+
     m_is_aborted = make_unique_nothrow<std::atomic_bool>(false);
     if (nullptr == m_is_aborted) {
         status = HAILO_OUT_OF_HOST_MEMORY;
         LOGGER__ERROR("Failed to allocate memory! status = {}", status);
         return;
     }
-    status = multiplexer->register_run_once_for_stream(vdevice_input_stream->name(), INPUT_RUN_ONCE_HANDLE__FLUSH, [this]
+    status = multiplexer->register_run_once_for_stream(base_stream->name(), INPUT_RUN_ONCE_HANDLE__FLUSH, [this]
     {
-        return m_vdevice_input_stream->flush();
+        return m_base_stream->flush();
     });
     if (HAILO_SUCCESS != status) {
         LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
         return;
     }
 
-    status = multiplexer->register_run_once_for_stream(vdevice_input_stream->name(), INPUT_RUN_ONCE_HANDLE__ABORT, [this]
+    status = multiplexer->register_run_once_for_stream(base_stream->name(), INPUT_RUN_ONCE_HANDLE__ABORT, [this]
     {
-        return m_vdevice_input_stream->abort();
+        return m_base_stream->abort();
     });
     if (HAILO_SUCCESS != status) {
         LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
         return;
     }
 
-    status = multiplexer->register_run_once_for_stream(vdevice_input_stream->name(), INPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, [this]
+    status = multiplexer->register_run_once_for_stream(base_stream->name(), INPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, [this]
     {
-        return m_vdevice_input_stream->clear_abort();
+        return m_base_stream->clear_abort();
     });
     if (HAILO_SUCCESS != status) {
         LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
@@ -188,39 +197,45 @@ VDeviceInputStreamMultiplexerWrapper::VDeviceInputStreamMultiplexerWrapper(std::
     }
 }
 
+hailo_status VDeviceOutputStreamMultiplexerWrapper::set_buffer_mode(StreamBufferMode buffer_mode)
+{
+    // Buffer is not owned by this class, so we just forward the request to base stream.
+    return m_base_stream->set_buffer_mode(buffer_mode);
+}
+
 const hailo_stream_info_t &VDeviceOutputStreamMultiplexerWrapper::get_info() const
 {
-    return m_vdevice_output_stream->get_info();
+    return m_base_stream->get_info();
 }
 
 const CONTROL_PROTOCOL__nn_stream_config_t &VDeviceOutputStreamMultiplexerWrapper::get_nn_stream_config()
 {
-    return m_vdevice_output_stream->get_nn_stream_config();
+    return m_base_stream->get_nn_stream_config();
 }
 
-hailo_status VDeviceOutputStreamMultiplexerWrapper::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+hailo_status VDeviceOutputStreamMultiplexerWrapper::activate_stream()
 {
-    return m_vdevice_output_stream->activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
+    return m_base_stream->activate_stream();
 }
 
 hailo_status VDeviceOutputStreamMultiplexerWrapper::deactivate_stream()
 {
-    return m_vdevice_output_stream->deactivate_stream();
+    return m_base_stream->deactivate_stream();
 }
 
 hailo_stream_interface_t VDeviceOutputStreamMultiplexerWrapper::get_interface() const
 {
-    return m_vdevice_output_stream->get_interface();
+    return m_base_stream->get_interface();
 }
 
 std::chrono::milliseconds VDeviceOutputStreamMultiplexerWrapper::get_timeout() const
 {
-    return m_vdevice_output_stream->get_timeout();
+    return m_base_stream->get_timeout();
 }
 
-hailo_status VDeviceOutputStreamMultiplexerWrapper::set_next_device_to_read(const device_id_t &device_id)
+hailo_status VDeviceOutputStreamMultiplexerWrapper::launch_transfer(const device_id_t &device_id)
 {
-    return m_vdevice_output_stream->set_next_device_to_read(device_id);
+    return m_base_stream->launch_transfer(device_id);
 }
 
 hailo_status VDeviceOutputStreamMultiplexerWrapper::abort()
@@ -258,29 +273,20 @@ hailo_status VDeviceOutputStreamMultiplexerWrapper::clear_abort()
 bool VDeviceOutputStreamMultiplexerWrapper::is_scheduled()
 {
     // Multiplexer can only work with scheduler
-    assert(m_vdevice_output_stream->is_scheduled());
+    assert(m_base_stream->is_scheduled());
     return true;
 }
 
 Expected<size_t> VDeviceOutputStreamMultiplexerWrapper::get_buffer_frames_size() const
 {
-    return m_vdevice_output_stream->get_buffer_frames_size();
-}
-Expected<size_t> VDeviceOutputStreamMultiplexerWrapper::get_pending_frames_count() const
-{
-    return m_vdevice_output_stream->get_pending_frames_count();
+    return m_base_stream->get_buffer_frames_size();
 }
 
-hailo_status VDeviceOutputStreamMultiplexerWrapper::read_impl(MemoryView &buffer)
-{
-    return m_vdevice_output_stream->read_impl(buffer);
-}
-
-hailo_status VDeviceOutputStreamMultiplexerWrapper::read(MemoryView buffer)
+hailo_status VDeviceOutputStreamMultiplexerWrapper::read_impl(MemoryView buffer)
 {
     uint32_t frames_to_drain_count = 0;
     auto expected_drain_count = m_multiplexer->wait_for_read(m_core_op_multiplexer_handle, name(),
-        m_vdevice_output_stream->get_timeout());
+        m_base_stream->get_timeout());
     if (HAILO_STREAM_ABORTED_BY_USER == expected_drain_count.status()) {
         return expected_drain_count.status();
     }
@@ -289,14 +295,14 @@ hailo_status VDeviceOutputStreamMultiplexerWrapper::read(MemoryView buffer)
     frames_to_drain_count = expected_drain_count.release();
 
     for (uint32_t i = 0; i < frames_to_drain_count; i++) {
-        auto status = m_vdevice_output_stream->read(buffer);
+        auto status = m_base_stream->read(buffer);
         if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
             return status;
         }
         CHECK_SUCCESS(status);
     }
 
-    auto status = m_vdevice_output_stream->read(buffer);
+    auto status = m_base_stream->read(buffer);
     if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
         return status;
     }
@@ -310,38 +316,41 @@ hailo_status VDeviceOutputStreamMultiplexerWrapper::read(MemoryView buffer)
 
 hailo_status VDeviceOutputStreamMultiplexerWrapper::set_timeout(std::chrono::milliseconds timeout)
 {
-    return m_vdevice_output_stream->set_timeout(timeout);
+    return m_base_stream->set_timeout(timeout);
 }
 
-Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> VDeviceOutputStreamMultiplexerWrapper::create(std::shared_ptr<VDeviceOutputStreamBase> vdevice_output_stream,
-    std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> VDeviceOutputStreamMultiplexerWrapper::create(
+    std::shared_ptr<OutputStreamBase> base_stream,
+    std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer,
     multiplexer_core_op_handle_t core_op_multiplexer_handle)
 {
-    assert(vdevice_output_stream->is_scheduled());
+    assert(base_stream->is_scheduled());
     hailo_status status = HAILO_UNINITIALIZED;
-    std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper> wrapper(new (std::nothrow) VDeviceOutputStreamMultiplexerWrapper(vdevice_output_stream, network_name, multiplexer,
-        core_ops_scheduler_handle, core_op_multiplexer_handle, status));
+    std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper> wrapper(
+        new (std::nothrow) VDeviceOutputStreamMultiplexerWrapper(base_stream, network_name, multiplexer,
+            core_op_multiplexer_handle, status));
     CHECK_NOT_NULL_AS_EXPECTED(wrapper, HAILO_OUT_OF_HOST_MEMORY);
 
     return wrapper;
 }
 
-Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> VDeviceOutputStreamMultiplexerWrapper::clone(scheduler_core_op_handle_t core_op_multiplexer_handle)
+Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> VDeviceOutputStreamMultiplexerWrapper::clone(
+    multiplexer_core_op_handle_t core_op_multiplexer_handle)
 {
-    auto wrapper = create(m_vdevice_output_stream, m_network_name, m_multiplexer, m_core_ops_scheduler_handle, core_op_multiplexer_handle);
+    auto wrapper = create(m_base_stream, m_network_name, m_multiplexer, core_op_multiplexer_handle);
     CHECK_EXPECTED(wrapper);
 
     return wrapper;
 }
 
-VDeviceOutputStreamMultiplexerWrapper::VDeviceOutputStreamMultiplexerWrapper(std::shared_ptr<VDeviceOutputStreamBase> &vdevice_output_stream,
-        std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+VDeviceOutputStreamMultiplexerWrapper::VDeviceOutputStreamMultiplexerWrapper(
+        std::shared_ptr<OutputStreamBase> base_stream,
+        std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer,
         multiplexer_core_op_handle_t core_op_multiplexer_handle, hailo_status &status) :
-    OutputStreamBase(vdevice_output_stream->get_layer_info(), vdevice_output_stream->get_info(),
-        vdevice_output_stream->m_nn_stream_config, vdevice_output_stream->get_core_op_activated_event()),
-    m_vdevice_output_stream(vdevice_output_stream),
+    OutputStreamBase(base_stream->get_layer_info(), base_stream->get_info(),
+        base_stream->m_nn_stream_config, base_stream->get_core_op_activated_event()),
+    m_base_stream(base_stream),
     m_multiplexer(multiplexer),
-    m_core_ops_scheduler_handle(core_ops_scheduler_handle),
     m_core_op_multiplexer_handle(core_op_multiplexer_handle),
     m_network_name(network_name),
     m_is_aborted()
@@ -353,18 +362,18 @@ VDeviceOutputStreamMultiplexerWrapper::VDeviceOutputStreamMultiplexerWrapper(std
         return;
     }
 
-    status = multiplexer->register_run_once_for_stream(vdevice_output_stream->name(), OUTPUT_RUN_ONCE_HANDLE__ABORT, [this]
+    status = multiplexer->register_run_once_for_stream(m_base_stream->name(), OUTPUT_RUN_ONCE_HANDLE__ABORT, [this]
     {
-        return m_vdevice_output_stream->abort();
+        return m_base_stream->abort();
     });
     if (HAILO_SUCCESS != status) {
         LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
         return;
     }
 
-    status = multiplexer->register_run_once_for_stream(vdevice_output_stream->name(), OUTPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, [this]
+    status = multiplexer->register_run_once_for_stream(m_base_stream->name(), OUTPUT_RUN_ONCE_HANDLE__CLEAR_ABORT, [this]
     {
-        return m_vdevice_output_stream->clear_abort();
+        return m_base_stream->clear_abort();
     });
     if (HAILO_SUCCESS != status) {
         LOGGER__ERROR("register_run_once_for_stream failed! status = {}", status);
index 92e054bd58b4c00a341919667ff89ad4707da25f..b2c55a670640ba8b82fd3285a1e6d9b3b85c5a5b 100644 (file)
@@ -13,7 +13,7 @@
 #include "hailo/expected.hpp"
 
 #include "stream_common/stream_internal.hpp"
-#include "vdevice/vdevice_stream.hpp"
+#include "vdevice/scheduler/scheduled_stream.hpp"
 #include "vdevice/pipeline_multiplexer.hpp"
 
 
@@ -34,14 +34,16 @@ enum output_run_once_handle_t {
 class VDeviceInputStreamMultiplexerWrapper : public InputStreamBase {
 public:
     virtual ~VDeviceInputStreamMultiplexerWrapper() = default;
-    static Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> create(std::shared_ptr<VDeviceInputStreamBase> vdevice_input_stream,
-        std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+    static Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> create(
+        std::shared_ptr<ScheduledInputStream> base_stream,
+        std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer,
         multiplexer_core_op_handle_t core_op_multiplexer_handle = 0);
     Expected<std::unique_ptr<VDeviceInputStreamMultiplexerWrapper>> clone(multiplexer_core_op_handle_t core_op_multiplexer_handle);
 
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override;
     virtual const hailo_stream_info_t &get_info() const override;
     virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config() override;
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status activate_stream() override;
     virtual hailo_status deactivate_stream() override;
     virtual hailo_stream_interface_t get_interface() const override;
     virtual std::chrono::milliseconds get_timeout() const override;
@@ -49,24 +51,22 @@ public:
     virtual hailo_status clear_abort() override;
     virtual bool is_scheduled() override;
 
-    virtual hailo_status send_pending_buffer(const device_id_t &device_id) override;
+    virtual hailo_status launch_transfer(const device_id_t &device_id) override;
     virtual Expected<size_t> get_buffer_frames_size() const override;
-    virtual Expected<size_t> get_pending_frames_count() const override;
 
 protected:
     virtual hailo_status write_impl(const MemoryView &buffer) override;
 
 private:
-    VDeviceInputStreamMultiplexerWrapper(std::shared_ptr<VDeviceInputStreamBase> &vdevice_input_stream,
-        std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+    VDeviceInputStreamMultiplexerWrapper(std::shared_ptr<ScheduledInputStream> base_stream,
+        std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer,
         multiplexer_core_op_handle_t core_op_multiplexer_handle, hailo_status &status);
 
     virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
     virtual hailo_status flush() override;
 
-    std::shared_ptr<VDeviceInputStreamBase> m_vdevice_input_stream;
+    std::shared_ptr<ScheduledInputStream> m_base_stream;
     std::shared_ptr<PipelineMultiplexer> m_multiplexer;
-    scheduler_core_op_handle_t m_core_ops_scheduler_handle;
     multiplexer_core_op_handle_t m_core_op_multiplexer_handle;
     std::string m_network_name;
 
@@ -77,36 +77,35 @@ class VDeviceOutputStreamMultiplexerWrapper : public OutputStreamBase {
 public:
     virtual ~VDeviceOutputStreamMultiplexerWrapper() noexcept = default;
 
-    static Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> create(std::shared_ptr<VDeviceOutputStreamBase> vdevice_output_stream,
-        std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+    static Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> create(
+        std::shared_ptr<OutputStreamBase> base_stream,
+        std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer,
         multiplexer_core_op_handle_t core_op_multiplexer_handle = 0);
     Expected<std::unique_ptr<VDeviceOutputStreamMultiplexerWrapper>> clone(multiplexer_core_op_handle_t core_op_multiplexer_handle);
 
+    virtual hailo_status set_buffer_mode(StreamBufferMode buffer_mode) override;
     virtual const hailo_stream_info_t &get_info() const override;
     virtual const CONTROL_PROTOCOL__nn_stream_config_t &get_nn_stream_config() override;
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status activate_stream() override;
     virtual hailo_status deactivate_stream() override;
     virtual hailo_stream_interface_t get_interface() const override;
     virtual std::chrono::milliseconds get_timeout() const override;
-    virtual hailo_status set_next_device_to_read(const device_id_t &device_id) override;
+    virtual hailo_status launch_transfer(const device_id_t &device_id) override;
     virtual hailo_status abort() override;
     virtual hailo_status clear_abort() override;
     virtual bool is_scheduled() override;
     virtual Expected<size_t> get_buffer_frames_size() const override;
-    virtual Expected<size_t> get_pending_frames_count() const override;
 
 private:
-    VDeviceOutputStreamMultiplexerWrapper(std::shared_ptr<VDeviceOutputStreamBase> &vdevice_output_stream,
-        std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer, scheduler_core_op_handle_t core_ops_scheduler_handle,
+    VDeviceOutputStreamMultiplexerWrapper(std::shared_ptr<OutputStreamBase> base_stream,
+        std::string network_name, std::shared_ptr<PipelineMultiplexer> multiplexer,
         multiplexer_core_op_handle_t core_op_multiplexer_handle, hailo_status &status);
 
     virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
-    virtual hailo_status read_impl(MemoryView &buffer) override;
-    virtual hailo_status read(MemoryView buffer) override;
+    virtual hailo_status read_impl(MemoryView buffer) override;
 
-    std::shared_ptr<VDeviceOutputStreamBase> m_vdevice_output_stream;
+    std::shared_ptr<OutputStreamBase> m_base_stream;
     std::shared_ptr<PipelineMultiplexer> m_multiplexer;
-    scheduler_core_op_handle_t m_core_ops_scheduler_handle;
     multiplexer_core_op_handle_t m_core_op_multiplexer_handle;
     std::string m_network_name;
     EventPtr m_read_event;
index 29649988ed582c63694f92391f324aadcac5983b..1e72b242c4e9186b63469c993e6e31c8c6100d2b 100644 (file)
@@ -3,20 +3,14 @@ cmake_minimum_required(VERSION 3.0.0)
 set(SRC_FILES
     ${CMAKE_CURRENT_SOURCE_DIR}/vdma_device.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/vdma_config_core_op.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/vdma_config_activated_core_op.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/vdma_config_manager.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/vdma_stream.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/vdma_stream_base.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/vdma_async_stream.cpp
+    ${CMAKE_CURRENT_SOURCE_DIR}/circular_stream_buffer_pool.cpp
 
     ${CMAKE_CURRENT_SOURCE_DIR}/pcie/pcie_device.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/integrated/integrated_device.cpp
 
-    ${CMAKE_CURRENT_SOURCE_DIR}/channel/channel_state.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/channel/channel_base.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/channel/buffered_channel.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/channel/boundary_channel.cpp
-    ${CMAKE_CURRENT_SOURCE_DIR}/channel/async_channel.cpp
     ${CMAKE_CURRENT_SOURCE_DIR}/channel/interrupts_dispatcher.cpp
 
     ${CMAKE_CURRENT_SOURCE_DIR}/memory/descriptor_list.cpp
diff --git a/hailort/libhailort/src/vdma/channel/async_channel.cpp b/hailort/libhailort/src/vdma/channel/async_channel.cpp
deleted file mode 100644 (file)
index d104b39..0000000
+++ /dev/null
@@ -1,255 +0,0 @@
-/**\r
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.\r
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
-**/\r
-/**\r
- * @file async_channel.cpp\r
- * @brief Implementation of the AsyncChannel class\r
- **/\r
-\r
-#include "async_channel.hpp"\r
-#include "hailo/hailort.h"\r
-#include "hailo/hailort_common.hpp"\r
-\r
-namespace hailort\r
-{\r
-namespace vdma\r
-{\r
-\r
-Expected<AsyncChannelPtr> AsyncChannel::create(vdma::ChannelId channel_id, Direction direction,\r
-    HailoRTDriver &driver, uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,\r
-    LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr)\r
-{\r
-    hailo_status status = HAILO_UNINITIALIZED;\r
-    auto channel_ptr = make_shared_nothrow<vdma::AsyncChannel>(channel_id, direction, driver, descs_count,\r
-        desc_page_size, stream_name, latency_meter, transfers_per_axi_intr, status);\r
-    CHECK_NOT_NULL_AS_EXPECTED(channel_ptr, HAILO_OUT_OF_HOST_MEMORY);\r
-    CHECK_SUCCESS_AS_EXPECTED(status, "Failed creating AsyncChannel");\r
-    return channel_ptr;\r
-}\r
-\r
-AsyncChannel::AsyncChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,\r
-                           uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,\r
-                           LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr,\r
-                           hailo_status &status) :\r
-    BoundaryChannel(BoundaryChannel::Type::ASYNC, channel_id, direction, driver, descs_count, desc_page_size,\r
-                    stream_name, latency_meter, transfers_per_axi_intr, status)\r
-{\r
-    // Check that base constructor was successful\r
-    if (HAILO_SUCCESS != status) {\r
-        LOGGER__ERROR("Failed building Vdma Channel base class");\r
-        return;\r
-    }\r
-\r
-    status = HAILO_SUCCESS;\r
-}\r
-\r
-hailo_status AsyncChannel::transfer_async(TransferRequest &&transfer_request)\r
-{\r
-    CHECK_ARG_NOT_NULL(transfer_request.buffer.data());\r
-    CHECK(0 != transfer_request.buffer.size(), HAILO_INVALID_ARGUMENT, "Buffer is empty (size 0)");\r
-\r
-    auto is_new_mapping = true;\r
-    MappedBufferPtr mapped_buffer = nullptr;\r
-    if (transfer_request.mapped_buffer != nullptr) {\r
-        assert(transfer_request.buffer.data() == transfer_request.mapped_buffer->data());\r
-        assert(transfer_request.buffer.size() == transfer_request.mapped_buffer->size());\r
-        CHECK(transfer_request.mapped_buffer->storage().type() == BufferStorage::Type::DMA, HAILO_INVALID_ARGUMENT,\r
-            "Buffer must be dma-able (provided buffer type {})", transfer_request.mapped_buffer->storage().type());\r
-\r
-        // Map if not already mapped\r
-        const auto mapping_direction = (m_direction == Direction::H2D) ? HAILO_DMA_BUFFER_DIRECTION_H2D : HAILO_DMA_BUFFER_DIRECTION_D2H;\r
-        auto is_new_mapping_exp = transfer_request.mapped_buffer->storage().dma_map(m_driver, mapping_direction);\r
-        CHECK_EXPECTED_AS_STATUS(is_new_mapping_exp);\r
-        is_new_mapping = is_new_mapping_exp.release();\r
-\r
-        auto mapped_buffer_exp = transfer_request.mapped_buffer->storage().get_dma_mapped_buffer(m_driver.device_id());\r
-        CHECK_EXPECTED_AS_STATUS(mapped_buffer_exp);\r
-        mapped_buffer = mapped_buffer_exp.release();\r
-    } else {\r
-        auto mapped_buffer_exp = MappedBuffer::create_shared(m_driver, m_direction,\r
-            transfer_request.buffer.size(), transfer_request.buffer.data());\r
-        CHECK_EXPECTED_AS_STATUS(mapped_buffer_exp);\r
-        mapped_buffer = mapped_buffer_exp.release();\r
-    }\r
-\r
-    if (!is_new_mapping) {\r
-        // The buffer has been previously mapped, so it needs to be sync'd from host to device.\r
-        // * If the buffer is mapped H2D/BOTH, then synchronize will make sure the device "sees" the most "up to date"\r
-        //   version of the buffer.\r
-        // * If the buffer is mapped D2H, it might have been changed by the host between the time it was mapped and the\r
-        //   current async transfer. Synchronizing will transfer ownership to the device, so that when the transfer is\r
-        //   complete, the host will "see" an "up to date" version of the buffer.\r
-        auto status = mapped_buffer->synchronize(HailoRTDriver::DmaSyncDirection::TO_DEVICE);\r
-        CHECK_SUCCESS(status);\r
-    }\r
-\r
-    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());\r
-    if (!m_state->m_is_channel_activated) {\r
-        return HAILO_STREAM_NOT_ACTIVATED;\r
-    }\r
-    if (m_state->m_is_aborted) {\r
-        LOGGER__INFO("Tried to write to aborted channel {}", m_channel_id);\r
-        return HAILO_STREAM_ABORTED_BY_USER;\r
-    }\r
-\r
-    if (Direction::H2D == m_direction) {\r
-        return transfer_h2d(mapped_buffer, transfer_request.callback);\r
-    } else {\r
-        return transfer_d2h(mapped_buffer, transfer_request.callback);\r
-    }\r
-}\r
-\r
-hailo_status AsyncChannel::cancel_pending_transfers()\r
-{\r
-    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());\r
-    for (auto &pending_buffer_info : m_state->m_pending_buffers) {\r
-        if (pending_buffer_info.on_transfer_done) {\r
-            pending_buffer_info.on_transfer_done(HAILO_STREAM_ABORTED_BY_USER);\r
-            // Release our references to user buffer and callback.\r
-            pending_buffer_info = PendingBuffer{};\r
-        } else {\r
-            LOGGER__WARNING("No transfer done callback found for transfer (channel {}); skipping", m_channel_id);\r
-        }\r
-    }\r
-\r
-    return HAILO_SUCCESS;\r
-}\r
-\r
-hailo_status AsyncChannel::complete_channel_activation(uint32_t /* transfer_size */, bool /* resume_pending_transfers */)\r
-{\r
-    return HAILO_SUCCESS;\r
-}\r
-\r
-hailo_status AsyncChannel::complete_channel_deactivation()\r
-{\r
-    // Note: We don't reset channel counters here as the resource manager will signal pending transfers\r
-    //       (i.e. transfers in m_pending_buffers) via cancel_pending_async_transfers.\r
-    //       The counters are reset in the channel activation\r
-    return HAILO_SUCCESS;\r
-}\r
-\r
-hailo_status AsyncChannel::transfer_sync(void */* buf */, size_t /* count */, std::chrono::milliseconds /* timeout */)\r
-{\r
-    return HAILO_NOT_IMPLEMENTED;\r
-}\r
-\r
-hailo_status AsyncChannel::write_buffer(const MemoryView &/* buffer */, std::chrono::milliseconds /* timeout */,\r
-    const std::function<bool()> &/* should_cancel */)\r
-{\r
-    return HAILO_NOT_IMPLEMENTED;\r
-}\r
-\r
-hailo_status AsyncChannel::send_pending_buffer()\r
-{\r
-    return HAILO_NOT_IMPLEMENTED;\r
-}\r
-\r
-void AsyncChannel::notify_all()\r
-{}\r
-\r
-Expected<BoundaryChannel::BufferState> AsyncChannel::get_buffer_state()\r
-{\r
-    return make_unexpected(HAILO_NOT_IMPLEMENTED);\r
-}\r
-\r
-Expected<size_t> AsyncChannel::get_h2d_pending_frames_count()\r
-{\r
-    return make_unexpected(HAILO_NOT_IMPLEMENTED);\r
-}\r
-\r
-Expected<size_t> AsyncChannel::get_d2h_pending_descs_count()\r
-{\r
-    return make_unexpected(HAILO_NOT_IMPLEMENTED);\r
-}\r
-\r
-hailo_status AsyncChannel::transfer_d2h(MappedBufferPtr mapped_buffer, const InternalTransferDoneCallback &callback)\r
-{\r
-    InterruptsDomain first_desc_interrupts_domain = InterruptsDomain::NONE;\r
-    // Provide FW interrupt only in the end of the last transfer in the batch\r
-    InterruptsDomain last_desc_interrupts_domain = (m_state->m_accumulated_transfers + 1 == m_transfers_per_axi_intr) ?\r
-        InterruptsDomain::BOTH : InterruptsDomain::HOST;\r
-\r
-    const auto status = prepare_descriptors(mapped_buffer, callback, first_desc_interrupts_domain,\r
-        last_desc_interrupts_domain);\r
-    if (HAILO_QUEUE_IS_FULL == status) {\r
-        return status;\r
-    }\r
-    CHECK_SUCCESS(status);\r
-\r
-    m_state->m_accumulated_transfers = (m_state->m_accumulated_transfers + 1) % m_transfers_per_axi_intr;\r
-\r
-    return HAILO_SUCCESS;\r
-}\r
-\r
-hailo_status AsyncChannel::transfer_h2d(MappedBufferPtr mapped_buffer, const InternalTransferDoneCallback &callback)\r
-{\r
-    // For h2d, only the host need to get transfer done interrupts\r
-    InterruptsDomain last_desc_interrupts_domain = InterruptsDomain::HOST;\r
-    // If we measure latency, we need interrupt on the first descriptor\r
-    InterruptsDomain first_desc_interrupts_domain = (m_latency_meter != nullptr) ?\r
-        InterruptsDomain::HOST : InterruptsDomain::NONE;\r
-\r
-    return prepare_descriptors(mapped_buffer, callback, first_desc_interrupts_domain,\r
-        last_desc_interrupts_domain);\r
-}\r
-\r
-hailo_status AsyncChannel::prepare_descriptors(MappedBufferPtr mapped_buffer,\r
-    const InternalTransferDoneCallback &callback, InterruptsDomain first_desc_interrupts_domain,\r
-    InterruptsDomain last_desc_interrupts_domain)\r
-{\r
-    assert(mapped_buffer != nullptr);\r
-\r
-    const auto desired_desc_num = m_desc_list->descriptors_in_buffer(mapped_buffer->size());\r
-    CHECK(desired_desc_num <= MAX_DESCS_COUNT, HAILO_INTERNAL_FAILURE);\r
-    const uint16_t desc_num = static_cast<uint16_t>(desired_desc_num);\r
-\r
-    const auto num_available = get_num_available();\r
-    const auto num_processed = CB_TAIL(m_state->m_descs);\r
-    const auto num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);\r
-    if (num_free < desc_num) {\r
-        return HAILO_QUEUE_IS_FULL;\r
-    }\r
-\r
-    const auto status = m_desc_list->configure_to_use_buffer(*mapped_buffer, m_channel_id, num_available);\r
-    CHECK_SUCCESS(status);\r
-\r
-    if (nullptr != m_latency_meter) {\r
-        // Program first descriptor\r
-        m_desc_list->program_single_descriptor((*m_desc_list)[num_available], m_desc_list->desc_page_size(),\r
-            first_desc_interrupts_domain);\r
-    }\r
-    auto actual_desc_count = m_desc_list->program_last_descriptor(mapped_buffer->size(), last_desc_interrupts_domain,\r
-        num_available);\r
-    CHECK_EXPECTED_AS_STATUS(actual_desc_count, "Failed to program desc_list for channel {}", m_channel_id);\r
-    assert (actual_desc_count.value() == desc_num);\r
-    assert(desc_num > 0);\r
-    const auto last_desc_avail = static_cast<uint16_t>((num_available + desc_num - 1) & m_state->m_descs.size_mask);\r
-\r
-    const auto wrapped_callback = [this, mapped_buffer, callback](hailo_status callback_status) {\r
-        if (HAILO_SUCCESS != callback_status) {\r
-            // No need to sync, just forward the callback.\r
-            callback(callback_status);\r
-            return;\r
-        }\r
-\r
-        // The device may only change the contents of mapped_buffer, if it was mapped in Direction::D2H\r
-        // (not Direction::BOTH because channels are either D2H or H2D). Hence, we don't need to sync H2D\r
-        // buffers to the host (the host's "view" of the buffer is "up to date").\r
-        if (m_direction == Direction::D2H) {\r
-            auto sync_status = mapped_buffer->synchronize(HailoRTDriver::DmaSyncDirection::TO_HOST);\r
-            if (sync_status != HAILO_SUCCESS) {\r
-                LOGGER__ERROR("Failed to sync buffer to host with status {}", sync_status);\r
-                callback_status = sync_status;\r
-            }\r
-        }\r
-\r
-        callback(callback_status);\r
-    };\r
-\r
-    m_state->add_pending_buffer(num_available, last_desc_avail, m_direction, wrapped_callback, mapped_buffer);\r
-    return inc_num_available(desc_num);\r
-}\r
-\r
-} /* namespace vdma */\r
-} /* namespace hailort */\r
diff --git a/hailort/libhailort/src/vdma/channel/async_channel.hpp b/hailort/libhailort/src/vdma/channel/async_channel.hpp
deleted file mode 100644 (file)
index d2ae258..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/**\r
- * Copyright (c) 2022 Hailo Technologies Ltd. All rights reserved.\r
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)\r
-**/\r
-/**\r
- * @file async_channel.hpp\r
- * @brief AsyncChannel - Implements the BoundaryChannel interface, allowing for asyc send/recv and zero copy io\r
- **/\r
-\r
-#ifndef _HAILO_ASYNC_CHANNEL_HPP_\r
-#define _HAILO_ASYNC_CHANNEL_HPP_\r
-\r
-#include "hailo/hailort.h"\r
-\r
-#include "vdma/channel/boundary_channel.hpp"\r
-#include "vdma/channel/channel_state.hpp"\r
-\r
-#include <functional>\r
-\r
-\r
-namespace hailort\r
-{\r
-namespace vdma\r
-{\r
-\r
-class AsyncChannel;\r
-using AsyncChannelPtr = std::shared_ptr<AsyncChannel>;\r
-\r
-class AsyncChannel : public BoundaryChannel\r
-{\r
-public:\r
-    static Expected<AsyncChannelPtr> create(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,\r
-        uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name = "", LatencyMeterPtr latency_meter = nullptr,\r
-        uint16_t transfers_per_axi_intr = 1);\r
-\r
-    AsyncChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,\r
-        uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr,\r
-        hailo_status &status);\r
-    AsyncChannel(AsyncChannel &&) = delete;\r
-    AsyncChannel(const AsyncChannel &) = delete;\r
-    AsyncChannel &operator=(AsyncChannel &&) = delete;\r
-    AsyncChannel &operator=(const AsyncChannel &) = delete;\r
-    virtual ~AsyncChannel() = default;\r
-\r
-    virtual hailo_status complete_channel_activation(uint32_t transfer_size, bool resume_pending_transfers) override;\r
-    virtual hailo_status complete_channel_deactivation() override;\r
-\r
-    virtual hailo_status transfer_async(TransferRequest &&transfer_request) override;\r
-    virtual hailo_status cancel_pending_transfers() override;\r
-\r
-    virtual hailo_status transfer_sync(void *buf, size_t count, std::chrono::milliseconds timeout) override;\r
-    // TODO: don't want\r
-    virtual hailo_status write_buffer(const MemoryView &buffer, std::chrono::milliseconds timeout,\r
-        const std::function<bool()> &should_cancel) override;\r
-    // TODO: don't want\r
-    virtual hailo_status send_pending_buffer() override;\r
-    // TODO: don't want\r
-    virtual void notify_all() override;\r
-\r
-    // TODO: don't want\r
-    virtual Expected<BoundaryChannel::BufferState> get_buffer_state() override;\r
-    // TODO: don't want\r
-    virtual Expected<size_t> get_h2d_pending_frames_count() override;\r
-    // TODO: don't want\r
-    virtual Expected<size_t> get_d2h_pending_descs_count() override;\r
-\r
-private:\r
-    hailo_status transfer_d2h(MappedBufferPtr mapped_buffer, const InternalTransferDoneCallback &user_callback);\r
-    hailo_status transfer_h2d(MappedBufferPtr mapped_buffer, const InternalTransferDoneCallback &user_callback);\r
-    hailo_status prepare_descriptors(MappedBufferPtr mapped_buffer, const InternalTransferDoneCallback &user_callback,\r
-        InterruptsDomain first_desc_interrupts_domain, InterruptsDomain last_desc_interrupts_domain);\r
-};\r
-\r
-} /* namespace vdma */\r
-} /* namespace hailort */\r
-\r
-#endif /* _HAILO_ASYNC_CHANNEL_HPP_ */\r
index 9a298d0a1c5bf16d1476c55e44a4525f09a5786e..3feb827c529330c80ed3f4dc3b1cd485d3b3f4f6 100644 (file)
@@ -12,8 +12,7 @@
 #include "common/os_utils.hpp"
 
 #include "vdma/channel/boundary_channel.hpp"
-#include "vdma/channel/buffered_channel.hpp"
-#include "vdma/channel/async_channel.hpp"
+#include "vdma/memory/vdma_buffer.hpp"
 
 #include <list>
 #include <chrono>
@@ -27,365 +26,423 @@ namespace vdma {
 
 Expected<BoundaryChannelPtr> BoundaryChannel::create(vdma::ChannelId channel_id, Direction direction,
     HailoRTDriver &driver, uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,
-    LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr, Type type)
+    LatencyMeterPtr latency_meter)
 {
-    switch (type)
-    {
-    case Type::BUFFERED:
-        {
-            auto buffered_channel = BufferedChannel::create(channel_id, direction, driver, descs_count, desc_page_size,
-                stream_name, latency_meter, transfers_per_axi_intr);
-            CHECK_EXPECTED(buffered_channel);
-
-            // Upcasting
-            return std::static_pointer_cast<BoundaryChannel>(buffered_channel.value());
-        }
-    
-    case Type::ASYNC:
-        {
-            auto async_channel = AsyncChannel::create(channel_id, direction, driver, descs_count, desc_page_size,
-                stream_name, latency_meter, transfers_per_axi_intr);
-            CHECK_EXPECTED(async_channel);
-
-            // Upcasting
-            return std::static_pointer_cast<BoundaryChannel>(async_channel.value());
-        }
-    }
-
-    // Shouldn't get here
-    return make_unexpected(HAILO_INVALID_ARGUMENT);
+    hailo_status status = HAILO_UNINITIALIZED;
+    auto channel_ptr = make_shared_nothrow<BoundaryChannel>(channel_id, direction, driver, descs_count,
+        desc_page_size, stream_name, latency_meter, status);
+    CHECK_NOT_NULL_AS_EXPECTED(channel_ptr, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status, "Failed creating BoundaryChannel");
+    return channel_ptr;
 }
 
-BoundaryChannel::BoundaryChannel(Type type, vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
+BoundaryChannel::BoundaryChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
                                  uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,
-                                 LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr, hailo_status &status) :
-    ChannelBase(channel_id, direction, driver, descs_count, desc_page_size, stream_name, latency_meter, status),
-    m_type(type),
-    m_user_interrupt_callback(ignore_processing_complete),
-    m_transfers_per_axi_intr(transfers_per_axi_intr)
+                                 LatencyMeterPtr latency_meter, hailo_status &status) :
+    m_channel_id(channel_id),
+    m_direction(direction),
+    m_driver(driver),
+    m_host_registers(driver, channel_id, direction),
+    m_desc_list(nullptr),
+    m_stream_name(stream_name),
+    m_latency_meter(latency_meter),
+    m_is_channel_activated(false),
+    m_ongoing_transfers((latency_meter != nullptr) ? ONGOING_TRANSFERS_SIZE/2 : ONGOING_TRANSFERS_SIZE),
+    m_last_bounded_buffer(BoundedBuffer{nullptr, 0, 0})
 {
-    // Check that base constructor was successful
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Failed building vdma channel base class");
+    if (Direction::BOTH == direction) {
+        LOGGER__ERROR("Boundary channels must be unidirectional");
+        status = HAILO_INVALID_ARGUMENT;
         return;
     }
 
-    if (Direction::BOTH == direction) {
-        LOGGER__ERROR("Boundary channels must be unidirectional");
+    if (channel_id.channel_index >= VDMA_CHANNELS_PER_ENGINE) {
+        LOGGER__ERROR("Invalid DMA channel index {}", channel_id.channel_index);
         status = HAILO_INVALID_ARGUMENT;
         return;
     }
 
-    if (m_transfers_per_axi_intr == 0) {
-        LOGGER__ERROR("Invalid transfers per axi interrupt");
+    if (channel_id.engine_index >= driver.dma_engines_count()) {
+        LOGGER__ERROR("Invalid DMA engine index {}, max {}", channel_id.engine_index, driver.dma_engines_count());
         status = HAILO_INVALID_ARGUMENT;
         return;
     }
-}
 
-void BoundaryChannel::clear_pending_buffers_descriptors()
-{
-    for (const auto &pending_buffer : m_state->m_pending_buffers) {
-        const auto last_desc_index = pending_buffer.last_desc;
+    CB_INIT(m_descs, descs_count);
 
-        // Clear relevant descriptors from previous transfer
-        if (nullptr != m_latency_meter) {
-            const auto latency_desc_index = pending_buffer.latency_measure_desc;
-            m_desc_list->clear_descriptor(latency_desc_index);
-        }
-        m_desc_list->clear_descriptor(last_desc_index);
+    status = allocate_descriptor_list(descs_count, desc_page_size);
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to allocate Vdma buffer for channel transfer! status={}", status);
+        return;
     }
+
+    status = HAILO_SUCCESS;
 }
 
 hailo_status BoundaryChannel::trigger_channel_completion(uint16_t hw_num_processed)
 {
-    PendingBuffersQueue completed_buffers{PENDING_BUFFERS_SIZE};
+    // NOTE: right now, we can retake the 'completion' descriptor for a new transfer before handling the interrupt.
+    //      we should have our own pointers indicating whats free instead of reading from HW.
 
-    {
-        // NOTE: right now, we can retake the 'completion' descriptor for a new transfer before handling the interrupt.
-        //      we should have our own pointers indicating whats free instead of reading from HW.
+    std::unique_lock<std::mutex> lock(m_channel_mutex);
 
-        std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
+    if (!m_is_channel_activated) {
+        return HAILO_STREAM_NOT_ACTIVATED;
+    }
 
-        if (m_state->m_is_aborted) {
-            return HAILO_STREAM_ABORTED_BY_USER;
-        }
+    // Although the hw_num_processed should be a number between 0 and m_descs.size-1, if m_desc.size < 0x10000
+    // (the maximum desc size), the actual hw_num_processed is a number between 1 and m_descs.size. Therefore the
+    // value can be m_descs.size, in this case we change it to zero.
+    hw_num_processed = static_cast<uint16_t>(hw_num_processed & m_descs.size_mask);
+
+    if (m_latency_meter != nullptr) {
+        // The latency meter gets an updated hw_num_processed via a call to vdma_interrupts_read_timestamps
+        // (the desc index of the last measured timestamp returned from that ioctl). Since update_latency_meter
+        // processed m_ongoing_transfers based on this hw_num_processed, and this function (i.e.
+        // trigger_channel_completion) also processes m_ongoing_transfers based on the value of hw_num_processed,
+        // we want the two to be the same. Hence, we'll use the more up to date num_processed returned by
+        // update_latency_meter.
+        // TODO: fix update_latency_meter flow (HRT-10284)
+        auto latency_meter_hw_num_processed = update_latency_meter();
+        CHECK_EXPECTED_AS_STATUS(latency_meter_hw_num_processed);
+        hw_num_processed = latency_meter_hw_num_processed.value();
+    }
 
-        if (!m_state->m_is_channel_activated) {
-            return HAILO_STREAM_NOT_ACTIVATED;
-        }
+    while (!m_ongoing_transfers.empty()) {
+        // Reading previous_num_processed inside the loop since on_transfer_complete may increase this value.
+        const auto previous_num_processed = static_cast<uint16_t>(CB_TAIL(m_descs));
 
-        // Although the hw_num_processed should be a number between 0 and m_descs.size-1, if m_desc.size < 0x10000
-        // (the maximum desc size), the actual hw_num_processed is a number between 1 and m_descs.size. Therefore the
-        // value can be m_descs.size, in this case we change it to zero.
-        hw_num_processed = static_cast<uint16_t>(hw_num_processed & m_state->m_descs.size_mask);
-
-        if (m_latency_meter != nullptr) {
-            // The latency meter gets an updated hw_num_processed via a call to vdma_interrupts_read_timestamps
-            // (the desc index of the last measured timestamp returned from that ioctl). Since update_latency_meter
-            // processed m_pending_buffers based on this hw_num_processed, and this function (i.e.
-            // trigger_channel_completion) also processes m_pending_buffers based on the value of hw_num_processed,
-            // we want the two to be the same. Hence, we'll use the more up to date num_processed returned by
-            // update_latency_meter.
-            // TODO: fix update_latency_meter flow (HRT-10284)
-            auto latency_meter_hw_num_processed = update_latency_meter();
-            CHECK_EXPECTED_AS_STATUS(latency_meter_hw_num_processed);
-            hw_num_processed = latency_meter_hw_num_processed.value();
+        if (!is_transfer_complete(m_ongoing_transfers.front(), previous_num_processed, hw_num_processed)) {
+            break;
         }
 
-        const auto previous_num_processed = static_cast<uint16_t>(CB_TAIL(m_state->m_descs));
-
-        // Calculate pending_buffers_count before iteration, because the iteration removes done transfers.
-        const auto pending_buffers_count = m_state->m_pending_buffers.size();
-        for (size_t i = 0; i < pending_buffers_count; i++) {
-            if (!is_complete(m_state->m_pending_buffers.front(), previous_num_processed, hw_num_processed)) {
-                break;
-            }
+        auto transfer = std::move(m_ongoing_transfers.front());
+        m_ongoing_transfers.pop_front();
 
-            // Move item from pending_buffers to completed_buffers
-            completed_buffers.push_back(std::move(m_state->m_pending_buffers.front()));
-            m_state->m_pending_buffers.pop_front();
-        }
-    }
+        hailo_status complete_status = HAILO_SUCCESS;
 
-    // completed_buffers were copied from m_pending_buffers inside the lock. Now we are free to process them and call
-    // the right completion callbacks without state mutex held.
-    for (auto &pending_buffer : completed_buffers) {
-        on_pending_buffer_irq(pending_buffer);
-    }
+        #ifndef NDEBUG
+            auto &last_desc = (*m_desc_list)[transfer.last_desc];
+            if (!last_desc.is_done() || last_desc.is_error()) {
+                LOGGER__ERROR("Error while processing descriptor {} of DMA {} on device {} DESC_STATUS=0x{:x}.",
+                    transfer.last_desc, m_channel_id, m_driver.device_id(), last_desc.status());
+                complete_status = HAILO_INTERNAL_FAILURE;
+            }
+        #endif
 
-    if (!completed_buffers.empty()) {
-        m_state->transfer_buffer_cv().notify_all();
+        on_transfer_complete(lock, transfer, complete_status);
     }
 
     return HAILO_SUCCESS;
 }
 
-void BoundaryChannel::register_interrupt_callback(const ProcessingCompleteCallback &callback)
-{
-    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
-    m_user_interrupt_callback = callback;
-}
-
-CONTROL_PROTOCOL__host_buffer_info_t BoundaryChannel::get_boundary_buffer_info(uint32_t transfer_size)
+CONTROL_PROTOCOL__host_buffer_info_t BoundaryChannel::get_boundary_buffer_info(uint32_t transfer_size) const
 {
     // Boundary channels always have scatter gather buffers
     return VdmaBuffer::get_host_buffer_info(VdmaBuffer::Type::SCATTER_GATHER, m_desc_list->dma_address(), 
         m_desc_list->desc_page_size(), m_desc_list->count(), transfer_size);
 }
 
-hailo_status BoundaryChannel::abort()
+hailo_status BoundaryChannel::activate()
 {
-    {
-        std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
-        m_state->m_is_aborted = true;
-    }
+    std::lock_guard<std::mutex> lock(m_channel_mutex);
 
-    m_state->transfer_buffer_cv().notify_all();
+    CHECK(!m_is_channel_activated, HAILO_INTERNAL_FAILURE,
+        "Vdma channel {} is already activated", m_channel_id);
+    m_is_channel_activated = true;
+    assert(m_ongoing_transfers.empty());
+    m_last_timestamp_num_processed = 0;
+    CB_RESET(m_descs);
 
     return HAILO_SUCCESS;
 }
 
-hailo_status BoundaryChannel::clear_abort()
+hailo_status BoundaryChannel::deactivate()
 {
-    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
-    m_state->m_is_aborted = false;
+    std::unique_lock<std::mutex> lock(m_channel_mutex);
+    m_is_channel_activated = false;
+
+    // Note: OngoingTransfers held by m_ongoing_transfers may still hold copies of the current callback
+    // which in turn holds a reference to *this. Since we deactivate the channel there's no risk that
+    // these callbacks will be called and we don't need to reset this callback.
 
     return HAILO_SUCCESS;
 }
 
-hailo_status BoundaryChannel::activate(uint32_t transfer_size, bool resume_pending_transfers)
+hailo_status BoundaryChannel::launch_transfer(TransferRequest &&transfer_request, bool user_owns_buffer)
 {
-    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
+    std::unique_lock<std::mutex> lock(m_channel_mutex);
+    if (!m_is_channel_activated) {
+        return HAILO_STREAM_NOT_ACTIVATED;
+    }
 
-    CHECK(!m_state->m_is_channel_activated, HAILO_INTERNAL_FAILURE,
-        "Vdma channel {} is already activated", m_channel_id);
-    m_state->m_is_channel_activated = true;
-    clear_pending_buffers_descriptors();
-    m_state->reset_counters();
+    if (m_ongoing_transfers.size() >= get_max_ongoing_transfers(transfer_request.buffer.size())) {
+        return HAILO_QUEUE_IS_FULL;
+    }
 
-    auto status = complete_channel_activation(transfer_size, resume_pending_transfers);
-    if (HAILO_SUCCESS != status) {
-        m_state->m_is_channel_activated = false;
-        return status;
+    auto mapped_buffer_exp = transfer_request.buffer.map_buffer(m_driver, m_direction);
+    CHECK_EXPECTED_AS_STATUS(mapped_buffer_exp);
+    auto mapped_buffer = mapped_buffer_exp.release();
+
+    // Syncing the buffer to device change its ownership from host to the device.
+    // We sync on D2H as well if the user owns the buffer since the buffer might have been changed by
+    // the host between the time it was mapped and the current async transfer. If the buffer is not owned by the user,
+    // it won't be accessed for write.
+    if ((Direction::H2D == m_direction) || user_owns_buffer) {
+        auto status = transfer_request.buffer.synchronize(m_driver, HailoRTDriver::DmaSyncDirection::TO_DEVICE);
+        CHECK_SUCCESS(status);
     }
 
-    return HAILO_SUCCESS;
-}
+    const auto desired_desc_num = m_desc_list->descriptors_in_buffer(transfer_request.buffer.size());
+    CHECK(desired_desc_num <= MAX_DESCS_COUNT, HAILO_INTERNAL_FAILURE);
+    const uint16_t desc_num = static_cast<uint16_t>(desired_desc_num);
 
-hailo_status BoundaryChannel::deactivate()
-{
-    std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
-    {
-        CHECK(m_state->m_is_channel_activated, HAILO_INTERNAL_FAILURE,
-            "Vdma channel {} is not activated", m_channel_id);
-        m_state->m_is_channel_activated = false;
+    const auto num_available = get_num_available();
+    const auto last_desc_avail = static_cast<uint16_t>((num_available + desc_num - 1) & m_descs.size_mask);
 
-        // Note: PendingBuffers held by m_pending_buffers may still hold copies of the current m_transfer_done_callback,
-        //       which in turn holds a reference to *this. Since we stop the m_wait_interrupts_thread there's no risk that
-        //       these callbacks will be called and we don't need to reset this callback.
+    auto status = prepare_descriptors(transfer_request.buffer.size(), num_available, mapped_buffer,
+        transfer_request.buffer.offset());
+    CHECK_SUCCESS(status);
 
-        auto status = complete_channel_deactivation();
-        CHECK_SUCCESS(status);
-    }
-    m_state->m_can_transfer_buffer_cv.notify_all();
+    add_ongoing_transfer(std::move(transfer_request), num_available, last_desc_avail);
+
+    status = inc_num_available(desc_num);
+    CHECK_SUCCESS(status);
 
     return HAILO_SUCCESS;
 }
 
-BoundaryChannel::Type BoundaryChannel::type() const
+void BoundaryChannel::cancel_pending_transfers()
 {
-    return m_type;
+    std::unique_lock<std::mutex> lock(m_channel_mutex);
+    while (!m_ongoing_transfers.empty()) {
+        auto transfer = std::move(m_ongoing_transfers.front());
+        m_ongoing_transfers.pop_front();
+
+        on_transfer_complete(lock, transfer, HAILO_STREAM_ABORTED_BY_USER);
+    }
 }
 
-hailo_status BoundaryChannel::set_transfers_per_axi_intr(uint16_t transfers_per_axi_intr)
+size_t BoundaryChannel::get_max_ongoing_transfers(size_t transfer_size) const
 {
-    CHECK(0 != transfers_per_axi_intr, HAILO_INVALID_ARGUMENT, "Invalid transfers per axi interrupt");
-    m_transfers_per_axi_intr = transfers_per_axi_intr;
-    return HAILO_SUCCESS;
+    const auto descs_in_transfer = m_desc_list->descriptors_in_buffer(transfer_size);
+    const auto descs_count = CB_SIZE(m_descs);
+    size_t max_transfers_in_buffer = (descs_count - 1) / descs_in_transfer;
+
+    return std::min(max_transfers_in_buffer, m_ongoing_transfers.capacity());
 }
 
-hailo_status BoundaryChannel::flush(const std::chrono::milliseconds &timeout)
+Expected<uint16_t> BoundaryChannel::update_latency_meter()
 {
-    if (Direction::D2H == m_direction) {
-        // We are not buffering user data
-        return HAILO_SUCCESS;
+    uint16_t last_num_processed = m_last_timestamp_num_processed;
+
+    auto timestamp_list = m_driver.vdma_interrupts_read_timestamps(m_channel_id);
+    CHECK_EXPECTED(timestamp_list);
+
+    if (0 == timestamp_list->count) {
+        // No new timestamps for this channel, return the previous result
+        return Expected<uint16_t>(last_num_processed);
     }
 
-    std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
-    hailo_status status = HAILO_SUCCESS; // Best effort
-    bool was_successful = m_state->transfer_buffer_cv().wait_for(state_guard, timeout, [this, &status] () {
-        if (m_state->m_is_aborted) {
-            status = HAILO_STREAM_ABORTED_BY_USER;
-            return true; // return true so that the wait will finish
-        }
-        if (!m_state->m_is_channel_activated) {
-            status = HAILO_STREAM_NOT_ACTIVATED;
-            return true; // return true so that the wait will finish
+    // TODO: now we have more iterations than we need. We know that the pending buffers + the timestamp list
+    // are ordered. If ongoing_transfers[i] is not in any of the timestamps_list[0, 1, ... k], then
+    // also ongoing_transfers[i+1,i+2,...]
+    // not in those timestamps
+
+    for (const auto &transfer : m_ongoing_transfers) {
+        uint16_t latency_desc = static_cast<uint16_t>(transfer.latency_measure_desc);
+        for (size_t i = 0; i < timestamp_list->count; i++) {
+            const auto &irq_timestamp = timestamp_list->timestamp_list[i];
+            const auto desc_num_processed = static_cast<uint16_t>(irq_timestamp.desc_num_processed & m_descs.size_mask);
+            if (is_desc_between(last_num_processed, desc_num_processed, latency_desc)) {
+                if (m_direction == Direction::H2D) {
+                    m_latency_meter->add_start_sample(irq_timestamp.timestamp);
+                }
+                else {
+                    m_latency_meter->add_end_sample(m_stream_name, irq_timestamp.timestamp);
+                }
+                break;
+            }
         }
-        return m_state->m_pending_buffers.empty();
-    });
-    CHECK(was_successful, HAILO_TIMEOUT, "Got HAILO_TIMEOUT while waiting for channel {} interrupts on flush", m_channel_id);
-    return status;
-}
+    }
 
-bool BoundaryChannel::is_ready_for_transfer_h2d(size_t buffer_size)
-{
-    return has_room_in_desc_list(buffer_size);
+    m_last_timestamp_num_processed = static_cast<uint16_t>(
+        timestamp_list->timestamp_list[timestamp_list->count-1].desc_num_processed & m_descs.size_mask);
+    return Expected<uint16_t>(m_last_timestamp_num_processed);
 }
 
-bool BoundaryChannel::is_ready_for_transfer_d2h(size_t buffer_size)
+bool BoundaryChannel::is_transfer_complete(const OngoingTransfer &transfer, uint16_t previous_num_processed,
+    uint16_t current_num_processed) const
 {
-    return has_room_in_desc_list(buffer_size);
+    // Transfer is complete if its last descriptor is in [previous_num_processed, current_num_processed) or
+    // the buffer is empty (previous_num_processed == get_num_available())
+    return is_desc_between(previous_num_processed, current_num_processed, transfer.last_desc) ||
+        (current_num_processed == get_num_available());
 }
 
-bool BoundaryChannel::has_room_in_desc_list(size_t buffer_size)
+void BoundaryChannel::on_transfer_complete(std::unique_lock<std::mutex> &lock,
+    OngoingTransfer &transfer, hailo_status complete_status)
 {
-    size_t desired_desc_num = m_desc_list->descriptors_in_buffer(buffer_size);
-    assert(desired_desc_num <= MAX_DESCS_COUNT);
-    int desc_num = static_cast<int>(desired_desc_num);
+    // Clear relevant descriptors from previous transfer
+    if (nullptr != m_latency_meter) {
+        m_desc_list->clear_descriptor(transfer.latency_measure_desc);
+    }
+    m_desc_list->clear_descriptor(transfer.last_desc);
+
+    // We increase desc num_proc (can happen only in this flow). After it is increased -
+    //  1. On D2H channels - the output can be read by the user.
+    //  2. On H2D channels - new input can be written to the buffer.
+    _CB_SET(m_descs.tail, (transfer.last_desc + 1) & m_descs.size_mask);
+
+    // Finally, we notify user callbacks registered with the transfer.
+    // We want to make sure that the callbacks are called after the descriptors can be reused (So the user will
+    // be able to start new transfer).
+    lock.unlock();
+
+    if (Direction::D2H == m_direction) {
+        auto sync_status = transfer.request.buffer.synchronize(m_driver, HailoRTDriver::DmaSyncDirection::TO_HOST);
+        if (HAILO_SUCCESS != sync_status) {
+            LOGGER__ERROR("Failed to sync buffer for output channel {} device {}", m_channel_id, m_driver.device_id());
+            if (HAILO_SUCCESS != complete_status) {
+                complete_status = sync_status;
+            }
+        }
 
-    if (m_state->m_pending_buffers.full()) {
-        return false;
     }
 
-    int num_available = get_num_available();
-    int num_processed = CB_TAIL(m_state->m_descs);
+    transfer.request.callback(complete_status);
+    lock.lock();
+}
 
-    if (desc_num == m_state->m_descs.size) {
-        // Special case when the checking if the buffer is empty
-        return num_available == num_processed;
+hailo_status BoundaryChannel::prepare_descriptors(size_t transfer_size, uint16_t starting_desc,
+    MappedBufferPtr mapped_buffer, size_t buffer_offset)
+{
+    if (mapped_buffer != nullptr) {
+        CHECK((buffer_offset % m_desc_list->desc_page_size()) == 0, HAILO_INTERNAL_FAILURE,
+            "Buffer offset {} must be desc page size aligned {}", buffer_offset, m_desc_list->desc_page_size());
+        const size_t buffer_offset_in_descs = buffer_offset / m_desc_list->desc_page_size();
+        if (!is_buffer_already_configured(mapped_buffer, buffer_offset_in_descs, starting_desc)) {
+            // We need to configure the buffer now.
+
+            // First, store information on the buffer.
+            m_last_bounded_buffer.buffer = mapped_buffer;
+            m_last_bounded_buffer.starting_desc = starting_desc;
+            m_last_bounded_buffer.buffer_offset_in_descs = static_cast<uint16_t>(buffer_offset_in_descs);
+
+            // Now we want that m_desc_list[starting_desc] will be mapped into mapped_buffer[buffer_offset].
+            // The descriptors list configure always starts from buffer_offset=0, so in order to achieve our
+            // configuration, we configure the buffer starting from desc=(starting_desc - buffer_offset_in_desc).
+            // Then, after configuring buffer_offset bytes from the buffer, the desc_index will be starting desc.
+            const int desc_diff = static_cast<int>(starting_desc) - static_cast<int>(buffer_offset_in_descs);
+            const auto configure_starting_desc = static_cast<uint16_t>(m_descs.size + desc_diff) % m_descs.size;
+
+            // Finally do the actual configuration.
+            auto status = m_desc_list->configure_to_use_buffer(*mapped_buffer, m_channel_id, configure_starting_desc);
+            CHECK_SUCCESS(status);
+        }
     }
 
-    int num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);
-    if (num_free < desc_num) {
-        return false;
+    if ((nullptr != m_latency_meter) && (m_direction == Direction::H2D)) {
+        // If we measure latency, we need an interrupt on the first descriptor for each H2D channel.
+        m_desc_list->program_single_descriptor((*m_desc_list)[starting_desc], m_desc_list->desc_page_size(),
+            InterruptsDomain::HOST);
     }
+    auto last_desc_interrupts_domain = InterruptsDomain::HOST;
+    // TODO: HRT-11188 - fix starting_desc parameter
+    auto actual_desc_count = m_desc_list->program_last_descriptor(transfer_size, last_desc_interrupts_domain,
+        starting_desc);
+    CHECK_EXPECTED_AS_STATUS(actual_desc_count, "Failed to program desc_list for channel {}", m_channel_id);
 
-    return true;
+    return HAILO_SUCCESS;
 }
 
-hailo_status BoundaryChannel::wait(size_t buffer_size, std::chrono::milliseconds timeout,
-    bool stop_if_deactivated)
+bool BoundaryChannel::is_buffer_already_configured(MappedBufferPtr buffer, size_t buffer_offset_in_descs,
+    size_t starting_desc) const
 {
-    std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
-    assert(state_guard.owns_lock());
-
-    const auto max_transfer_size = m_desc_list->desc_page_size() * m_desc_list->count();
-    CHECK(buffer_size < max_transfer_size, HAILO_INVALID_ARGUMENT,
-        "Requested transfer size ({}) must be smaller than ({})", buffer_size, max_transfer_size);
-
-    auto is_ready_for_transfer = (Direction::H2D == m_direction) ?
-        std::bind(&BoundaryChannel::is_ready_for_transfer_h2d, this, buffer_size) :
-        std::bind(&BoundaryChannel::is_ready_for_transfer_d2h, this, buffer_size);
-
-    auto status = HAILO_SUCCESS; // Best effort
-    bool was_successful = m_state->transfer_buffer_cv().wait_for(state_guard, timeout,
-        [this, is_ready_for_transfer, stop_if_deactivated, &status] () {
-            if (m_state->m_is_aborted) {
-                status = HAILO_STREAM_ABORTED_BY_USER;
-                return true; // return true so that the wait will finish
-            }
-            if (stop_if_deactivated && !m_state->m_is_channel_activated) {
-                status = HAILO_STREAM_NOT_ACTIVATED;
-                return true; // return true so that the wait will finish
-            }
+    if (m_last_bounded_buffer.buffer != buffer) {
+        // Last buffer is nullptr or not the same as the given.
+        return false;
+    }
 
-            return is_ready_for_transfer();
-        }
-    );
-    CHECK(was_successful, HAILO_TIMEOUT, "Got HAILO_TIMEOUT while waiting for channel {} interrupts", m_channel_id);
-    return status;
+    // If the diff between starting_desc and m_last_bounded_buffer.starting_desc and the diff between
+    // buffer_offset_in_descs - m_last_bounded_buffer.buffer_offset_in_descs are equal, it means that the buffer is
+    // already configured.
+    // Note that we aren't afraid of overflow since buffer_offset_in_descs * desc_page_size() must fit inside the buffer.
+    const auto starting_desc_diff = (starting_desc - m_last_bounded_buffer.starting_desc) % m_descs.size;
+    const auto buffer_offset_diff_in_descs = (buffer_offset_in_descs - m_last_bounded_buffer.buffer_offset_in_descs) % m_descs.size;
+    return starting_desc_diff == buffer_offset_diff_in_descs;
 }
 
-bool BoundaryChannel::is_complete(const PendingBuffer &pending_buffer, uint16_t previous_num_processed,
-    uint16_t current_num_processed)
+void BoundaryChannel::add_ongoing_transfer(TransferRequest &&transfer_request, uint16_t first_desc, uint16_t last_desc)
 {
-    // Transfer is complete if its last descriptor is in [previous_num_processed, current_num_processed) or
-    // the the buffer is empty (previous_num_processed == get_num_available())
-    return is_desc_between(previous_num_processed, current_num_processed, pending_buffer.last_desc) ||
-        (current_num_processed == get_num_available());
+    OngoingTransfer transfer{};
+    transfer.request = std::move(transfer_request);
+    transfer.last_desc = last_desc;
+    transfer.latency_measure_desc = (m_direction == HailoRTDriver::DmaDirection::H2D) ? first_desc : last_desc;
+    m_ongoing_transfers.push_back(std::move(transfer));
 }
 
+hailo_status BoundaryChannel::inc_num_available(uint16_t value)
+{
+    int num_available = get_num_available();
+    int num_processed = CB_TAIL(m_descs);
+    int num_free = CB_AVAIL(m_descs, num_available, num_processed);
+    if (value > num_free) {
+        return HAILO_OUT_OF_DESCRIPTORS;
+    }
+
+    CB_ENQUEUE(m_descs, value);
+    num_available = (num_available + value) & m_descs.size_mask;
+
+    return m_host_registers.set_num_available(static_cast<uint16_t>(num_available));
+}
 
-void BoundaryChannel::on_pending_buffer_irq(PendingBuffer &pending_buffer)
+bool BoundaryChannel::is_desc_between(uint16_t begin, uint16_t end, uint16_t desc)
 {
-#ifndef NDEBUG
-    auto &last_desc = (*m_desc_list)[pending_buffer.last_desc];
-    if (!last_desc.is_done() || last_desc.is_error()) {
-        LOGGER__ERROR("Error while processing descriptor {} of DMA {} on device {} DESC_STATUS=0x{:x}.",
-            pending_buffer.last_desc, m_channel_id, m_driver.device_id(), last_desc.status());
-        pending_buffer.on_transfer_done(HAILO_INTERNAL_FAILURE);
-        return;
+    if (begin == end) {
+        // There is nothing between
+        return false;
     }
-#endif
+    if (begin < end) {
+        // desc needs to be in [begin, end)
+        return (begin <= desc) && (desc < end);
+    }
+    else {
+        // desc needs to be in [0, end) or [begin, m_descs.size()-1]
+        return (desc < end) || (begin <= desc);
+    }
+}
 
-    {
-        std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
+uint16_t BoundaryChannel::get_num_available() const
+{
+    uint16_t num_available = (uint16_t)CB_HEAD(m_descs);
 
-        // First, we want to call m_user_interrupt_callback. This callback is meant to be called right after we
-        // got an interrupt and before the user can read the frame or write a new frame.
-        // We call this callback inside the lock to make sure it wont be called when the channel is aborted.
-        if (!m_state->m_is_aborted) {
-            m_user_interrupt_callback();
-        }
+#ifndef NDEBUG
+    // Validate synchronization with HW
+    auto hw_num_avail = m_host_registers.get_num_available();
+    assert(hw_num_avail);
 
-        // Then we increase desc num_proc (can happen only in this flow). After it is increased -
-        //  1. On D2H channels - the output can be read by the user.
-        //  2. On H2D channels - new input can be written to the buffer.
-        // Clear relevant descriptors from previous transfer
-        if (nullptr != m_latency_meter) {
-            m_desc_list->clear_descriptor(pending_buffer.latency_measure_desc);
-        }
-        m_desc_list->clear_descriptor(pending_buffer.last_desc);
+    // On case of channel aborted, the num_available is set to 0 (so we don't accept sync)
+    auto is_aborted_exp = m_host_registers.is_aborted();
+    assert(is_aborted_exp);
 
-        _CB_SET(m_state->m_descs.tail, (pending_buffer.last_desc + 1) & m_state->m_descs.size_mask);
+    if (!is_aborted_exp.value()) {
+        assert(hw_num_avail.value() == num_available);
     }
+#endif
+    return num_available;
+}
 
-    // Finally, we notify user callbacks registered with the transfer.
-    // We want to make sure that the callbacks are called after the descriptors can be reused (So the user will
-    // be able to start new transfer).
-    pending_buffer.on_transfer_done(HAILO_SUCCESS);
+hailo_status BoundaryChannel::allocate_descriptor_list(uint32_t descs_count, uint16_t desc_page_size)
+{
+    static const bool CIRCULAR = true;
+    auto desc_list_exp = DescriptorList::create(descs_count, desc_page_size, CIRCULAR, m_driver);
+    CHECK_EXPECTED_AS_STATUS(desc_list_exp);
+
+    m_desc_list = make_shared_nothrow<DescriptorList>(desc_list_exp.release());
+    CHECK_NOT_NULL(m_desc_list, HAILO_OUT_OF_HOST_MEMORY);
+
+    return HAILO_SUCCESS;
 }
 
 } /* namespace vdma */
index 6b7580e521aa8b4aada1993a3f4272725da3bd6f..e2533d5ded94816735bd72dd32a588dee0f52d33 100644 (file)
@@ -4,24 +4,20 @@
  **/
 /**
  * @file boundary_channel.hpp
- * @brief BoundaryChannel - vdma boundary channel interface
- *      The hierarchy is as follows:
- *        -------------------------------------------------------------------------
- *        |             ChannelBase               | (Base class - includes state) |
- *        |                   |                   |                               |
- *        |           BoundaryChannel             | (Boundary interface)          |
- *        |            /           \              |                               |
- *        | AsyncChannel       BufferedChannel    | (Impls)                       |
- *        -------------------------------------------------------------------------
+ * @brief BoundaryChannel - vdma boundary channel
  **/
 
 #ifndef _HAILO_VDMA_BOUNDARY_CHANNEL_HPP_
 #define _HAILO_VDMA_BOUNDARY_CHANNEL_HPP_
 
-#include "hailo/hailort.h"
-#include "hailo/stream.hpp"
+#include "vdma/channel/vdma_channel_regs.hpp"
+#include "vdma/channel/channel_id.hpp"
+#include "vdma/memory/descriptor_list.hpp"
+#include "stream_common/transfer_common.hpp"
 
-#include "vdma/channel/channel_base.hpp"
+#include "common/latency_meter.hpp"
+
+#include "context_switch_defs.h"
 
 #include <memory>
 
 namespace hailort {
 namespace vdma {
 
+struct OngoingTransfer {
+    TransferRequest request;
+    uint16_t last_desc;
+    uint16_t latency_measure_desc;
+};
+
 class BoundaryChannel;
 using BoundaryChannelPtr = std::shared_ptr<BoundaryChannel>;
-
-using ProcessingCompleteCallback = std::function<void()>;
-
-class BoundaryChannel : public ChannelBase
+class BoundaryChannel final
 {
 public:
-    enum class Type
-    {
-        BUFFERED = 0,
-        ASYNC
-    };
+    using Direction = HailoRTDriver::DmaDirection;
 
     static Expected<BoundaryChannelPtr> create(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
-        uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name = "", LatencyMeterPtr latency_meter = nullptr,
-        uint16_t transfers_per_axi_intr = 1, Type type = Type::BUFFERED);
+        uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name = "", LatencyMeterPtr latency_meter = nullptr);
 
-    BoundaryChannel(Type type, vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,
-        uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr,
+    BoundaryChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,
+        uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter,
         hailo_status &status);
     BoundaryChannel(const BoundaryChannel &other) = delete;
     BoundaryChannel &operator=(const BoundaryChannel &other) = delete;
@@ -57,92 +51,97 @@ public:
     virtual ~BoundaryChannel() = default;
 
     // Called after the FW activated the channel.
-    hailo_status activate(uint32_t transfer_size, bool resume_pending_transfers);
+    hailo_status activate();
 
     // Called before the FW deactivated the channel.
     hailo_status deactivate();
 
-    Type type() const;
-    hailo_status set_transfers_per_axi_intr(uint16_t transfers_per_axi_intr);
-
-    void clear_pending_buffers_descriptors();
     hailo_status trigger_channel_completion(uint16_t hw_num_processed);
 
-    // Register some new interrupt callback (and reset previous).
-    // Note - when reseting an old callback, it may still be called (until interrupts are stopped).
-    void register_interrupt_callback(const ProcessingCompleteCallback &callback);
-
-    CONTROL_PROTOCOL__host_buffer_info_t get_boundary_buffer_info(uint32_t transfer_size);
-    virtual hailo_status abort();
-    virtual hailo_status clear_abort();
-
-    // For D2H channels, we don't buffer data
-    // Hence there's nothing to be "flushed" and the function will return with HAILO_SUCCESS
-    virtual hailo_status flush(const std::chrono::milliseconds &timeout);
-
-    // Blocks until buffer_size bytes can transferred to/from the channel or until timeout has elapsed.
-    // If stop_if_deactivated is true, this function will return HAILO_STREAM_NOT_ACTIVATED after deactivate()
-    // is called. Otherwise, this function can be used to access the buffer while the channel is not active.
-    hailo_status wait(size_t buffer_size, std::chrono::milliseconds timeout, bool stop_if_deactivated=false);
-
-    // Transfers count bytes to/from buf via the channel.
-    // Blocks until the transfer can be registered or timeout has elapsed. Hence, calling 'wait(buffer_size, timeout)'
-    // prior to 'transfer(buf, buffer_size)' is redundant.
-    virtual hailo_status transfer_sync(void *buf, size_t count, std::chrono::milliseconds timeout) = 0;
-
-    // TODO: can write_buffer + send_pending_buffer move to BufferedChannel? (HRT-9105)
-    // Either write_buffer + send_pending_buffer or transfer (h2d) should be used on a given channel, not both
-    virtual hailo_status write_buffer(const MemoryView &buffer, std::chrono::milliseconds timeout,
-        const std::function<bool()> &should_cancel) = 0;
-    virtual hailo_status send_pending_buffer() = 0;
-
-    // When the transfer is complete (i.e. data is written to/from buffer with a D2H/H2D channel) callback is called
-    // transfer_request.buffer can't be freed/changed until callback is called.
-    virtual hailo_status transfer_async(TransferRequest &&transfer_request) = 0;
-
     // Calls all pending transfer callbacks (if they exist), marking them as canceled by passing
     // HAILO_STREAM_ABORTED_BY_USER as a status to the callbacks.
     // Note: This function is to be called on a deactivated channel object. Calling on an active channel will lead to
     // unexpected results
-    virtual hailo_status cancel_pending_transfers() = 0;
+    void cancel_pending_transfers();
 
-    virtual void notify_all() = 0;
+    // user_owns_buffer is set when the buffer is owned by the user (otherwise we may make some assumptions).
+    hailo_status launch_transfer(TransferRequest &&transfer_request, bool user_owns_buffer);
 
-    class BufferState {
-    public:
-        std::vector<std::pair<uint16_t, Buffer>> desc_buffer_pairing;
-        uint16_t num_avail;
-        uint16_t num_processed;
-        uint16_t hw_num_avail;
-        uint16_t hw_num_processed;
-    };
+    size_t get_max_ongoing_transfers(size_t transfer_size) const;
 
-    // Assumes that the channel is idle; doesn't block changes to the channel
-    // To be used for debugging purposes
-    // TODO: these will move to BufferedChannel (HRT-9105)
-    virtual Expected<BufferState> get_buffer_state() = 0;
-    virtual Expected<size_t> get_h2d_pending_frames_count() = 0;
-    virtual Expected<size_t> get_d2h_pending_descs_count() = 0;
+    CONTROL_PROTOCOL__host_buffer_info_t get_boundary_buffer_info(uint32_t transfer_size) const;
 
-protected:
-    static void ignore_processing_complete() {}
-    void stop_interrupts_thread(std::unique_lock<RecursiveSharedMutex> &lock);
-    virtual bool is_ready_for_transfer_h2d(size_t buffer_size);
-    virtual bool is_ready_for_transfer_d2h(size_t buffer_size);
+    vdma::ChannelId get_channel_id() const
+    {
+        return m_channel_id;
+    }
 
-    // Called after activate/deactivate with the state mutex held
-    virtual hailo_status complete_channel_activation(uint32_t transfer_size, bool resume_pending_transfers) = 0;
-    virtual hailo_status complete_channel_deactivation() = 0;
+    const std::string &stream_name() const
+    {
+        return m_stream_name;
+    }
 
-    const Type m_type;
-    ProcessingCompleteCallback m_user_interrupt_callback;
-    uint16_t m_transfers_per_axi_intr;
+    std::shared_ptr<DescriptorList> get_desc_list()
+    {
+        return m_desc_list;
+    }
 
 private:
-    bool has_room_in_desc_list(size_t buffer_size);
-    bool is_complete(const PendingBuffer &pending_buffer, uint16_t previous_num_processed,
-        uint16_t current_num_processed);
-    void on_pending_buffer_irq(PendingBuffer &buffer);
+    static void empty_transfer_done_callback(hailo_status){}
+
+    // Returns the desc index of the last desc whose timestamp was measured in the driver
+    Expected<uint16_t> update_latency_meter();
+
+    bool is_transfer_complete(const OngoingTransfer &transfer, uint16_t previous_num_processed,
+        uint16_t current_num_processed) const;
+    void on_transfer_complete(std::unique_lock<std::mutex> &lock, OngoingTransfer &transfer,
+        hailo_status complete_status);
+    hailo_status prepare_descriptors(size_t transfer_size, uint16_t starting_desc,
+        MappedBufferPtr mapped_buffer, size_t buffer_offset);
+
+    bool is_buffer_already_configured(MappedBufferPtr buffer, size_t buffer_offset_in_descs, size_t starting_desc) const;
+    void add_ongoing_transfer(TransferRequest &&transfer_request, uint16_t first_desc, uint16_t last_desc);
+
+    static bool is_desc_between(uint16_t begin, uint16_t end, uint16_t desc);
+    uint16_t get_num_available() const;
+    hailo_status inc_num_available(uint16_t value);
+    hailo_status allocate_descriptor_list(uint32_t descs_count, uint16_t desc_page_size);
+
+    const vdma::ChannelId m_channel_id;
+    const Direction m_direction;
+    HailoRTDriver &m_driver;
+    VdmaChannelRegs m_host_registers;
+    std::shared_ptr<DescriptorList> m_desc_list; // Host side descriptor list
+    const std::string m_stream_name;
+    circbuf_t m_descs;
+    LatencyMeterPtr m_latency_meter;
+    bool m_is_channel_activated;
+    std::mutex m_channel_mutex;
+    CircularArray<OngoingTransfer> m_ongoing_transfers;
+
+    // Contains the last num_processed of the last interrupt (only used on latency measurement)
+    uint16_t m_last_timestamp_num_processed;
+
+    struct BoundedBuffer {
+        MappedBufferPtr buffer;
+
+        // The buffer is bounded starting from this descriptor.
+        uint16_t starting_desc;
+
+        // Offset inside the buffer (in desc_page_size granularity) of the "actual start" of the buffer.
+        // It implies that:
+    //      desc_list[starting_desc] will point to buffer[buffer_offset_in_descs * desc_page_size].
+        uint16_t buffer_offset_in_descs;
+    };
+
+    // We store the last bounded buffer as cache in order to avoid unnecessary descriptors list reprogramming.
+    // It is good enough to store only the last bounded buffer because we have two modes of execution:
+    //      1. User allocated buffers - On each transfer we bind new buffer. Even if the user always uses the same
+    //         buffers, due to the circular nature of descriptor list, reprogramming will almost always be needed (So
+    //         caching won't help).
+    //      2. Single circular buffer (internally) - In this case we don't need to bind each time (maybe after the
+    //         channel is re-activated). Caching the last bounded buffer is enough.
+    BoundedBuffer m_last_bounded_buffer;
 };
 
 } /* namespace vdma */
diff --git a/hailort/libhailort/src/vdma/channel/buffered_channel.cpp b/hailort/libhailort/src/vdma/channel/buffered_channel.cpp
deleted file mode 100644 (file)
index 55602d0..0000000
+++ /dev/null
@@ -1,557 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file buffered_channel.cpp
- * @brief Implementation of the BufferedChannel class
- **/
-
-#include "hailo/hailort_common.hpp"
-
-#include "common/logger_macros.hpp"
-
-#include "vdma/channel/buffered_channel.hpp"
-#include "hw_consts.hpp"
-
-#include <list>
-#include <chrono>
-#include <thread>
-#include <iostream>
-
-
-namespace hailort {
-namespace vdma {
-
-Expected<BufferedChannelPtr> BufferedChannel::create(vdma::ChannelId channel_id, Direction direction,
-    HailoRTDriver &driver, uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,
-    LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr)
-{
-    hailo_status status = HAILO_UNINITIALIZED;
-    auto channel_ptr = make_shared_nothrow<vdma::BufferedChannel>(channel_id, direction, driver, descs_count,
-        desc_page_size, stream_name, latency_meter, transfers_per_axi_intr, status);
-    CHECK_NOT_NULL_AS_EXPECTED(channel_ptr, HAILO_OUT_OF_HOST_MEMORY);
-    CHECK_SUCCESS_AS_EXPECTED(status, "Failed creating BufferedChannel");
-
-    return channel_ptr;
-}
-
-BufferedChannel::BufferedChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
-                                 uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name,
-                                 LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr, hailo_status &status) :
-    BoundaryChannel(BoundaryChannel::Type::BUFFERED, channel_id, direction, driver, descs_count, desc_page_size,
-                    stream_name, latency_meter, transfers_per_axi_intr, status),
-    m_channel_buffer(nullptr),
-    m_pending_buffers_sizes(0),
-    m_pending_num_avail_offset(0)
-{
-    // Check that base constructor was successful
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Failed building boundary channel base class");
-        return;
-    }
-
-    auto mapped_buffer = MappedBuffer::create_shared(driver, direction, descs_count * desc_page_size);
-    if (!mapped_buffer) {
-        LOGGER__ERROR("Failed building mapped vdma buffer");
-        status = mapped_buffer.status();
-        return;
-    }
-    m_channel_buffer = mapped_buffer.release();
-
-    status = m_desc_list->configure_to_use_buffer(*m_channel_buffer, channel_id, 0);
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Failed binding vdma buffer to desc list");
-        return;
-    }
-
-    m_pending_buffers_sizes = CircularArray<size_t>(descs_count);
-
-    status = HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::complete_channel_deactivation()
-{
-    const auto status = store_channel_buffer_state();
-    CHECK_SUCCESS(status);
-
-    if (Direction::H2D == m_direction) {
-        clear_pending_buffers_descriptors();
-        // For H2D channels we reset counters as we want to allow writes to the start of the buffer while the channel is stopped
-        m_state->reset_counters();
-    }
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::store_channel_buffer_state()
-{
-    // TODO: If a D2H channel is deactivated before all of it's pending frames have recv'd ints
-    //       we'll store a tail value that won't be up to date when the channel is activated again.
-    //       Potentially, we might overwrite frames in that situation. Note that we can't flush() in the case
-    //       of D2H channels (as we can with H2D channels), because num_avail may be greater than the number of frames
-    //       that will be recv'd on a given channel. E.g., upon channel activation for the first time we call
-    //       prepare_d2h_pending_descriptors with the maximum number of descs possible for this channel, which will
-    //       accommodate X frames. If the usert only sends Y < X frames on the input channel, only Y output frames will
-    //       be recv'd (assuming one output frame per input frame). Hence, flush() won't return (we won't dequeue all
-    //       pending buffers). This needs to be handled by the sched that uses this feature. (HRT-9456)
-    auto tail = get_hw_num_processed();
-    CHECK_EXPECTED_AS_STATUS(tail);
-
-    const auto temp = m_state->m_previous_tail;
-    m_state->m_previous_tail = (tail.value() + m_state->m_previous_tail) & m_state->m_descs.size_mask;
-    m_state->m_desc_list_delta = temp - m_state->m_previous_tail;
-
-    return HAILO_SUCCESS;
-}
-
-Expected<size_t> BufferedChannel::get_h2d_pending_frames_count()
-{
-    return m_pending_buffers_sizes.size();
-}
-
-Expected<size_t> BufferedChannel::get_d2h_pending_descs_count()
-{
-    std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
-
-    int num_proc = CB_TAIL(m_state->m_descs);
-    int desc_num_ready = CB_PROG(m_state->m_descs, num_proc, m_state->m_d2h_read_desc_index);
-
-    return desc_num_ready;
-}
-
-hailo_status BufferedChannel::prepare_d2h_pending_descriptors(uint32_t transfer_size, uint32_t transfers_count)
-{
-    // on D2H no need for interrupt of first descriptor
-    const auto first_desc_interrupts_domain = InterruptsDomain::NONE;
-    for (uint32_t i = 0; i < transfers_count; i++) {
-        // Provide FW interrupt only in the end of the last transfer in the batch
-        auto last_desc_interrutps_domain =
-            (static_cast<uint32_t>(m_transfers_per_axi_intr - 1) == (i % m_transfers_per_axi_intr)) ?
-                InterruptsDomain::BOTH : InterruptsDomain::HOST;
-        auto status = prepare_descriptors(transfer_size, first_desc_interrupts_domain, last_desc_interrutps_domain);
-        if (HAILO_STREAM_NOT_ACTIVATED == status) {
-            LOGGER__INFO("preparing descriptors failed because channel is not activated");
-            return status;
-        }
-        CHECK_SUCCESS(status, "Failed prepare desc status={}", status);
-    }
-
-    // We assume each output transfer is in the same size
-    m_state->m_accumulated_transfers += ((m_state->m_accumulated_transfers + transfers_count) % m_transfers_per_axi_intr);
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::complete_channel_activation(uint32_t transfer_size, bool resume_pending_transfers)
-{
-    auto status = HAILO_UNINITIALIZED;
-
-    // We should have no active transfers now
-    if (resume_pending_transfers) {
-        // We want the first descriptor (at index zero) to point to where the descriptor at index
-        // m_state->m_previous_tail currently points to:
-        // * In the case of a D2H channel, m_state->m_previous_tail is the index of the desc where the hw would next
-        //   write to (num_proc). Hence, the hw will now write exactly where it left off. Previously unread frames from
-        //   the user (pointed to by m_state->m_d2h_read_desc_index) can still be read (the hw won't overwrite them).
-        // * In the case of a H2D channel, m_state->m_previous_tail is the index of the desc where the hw would next
-        //   read from (num_proc). Hence, the hw will now read exactly where it left off. Previously written frames
-        //   from the user (that appear before m_state->m_previous_tail), will not be re-written.
-        const uint32_t starting_desc_offset = (m_desc_list->count() - m_state->m_previous_tail) % m_desc_list->count();
-        status = m_desc_list->configure_to_use_buffer(*m_channel_buffer, m_channel_id,
-            starting_desc_offset);
-        CHECK_SUCCESS(status);
-
-        if (Direction::D2H == m_direction) {
-            // m_d2h_read_desc_index, which is relative to the first desc, needs to shift by m_desc_list_delta
-            m_state->m_d2h_read_desc_index = (m_state->m_d2h_read_desc_index + m_state->m_desc_list_delta) & m_state->m_descs.size_mask;
-        }
-    } else {
-        // We're not resuming pending transfers - clear relevant pointers.
-        m_state->reset_previous_state_counters();
-    }
-
-    if ((Direction::D2H == m_direction) && (transfer_size != 0)) {
-        const auto max_transfers_in_buffer = get_transfers_count_in_buffer(transfer_size);
-        const auto transfers_in_buffer = std::min(max_transfers_in_buffer, m_state->m_pending_buffers.capacity());
-        const auto pending_descs = get_d2h_pending_descs_count();
-        const auto descs_in_transfer = m_desc_list->descriptors_in_buffer(transfer_size);
-        const auto pending_transfers = pending_descs.value() / descs_in_transfer;
-        // We prepare descs in advance for D2H channels:
-        // (1) The channel's buffer can store up to 'max_transfers_in_buffer' frames of size transfer_size
-        // (2) However, we can allow at most 'm_state->m_pending_buffers.capacity()' transfers. We can't store more than
-        //     that in the pending buffers circular array.
-        // (3) There are 'pending_transfers' frames from the previous channel activation (we assume that the same
-        //     'transfer_size' was used)
-        // (4) Hence, we have room for 'min(transfers_in_buffer, pending_buffers.capacity()) - pending_transfers' frames in the buffer currently.
-        const auto transfers_count = transfers_in_buffer - pending_transfers;
-        status = prepare_d2h_pending_descriptors(transfer_size, static_cast<uint32_t>(transfers_count));
-        CHECK_SUCCESS(status);
-    }
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::transfer_sync(void *buf, size_t count, std::chrono::milliseconds timeout)
-{
-    CHECK_NOT_NULL(buf, HAILO_INVALID_ARGUMENT);
-    CHECK(0 != count, HAILO_INVALID_ARGUMENT);
-
-    auto status = wait(count, timeout);
-    if ((HAILO_STREAM_NOT_ACTIVATED == status) || (HAILO_STREAM_ABORTED_BY_USER == status)) {
-        LOGGER__INFO("wait failed because channel {} is not activated/aborted (status {})", m_channel_id, status);
-        return status;
-    }
-    CHECK_SUCCESS(status, "wait failed with status {} (channel id: {}, timeout: {}ms)", status, m_channel_id, timeout.count());
-
-    std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
-    if (Direction::H2D == m_direction) {
-        status = transfer_h2d(buf, count);
-    } else {
-        status = transfer_d2h(buf, count);
-    }
-
-    if ((HAILO_STREAM_NOT_ACTIVATED == status) || (HAILO_STREAM_ABORTED_BY_USER == status)) {
-        LOGGER__INFO("transfer failed because channel {} is not activated/aborted (status {})", m_channel_id, status);
-        return status;
-    }
-    CHECK_SUCCESS(status, "transfer failed with status {} (channel id: {}, timeout: {}ms)", status, m_channel_id, timeout.count());
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::write_buffer_impl(const MemoryView &buffer)
-{
-    const uint32_t desired_desc_num = m_desc_list->descriptors_in_buffer(buffer.size());
-    const uint32_t desc_avail = (get_num_available() + m_pending_num_avail_offset) & m_state->m_descs.size_mask;
-    assert(desired_desc_num <= MAX_DESCS_COUNT);
-    assert(CB_AVAIL(m_state->m_descs, desc_avail, CB_TAIL(m_state->m_descs)) >= desired_desc_num);
-
-    const size_t buffer_write_offset = ((desc_avail + m_state->m_previous_tail) & m_state->m_descs.size_mask) * m_desc_list->desc_page_size();
-    const auto status = write_to_channel_buffer_cyclic(buffer, buffer_write_offset);
-    CHECK_SUCCESS(status);
-
-    m_pending_num_avail_offset = static_cast<uint16_t>(m_pending_num_avail_offset + desired_desc_num);
-
-    CHECK(!m_pending_buffers_sizes.full(), HAILO_INVALID_OPERATION, "Cannot add more pending buffers!");
-    m_pending_buffers_sizes.push_back(buffer.size());
-    return HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::write_to_channel_buffer_cyclic(const MemoryView &buffer, size_t channel_buffer_write_offset)
-{
-    CHECK(buffer.size() <= m_channel_buffer->size(), HAILO_INSUFFICIENT_BUFFER,
-        "Can't write {} bytes to channel buffer (channel buffer size {})",
-        buffer.size(), m_channel_buffer->size());
-
-    static const auto SYNC_TO_DEIVCE = HailoRTDriver::DmaSyncDirection::TO_DEVICE;
-    const auto size_to_end = m_channel_buffer->size() - channel_buffer_write_offset;
-    const auto first_chunk_size = std::min(size_to_end, buffer.size());
-    const auto first_chunk_addr = static_cast<uint8_t *>(m_channel_buffer->user_address()) + channel_buffer_write_offset;
-
-    // Copy from buffer to m_channel_buffer and then synchronize
-    std::memcpy(first_chunk_addr, buffer.data(), first_chunk_size);
-    auto status = m_channel_buffer->synchronize(channel_buffer_write_offset, first_chunk_size, SYNC_TO_DEIVCE);
-    CHECK_SUCCESS(status);
-
-    const auto remaining_size = buffer.size() - first_chunk_size;
-    if (remaining_size > 0) {
-        // Copy the remainder from buffer to m_channel_buffer and then synchronize
-        std::memcpy(m_channel_buffer->user_address(), buffer.data() + first_chunk_size, remaining_size);
-        status = m_channel_buffer->synchronize(0, remaining_size, SYNC_TO_DEIVCE);
-        CHECK_SUCCESS(status);
-    }
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::read_from_channel_buffer_cyclic(uint8_t *dest_buffer, size_t read_size, size_t channel_buffer_read_offset)
-{
-    CHECK(read_size <= m_channel_buffer->size(), HAILO_INSUFFICIENT_BUFFER,
-        "Can't read {} bytes from channel buffer (channel buffer size {})",
-        read_size, m_channel_buffer->size());
-
-    static const auto SYNC_TO_HOST = HailoRTDriver::DmaSyncDirection::TO_HOST;
-    const auto size_to_end = m_channel_buffer->size() - channel_buffer_read_offset;
-    const auto first_chunk_size = std::min(size_to_end, read_size);
-    const auto first_chunk_addr = static_cast<uint8_t *>(m_channel_buffer->user_address()) + channel_buffer_read_offset;
-
-    // Synchronize m_channel_buffer and copy to dest_buffer
-    auto status = m_channel_buffer->synchronize(channel_buffer_read_offset, first_chunk_size, SYNC_TO_HOST);
-    CHECK_SUCCESS(status);
-    std::memcpy(dest_buffer, first_chunk_addr, first_chunk_size);
-
-    const auto remaining_size = read_size - first_chunk_size;
-    if (remaining_size > 0) {
-        // Synchronize m_channel_buffer and copy remainder to dest_buffer
-        status = m_channel_buffer->synchronize(0, remaining_size, SYNC_TO_HOST);
-        CHECK_SUCCESS(status);
-        std::memcpy(dest_buffer + first_chunk_size, m_channel_buffer->user_address(), remaining_size);
-    }
-
-    return HAILO_SUCCESS;
-}
-
-Expected<BoundaryChannel::BufferState> BufferedChannel::get_buffer_state()
-{
-    BoundaryChannel::BufferState result;
-    result.num_avail = static_cast<uint16_t>(CB_HEAD(m_state->m_descs));
-    result.num_processed = static_cast<uint16_t>(CB_TAIL(m_state->m_descs));
-    auto hw_num_avail = m_host_registers.get_num_available();
-    CHECK_EXPECTED(hw_num_avail);
-    result.hw_num_avail = hw_num_avail.release();
-    auto hw_num_processed = get_hw_num_processed();
-    CHECK_EXPECTED(hw_num_processed);
-    result.hw_num_processed = hw_num_processed.release();
-
-    // Get a snapshot of the channel buffer
-    auto channel_buffer_copy = Buffer::create(m_channel_buffer->size());
-    CHECK_EXPECTED(channel_buffer_copy);
-    const auto status = read_from_channel_buffer_cyclic(channel_buffer_copy->data(), channel_buffer_copy->size(), 0);
-    CHECK_SUCCESS_AS_EXPECTED(status);
-
-    for (size_t offset = 0; offset < channel_buffer_copy->size(); offset += m_desc_list->desc_page_size()) {
-        auto chunk = Buffer::create(channel_buffer_copy->data() + offset, m_desc_list->desc_page_size());
-        CHECK_EXPECTED(chunk);
-        const auto abs_index = offset / m_desc_list->desc_page_size();
-        const auto desc_num = (abs_index >= static_cast<uint16_t>(m_state->m_previous_tail)) ?
-            abs_index - m_state->m_previous_tail :
-            m_state->m_descs.size - m_state->m_previous_tail + abs_index;
-        result.desc_buffer_pairing.emplace_back(static_cast<uint16_t>(desc_num), chunk.release());
-    }
-
-    return result;
-}
-
-hailo_status BufferedChannel::write_buffer(const MemoryView &buffer, std::chrono::milliseconds timeout,
-    const std::function<bool()> &should_cancel)
-{
-    std::unique_lock<RecursiveSharedMutex> state_guard(m_state->mutex());
-
-    // Checking in advance so as not to timeout
-    CHECK(buffer.size() <= m_channel_buffer->size(), HAILO_INSUFFICIENT_BUFFER,
-        "Can't write {} bytes to channel buffer (channel buffer size {})",
-        buffer.size(), m_channel_buffer->size());
-
-    size_t desired_desc_num = m_desc_list->descriptors_in_buffer(buffer.size());
-    hailo_status channel_completion_status = HAILO_SUCCESS;
-    bool was_successful = m_state->transfer_buffer_cv().wait_for(state_guard, timeout, [this, desired_desc_num,
-        &should_cancel, &channel_completion_status] () {
-        if (m_state->m_is_aborted) {
-            return true;
-        }
-
-        if (should_cancel()) {
-            channel_completion_status = HAILO_STREAM_ABORTED_BY_USER;
-            return true;
-        }
-        // Limit writes to not surpass size of m_pending_buffers
-        size_t written_buffers_count = m_pending_buffers_sizes.size();
-        size_t sent_buffers_count = m_state->m_pending_buffers.size();
-        if (written_buffers_count + sent_buffers_count >= m_state->m_pending_buffers.capacity()) {
-            return false;
-        }
-
-        return is_ready_for_write(static_cast<uint16_t>(desired_desc_num));
-    });
-    if (m_state->m_is_aborted || (HAILO_STREAM_ABORTED_BY_USER == channel_completion_status)) {
-        LOGGER__INFO("wait_for in write_buffer was aborted!");
-        return HAILO_STREAM_ABORTED_BY_USER;
-    }
-    CHECK(was_successful, HAILO_TIMEOUT, "Got HAILO_TIMEOUT while waiting for descriptors in write_buffer (channel_id={})",
-        m_channel_id);
-    CHECK_SUCCESS(channel_completion_status);
-
-    return write_buffer_impl(buffer);
-}
-
-hailo_status BufferedChannel::send_pending_buffer_impl()
-{
-    CHECK(!m_pending_buffers_sizes.empty(), HAILO_INVALID_OPERATION, "There are no pending buffers to send!");
-
-    // For h2d, only the host need to get transfer done interrupts
-    InterruptsDomain last_desc_interrupts_domain = InterruptsDomain::HOST;
-    // If we measure latency, we need interrupt on the first descriptor
-    InterruptsDomain first_desc_interrupts_domain = (m_latency_meter != nullptr) ?
-        InterruptsDomain::HOST : InterruptsDomain::NONE;
-
-    auto status = prepare_descriptors(m_pending_buffers_sizes.front(), first_desc_interrupts_domain, last_desc_interrupts_domain);
-    if (HAILO_STREAM_NOT_ACTIVATED == status) {
-        LOGGER__INFO("sending pending buffer failed because stream is not activated");
-        // Stream was aborted during transfer - reset pending buffers
-        m_pending_num_avail_offset = 0;
-        while (m_pending_buffers_sizes.size() > 0) {
-            m_pending_buffers_sizes.pop_front();
-        }
-        return status;
-    }
-    CHECK_SUCCESS(status);
-    m_state->m_accumulated_transfers = (m_state->m_accumulated_transfers + 1) % m_transfers_per_axi_intr;
-
-    size_t desired_desc_num = m_desc_list->descriptors_in_buffer(m_pending_buffers_sizes.front());
-    m_pending_num_avail_offset = static_cast<uint16_t>(m_pending_num_avail_offset - desired_desc_num);
-
-    m_pending_buffers_sizes.pop_front();
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::send_pending_buffer()
-{
-    {
-        std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
-
-        auto status = send_pending_buffer_impl();
-        if (HAILO_STREAM_NOT_ACTIVATED == status) {
-            LOGGER__INFO("stream is not activated");
-            return HAILO_STREAM_NOT_ACTIVATED;
-        } else {
-            CHECK_SUCCESS(status);
-        }
-    }
-    m_state->transfer_buffer_cv().notify_one();
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::transfer_async(TransferRequest &&)
-{
-    return HAILO_NOT_IMPLEMENTED;
-}
-
-hailo_status BufferedChannel::cancel_pending_transfers()
-{
-    return HAILO_NOT_IMPLEMENTED;
-}
-
-hailo_status BufferedChannel::transfer_h2d(void *buf, size_t count)
-{
-    auto status = write_buffer_impl(MemoryView(buf, count));
-    CHECK_SUCCESS(status);
-
-    status = send_pending_buffer_impl();
-    if (HAILO_STREAM_NOT_ACTIVATED == status) {
-        return status;
-    } else {
-        CHECK_SUCCESS(status);
-    }
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::transfer_d2h(void *buf, size_t count)
-{
-    hailo_status status = HAILO_UNINITIALIZED;
-    // Provide FW interrupt only in the end of the last transfer in the batch
-    InterruptsDomain first_desc_interrupts_domain = InterruptsDomain::NONE;
-    InterruptsDomain last_desc_interrupts_domain = (m_state->m_accumulated_transfers + 1 == m_transfers_per_axi_intr) ?
-        InterruptsDomain::BOTH : InterruptsDomain::HOST;
-
-    auto desired_desc_num = m_desc_list->descriptors_in_buffer(count);
-    assert(desired_desc_num <= MAX_DESCS_COUNT);
-    int desc_num = static_cast<int>(desired_desc_num);
-
-    int num_processed = CB_TAIL(m_state->m_descs);
-    int num_ready = CB_PROG(m_state->m_descs, num_processed, m_state->m_d2h_read_desc_index);
-    CHECK(num_ready >= desc_num, HAILO_OUT_OF_DESCRIPTORS,
-        "{} descriptors desired but only {} available", desc_num, num_ready);
-
-    const auto channel_buffer_read_offset = m_state->m_d2h_read_desc_index_abs * m_desc_list->desc_page_size();
-    status = read_from_channel_buffer_cyclic(static_cast<uint8_t *>(buf), count, channel_buffer_read_offset);
-    CHECK_SUCCESS(status);
-
-    m_state->m_d2h_read_desc_index = (m_state->m_d2h_read_desc_index + desc_num) & m_state->m_descs.size_mask;
-    m_state->m_d2h_read_desc_index_abs = (m_state->m_d2h_read_desc_index_abs + desc_num) & m_state->m_descs.size_mask;
-
-    // prepare descriptors for next recv
-    if (m_state->m_is_channel_activated) {
-        status = prepare_descriptors(count, first_desc_interrupts_domain, last_desc_interrupts_domain);
-        if (HAILO_STREAM_NOT_ACTIVATED == status) {
-            LOGGER__INFO("transfer d2h failed because stream is not activated");
-            return status;
-        }
-        CHECK_SUCCESS(status);
-    }
-
-    m_state->m_accumulated_transfers = (m_state->m_accumulated_transfers + 1) % m_transfers_per_axi_intr;
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status BufferedChannel::prepare_descriptors(size_t transfer_size, InterruptsDomain first_desc_interrupts_domain,
-    InterruptsDomain last_desc_interrupts_domain)
-{
-    if (!m_state->m_is_channel_activated) {
-        return HAILO_STREAM_NOT_ACTIVATED;
-    }
-
-    // Calculate desired descriptors for the buffer
-    size_t desired_desc_num = m_desc_list->descriptors_in_buffer(transfer_size);
-    assert(desired_desc_num <= MAX_DESCS_COUNT);
-    uint16_t desc_num = static_cast<uint16_t>(desired_desc_num);
-
-    const auto num_available = get_num_available();
-    const auto num_processed = CB_TAIL(m_state->m_descs);
-    const auto num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);
-    if (num_free < desc_num) {
-        return HAILO_OUT_OF_DESCRIPTORS;
-    }
-
-    if (nullptr != m_latency_meter) {
-        // Program first descriptor
-        m_desc_list->program_single_descriptor((*m_desc_list)[num_available], m_desc_list->desc_page_size(),
-            first_desc_interrupts_domain);
-    }
-    auto actual_desc_count = m_desc_list->program_last_descriptor(transfer_size, last_desc_interrupts_domain,
-        num_available);
-    if (!actual_desc_count) {
-        LOGGER__ERROR("Failed to program desc_list for channel {}", m_channel_id);
-        return actual_desc_count.status();
-    }
-    assert(actual_desc_count.value() == desc_num);
-    assert(desc_num > 0);
-    const auto last_desc_avail = static_cast<uint16_t>((num_available + desc_num - 1) & m_state->m_descs.size_mask);
-
-    m_state->add_pending_buffer(num_available, last_desc_avail, m_direction);
-    return inc_num_available(desc_num);
-}
-
-bool BufferedChannel::is_ready_for_write(const uint16_t desired_desc_num)
-{
-    const auto has_space_in_buffers = !m_state->m_pending_buffers.full();
-    const uint32_t desc_avail = (get_num_available() + m_pending_num_avail_offset) & m_state->m_descs.size_mask;
-    const int num_free = CB_AVAIL(m_state->m_descs, desc_avail, CB_TAIL(m_state->m_descs));
-    const auto has_desc_space = (num_free >= desired_desc_num);
-
-    return (has_space_in_buffers && has_desc_space);
-}
-
-bool BufferedChannel::is_ready_for_transfer_d2h(size_t buffer_size)
-{
-    size_t desired_desc_num = m_desc_list->descriptors_in_buffer(buffer_size);
-    assert(desired_desc_num <= MAX_DESCS_COUNT);
-    int desc_num = static_cast<int>(desired_desc_num);
-
-    if (m_state->m_pending_buffers.full()) {
-        return false;
-    }
-
-    int num_processed = CB_TAIL(m_state->m_descs);
-    int num_ready = CB_PROG(m_state->m_descs, num_processed, m_state->m_d2h_read_desc_index);
-    if (num_ready < desc_num) {
-        return false;
-    }
-    return true;
-}
-
-void BufferedChannel::notify_all()
-{
-    {
-        // Acquire mutex to make sure the notify_all will wake the blocking threads on the cv
-        std::lock_guard<RecursiveSharedMutex> state_guard(m_state->mutex());
-    }
-    m_state->transfer_buffer_cv().notify_all();
-}
-
-} /* namespace vdma */
-} /* namespace hailort */
diff --git a/hailort/libhailort/src/vdma/channel/buffered_channel.hpp b/hailort/libhailort/src/vdma/channel/buffered_channel.hpp
deleted file mode 100644 (file)
index ac0d8c4..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file buffered_channel.hpp
- * @brief BufferedChannel - Implements the BoundaryChannel interface, allowing for buffering of frames
- *                          by managing a vdma buffer
- **/
-
-#ifndef _HAILO_VDMA_BUFFERED_CHANNEL_HPP_
-#define _HAILO_VDMA_BUFFERED_CHANNEL_HPP_
-
-
-#include "hailo/hailort.h"
-#include "vdma/memory/mapped_buffer.hpp"
-#include "vdma/channel/boundary_channel.hpp"
-
-
-namespace hailort {
-namespace vdma {
-
-class BufferedChannel;
-using BufferedChannelPtr = std::shared_ptr<BufferedChannel>;
-
-class BufferedChannel : public BoundaryChannel
-{
-public:
-    static Expected<BufferedChannelPtr> create(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver,
-        uint32_t descs_count, uint16_t desc_page_size, const std::string &stream_name = "", LatencyMeterPtr latency_meter = nullptr,
-        uint16_t transfers_per_axi_intr = 1);
-
-    BufferedChannel(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,
-        uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, uint16_t transfers_per_axi_intr, hailo_status &status);
-    BufferedChannel(const BufferedChannel &other) = delete;
-    BufferedChannel &operator=(const BufferedChannel &other) = delete;
-    BufferedChannel(BufferedChannel &&other) = delete;
-    BufferedChannel &operator=(BufferedChannel &&other) = delete;
-    virtual ~BufferedChannel() = default;
-
-    // Writes/reads from the channel buffer. This function can work even if the channel is not activated (for example -
-    // reading data if it is ready).
-    virtual hailo_status transfer_sync(void *buf, size_t count, std::chrono::milliseconds timeout) override;
-    // Either write_buffer + send_pending_buffer or transfer (h2d) should be used on a given channel, not both
-    virtual hailo_status write_buffer(const MemoryView &buffer, std::chrono::milliseconds timeout,
-        const std::function<bool()> &should_cancel) override;
-    virtual hailo_status send_pending_buffer() override;
-    // TODO: merge with "transfer_sync(void *buf, size_t count)"? (HRT-10207)
-    virtual hailo_status transfer_async(TransferRequest &&) override;
-    virtual hailo_status cancel_pending_transfers() override;
-    virtual hailo_status complete_channel_activation(uint32_t transfer_size, bool resume_pending_transfers) override;
-    virtual hailo_status complete_channel_deactivation() override;
-
-    // Assumes that the channel is idle; doesn't block changes to the channel
-    // To be used for debugging purposes
-    virtual Expected<BoundaryChannel::BufferState> get_buffer_state() override;
-    virtual Expected<size_t> get_h2d_pending_frames_count() override;
-    virtual Expected<size_t> get_d2h_pending_descs_count() override;
-
-    virtual void notify_all() override;
-
-private:
-
-    hailo_status transfer_h2d(void *buf, size_t count);
-    hailo_status write_buffer_impl(const MemoryView &buffer);
-    hailo_status write_to_channel_buffer_cyclic(const MemoryView &buffer, size_t channel_buffer_write_offset);
-    hailo_status read_from_channel_buffer_cyclic(uint8_t *dest_buffer, size_t read_size, size_t channel_buffer_read_offset);
-    hailo_status send_pending_buffer_impl();
-    hailo_status transfer_d2h(void *buf, size_t count);
-    hailo_status prepare_descriptors(size_t transfer_size, InterruptsDomain first_desc_interrupts_domain,
-        InterruptsDomain last_desc_interrupts_domain);
-    hailo_status prepare_d2h_pending_descriptors(uint32_t transfer_size, uint32_t transfers_count);
-    bool is_ready_for_write(const uint16_t desired_desc_num);
-    virtual bool is_ready_for_transfer_d2h(size_t buffer_size) override;
-    hailo_status store_channel_buffer_state();
-
-    // TODO: m_channel_buffer gets bound to ChannelBase::m_desc_list meaning the desc in that list point to dma addrs
-    //       that back m_channel_buffer. Because ChannelBase gets dtor'd after BufferedChannel, m_channel_buffer ChannelBase::m_desc_list
-    //       will point to a freed buffer. This is ok because the channel objects only get dtor'd after they are deactivated by the fw.
-    //       Might want to enforce this in hailort as well (e.g. desc lists can hold shared_ptrs to MappedBuffer while they are bound).
-    //       (HRT-9110)
-    std::shared_ptr<MappedBuffer> m_channel_buffer;
-    // Using CircularArray because it won't allocate or free memory wile pushing and popping. The fact that it is circular is not relevant here
-    CircularArray<size_t> m_pending_buffers_sizes;
-    std::atomic_uint16_t m_pending_num_avail_offset;
-};
-
-} /* namespace vdma */
-} /* namespace hailort */
-
-#endif  // _HAILO_VDMA_BUFFERED_CHANNEL_HPP_
\ No newline at end of file
diff --git a/hailort/libhailort/src/vdma/channel/channel_base.cpp b/hailort/libhailort/src/vdma/channel/channel_base.cpp
deleted file mode 100644 (file)
index e872e73..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file channel_base.cpp
- * @brief Base class of Boundary Channel - responsible for all the basic vdma channel functionality that interacts with the
- *  driver and the registers
- *      The hierarchy is as follows:
- *        --------------------------------------------------------------------------------------------------------------
- *        |                    ChannelBase            | (Base class - includes state and buffers)
- *        |                          |                              |
- *        |                  BoundaryChannel          | (handles Boundary channels)
- *        --------------------------------------------------------------------------------------------------------------
- **/
-#include "vdma/channel/channel_base.hpp"
-
-
-namespace hailort {
-namespace vdma {
-
-ChannelBase::ChannelBase(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,
-                         uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, hailo_status &status) :
-    m_channel_id(channel_id),
-    m_direction(direction),
-    m_driver(driver),
-    m_host_registers(driver, channel_id, direction),
-    m_desc_list(nullptr),
-    m_stream_name(stream_name),
-    m_latency_meter(latency_meter)
-{
-    if (channel_id.channel_index >= VDMA_CHANNELS_PER_ENGINE) {
-        LOGGER__ERROR("Invalid DMA channel index {}", channel_id.channel_index);
-        status = HAILO_INVALID_ARGUMENT;
-        return;
-    }
-
-    if (channel_id.engine_index >= driver.dma_engines_count()) {
-        LOGGER__ERROR("Invalid DMA engine index {}, max {}", channel_id.engine_index, driver.dma_engines_count());
-        status = HAILO_INVALID_ARGUMENT;
-        return;
-    }
-
-    auto state = VdmaChannelState::create(descs_count, (nullptr != m_latency_meter));
-    if(!state) {
-        LOGGER__ERROR("Failed to create channel's state");
-        status = state.status();
-        return;
-    }
-    m_state = state.release();
-
-    status = allocate_descriptor_list(descs_count, desc_page_size);
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Failed to allocate Vdma buffer for channel transfer! status={}", status);
-        return;
-    }
-
-    status = HAILO_SUCCESS;
-}
-
-hailo_status ChannelBase::set_num_avail_value(uint16_t new_value)
-{
-    // TODO - HRT-7885 : add check in driver
-    CHECK(m_state->m_is_channel_activated, HAILO_STREAM_NOT_ACTIVATED,
-        "Error, can't set num available when stream is not activated");
-
-    auto status = m_host_registers.set_num_available(new_value);
-    CHECK_SUCCESS(status, "Fail to write vdma num available register");
-
-#ifndef NDEBUG
-    // Validate synchronization with HW
-    auto hw_num_avail = m_host_registers.get_num_available();
-    assert(hw_num_avail);
-    assert(hw_num_avail.value() == new_value);
-#endif
-    return HAILO_SUCCESS;
-}
-
-hailo_status ChannelBase::inc_num_available(uint16_t value)
-{
-    //TODO: validate that count is added.
-    int num_available = get_num_available();
-    int num_processed = CB_TAIL(m_state->m_descs);
-    int num_free = CB_AVAIL(m_state->m_descs, num_available, num_processed);
-    if (value > num_free) {
-        return HAILO_OUT_OF_DESCRIPTORS;
-    }
-
-    CB_ENQUEUE(m_state->m_descs, value);
-    num_available = (num_available + value) & m_state->m_descs.size_mask;
-    return set_num_avail_value(static_cast<uint16_t>(num_available));
-}
-
-bool ChannelBase::is_desc_between(uint16_t begin, uint16_t end, uint16_t desc)
-{
-    if (begin == end) {
-        // There is nothing between
-        return false;
-    }
-    if (begin < end) {
-        // desc needs to be in [begin, end)
-        return (begin <= desc) && (desc < end);
-    }
-    else {
-        // desc needs to be in [0, end) or [begin, m_state->m_descs.size()-1]
-        return (desc < end) || (begin <= desc);
-    }
-}
-
-uint16_t ChannelBase::get_num_available()
-{
-    uint16_t num_available = (uint16_t)CB_HEAD(m_state->m_descs);
-
-#ifndef NDEBUG
-    // Validate synchronization with HW
-    auto hw_num_avail = m_host_registers.get_num_available();
-    assert(hw_num_avail);
-
-    // On case of channel aborted, the num_available is set to 0 (so we don't accept sync)
-    auto is_aborted_exp = m_host_registers.is_aborted();
-    assert(is_aborted_exp);
-
-    if (m_state->m_is_channel_activated && !is_aborted_exp.value()) {
-        assert(hw_num_avail.value() == num_available);
-    }
-#endif
-    return num_available;
-}
-
-void ChannelBase::set_num_proc_value(uint16_t new_value)
-{
-    assert(new_value < m_state->m_descs.size);
-    _CB_SET(m_state->m_descs.tail, new_value);
-}
-
-Expected<uint16_t> ChannelBase::get_hw_num_processed()
-{
-    auto hw_num_processed = m_host_registers.get_num_processed();
-    CHECK_EXPECTED(hw_num_processed, "Fail to read vdma num processed register");
-
-    // Although the hw_num_processed should be a number between 0 and m_descs.size-1, if
-    // m_desc.size < 0x10000 (the maximum desc size), the actual hw_num_processed is a number
-    // between 1 and m_descs.size. Therefore the value can be m_descs.size, in this case we change it
-    // to zero.
-    return static_cast<uint16_t>(hw_num_processed.value() & m_state->m_descs.size_mask);
-}
-
-ChannelBase::Direction ChannelBase::other_direction(Direction direction)
-{
-    return (Direction::H2D == direction) ? Direction::D2H : Direction::H2D;
-}
-
-hailo_status ChannelBase::allocate_descriptor_list(uint32_t descs_count, uint16_t desc_page_size)
-{
-    static const bool CIRCULAR = true;
-    auto desc_list_exp = DescriptorList::create(descs_count, desc_page_size, CIRCULAR, m_driver);
-    CHECK_EXPECTED_AS_STATUS(desc_list_exp);
-
-    m_desc_list = make_shared_nothrow<DescriptorList>(desc_list_exp.release());
-    CHECK_NOT_NULL(m_desc_list, HAILO_OUT_OF_HOST_MEMORY);
-
-    return HAILO_SUCCESS;
-}
-
-size_t ChannelBase::get_transfers_count_in_buffer(size_t transfer_size)
-{
-    const auto descs_in_transfer = m_desc_list->descriptors_in_buffer(transfer_size);
-    const auto descs_count = CB_SIZE(m_state->m_descs);
-    return (descs_count - 1) / descs_in_transfer;
-}
-
-Expected<uint16_t> ChannelBase::update_latency_meter()
-{
-    uint16_t last_num_processed = m_state->m_last_timestamp_num_processed;
-
-    auto timestamp_list = m_driver.vdma_interrupts_read_timestamps(m_channel_id);
-    CHECK_EXPECTED(timestamp_list);
-
-    if (0 == timestamp_list->count) {
-        // No new timestamps for this channel, return the previous result
-        return Expected<uint16_t>(last_num_processed);
-    }
-
-    // TODO: now we have more iterations than we need. We know that the pending buffers + the timestamp list
-    // are ordered. If pending_buffer[i] is not in any of the timestamps_list[0, 1, ... k], then also pending_buffer[i+1,i+2,...]
-    // not in those timestamps
-
-    for (const auto &pending_buffer : m_state->m_pending_buffers) {
-        uint16_t latency_desc = static_cast<uint16_t>(pending_buffer.latency_measure_desc);
-        for (size_t i = 0; i < timestamp_list->count; i++) {
-            const auto &irq_timestamp = timestamp_list->timestamp_list[i];
-            const auto desc_num_processed = static_cast<uint16_t>(irq_timestamp.desc_num_processed & m_state->m_descs.size_mask);
-            if (is_desc_between(last_num_processed, desc_num_processed, latency_desc)) {
-                if (m_direction == Direction::H2D) {
-                    m_latency_meter->add_start_sample(irq_timestamp.timestamp);
-                }
-                else {
-                    m_latency_meter->add_end_sample(m_stream_name, irq_timestamp.timestamp);
-                }
-                break;
-            }
-        }
-    }
-
-    m_state->m_last_timestamp_num_processed = static_cast<uint16_t>(
-        timestamp_list->timestamp_list[timestamp_list->count-1].desc_num_processed & m_state->m_descs.size_mask);
-    return Expected<uint16_t>(m_state->m_last_timestamp_num_processed);
-}
-
-uint32_t ChannelBase::calculate_descriptors_count(uint32_t buffer_size) const
-{
-    return DescriptorList::calculate_descriptors_count(buffer_size, 1, m_desc_list->desc_page_size());
-}
-
-} /* namespace vdma */
-} /* namespace hailort */
diff --git a/hailort/libhailort/src/vdma/channel/channel_base.hpp b/hailort/libhailort/src/vdma/channel/channel_base.hpp
deleted file mode 100644 (file)
index 8ae5342..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file channel_base.hpp
- * @brief Base class of Boundary Channel - responsible for all the basic vdma channel functionality that interacts with the
- *  driver and the registers
- *      The hierarchy is as follows:
- *        --------------------------------------------------------------------------------------------------------------
- *        |                    ChannelBase            | (Base class - includes state and buffers)
- *        |                          |                              |
- *        |                  BoundaryChannel          | (handles Boundary channels)
- *        --------------------------------------------------------------------------------------------------------------
- **/
-
-#ifndef _HAILO_VDMA_CHANNEL_BASE_HPP_
-#define _HAILO_VDMA_CHANNEL_BASE_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "hailo/buffer.hpp"
-
-#include "common/latency_meter.hpp"
-
-#include "vdma/channel/vdma_channel_regs.hpp"
-#include "vdma/memory/sg_buffer.hpp"
-#include "vdma/memory/descriptor_list.hpp"
-#include "vdma/channel/channel_id.hpp"
-#include "vdma/channel/channel_state.hpp"
-
-#include <mutex>
-#include <condition_variable>
-
-
-namespace hailort {
-namespace vdma {
-
-class ChannelBase
-{
-public:
-    using Direction = HailoRTDriver::DmaDirection;
-
-    ChannelBase(vdma::ChannelId channel_id, Direction direction, HailoRTDriver &driver, uint32_t descs_count,
-        uint16_t desc_page_size, const std::string &stream_name, LatencyMeterPtr latency_meter, hailo_status &status);
-    ChannelBase(const ChannelBase &other) = delete;
-    ChannelBase &operator=(const ChannelBase &other) = delete;
-    ChannelBase(ChannelBase &&other) = delete;
-    ChannelBase &operator=(ChannelBase &&other) = delete;
-    virtual ~ChannelBase() = default;
-
-    vdma::ChannelId get_channel_id() const
-    {
-        return m_channel_id;
-    }
-
-    uint16_t get_page_size()
-    {
-        return m_desc_list->desc_page_size();
-    }
-
-    const std::string &stream_name() const
-    {
-        return m_stream_name;
-    }
-
-    size_t get_transfers_count_in_buffer(size_t transfer_size);
-    size_t get_buffer_size() const;
-    uint32_t calculate_descriptors_count(uint32_t buffer_size) const;
-
-    std::shared_ptr<DescriptorList> get_desc_list()
-    {
-        return m_desc_list;
-    }
-
-protected:
-    const vdma::ChannelId m_channel_id;
-    const Direction m_direction;
-    HailoRTDriver &m_driver;
-    VdmaChannelRegs m_host_registers;
-    std::shared_ptr<DescriptorList> m_desc_list; // Host side descriptor list
-    const std::string m_stream_name;
-    std::unique_ptr<VdmaChannelState> m_state;
-    LatencyMeterPtr m_latency_meter;
-
-    static bool is_desc_between(uint16_t begin, uint16_t end, uint16_t desc);
-    // Returns the desc index of the last desc whose timestamp was measured in the driver
-    Expected<uint16_t> update_latency_meter();
-    Expected<bool> is_aborted();
-    hailo_status set_num_avail_value(uint16_t new_value);
-    uint16_t get_num_available();
-    void set_num_proc_value(uint16_t new_value);
-    Expected<uint16_t> get_hw_num_processed();
-    hailo_status inc_num_available(uint16_t value);
-    static Direction other_direction(const Direction direction);
-
-private:
-    hailo_status allocate_descriptor_list(uint32_t descs_count, uint16_t desc_page_size);
-};
-
-} /* namespace vdma */
-} /* namespace hailort */
-
-#endif /* _HAILO_VDMA_CHANNEL_BASE_HPP_ */
\ No newline at end of file
diff --git a/hailort/libhailort/src/vdma/channel/channel_state.cpp b/hailort/libhailort/src/vdma/channel/channel_state.cpp
deleted file mode 100644 (file)
index 2afebb2..0000000
+++ /dev/null
@@ -1,240 +0,0 @@
-#include "vdma/channel/channel_state.hpp"
-
-
-namespace hailort {
-namespace vdma {
-
-#ifndef _MSC_VER
-RecursiveSharedMutex::RecursiveSharedMutex()
-{
-    // Make sharable mutex
-    pthread_mutexattr_t mutex_attrs{};
-    int err = pthread_mutexattr_init(&mutex_attrs);
-    if (0 != err) {
-        LOGGER__CRITICAL("Failed init mutex attr, aborting");
-        std::abort();
-    }
-
-    err = pthread_mutexattr_setpshared(&mutex_attrs, PTHREAD_PROCESS_SHARED);
-    if (0 != err) {
-        LOGGER__CRITICAL("pthread_mutexattr_setpshared failed");
-        std::abort();
-    }
-
-    err = pthread_mutexattr_settype(&mutex_attrs, PTHREAD_MUTEX_RECURSIVE);
-    if (0 != err) {
-        LOGGER__CRITICAL("pthread_mutexattr_settype failed");
-        std::abort();
-    }
-
-    err = pthread_mutex_init(&m_mutex, &mutex_attrs);
-    if (0 != pthread_mutexattr_destroy(&mutex_attrs)) {
-        LOGGER__CRITICAL("Failed destroy mutexattr");
-        // continue
-    }
-    if (0 != err) {
-        LOGGER__CRITICAL("Failed init mutex, aborting");
-        std::abort();
-    }
-}
-
-RecursiveSharedMutex::~RecursiveSharedMutex()
-{
-    int err = pthread_mutex_destroy(&m_mutex);
-    if (0 != err) {
-        LOGGER__ERROR("Failed destroy shared mutex, errno {}", err);
-    }
-}
-
-void RecursiveSharedMutex::lock()
-{
-    int err = pthread_mutex_lock(&m_mutex);
-    if (0 != err) {
-        LOGGER__ERROR("Failed lock shared mutex, errno {}", err);
-        std::abort();
-    }
-}
-
-void RecursiveSharedMutex::unlock()
-{
-    int err = pthread_mutex_unlock(&m_mutex);
-    if (0 != err) {
-        LOGGER__ERROR("Failed unlock shared mutex, errno {}", err);
-        std::abort();
-    }
-}
-
-SharedConditionVariable::SharedConditionVariable()
-{
-    // Make sharable condvar
-    pthread_condattr_t cond_attrs{};
-    int err = pthread_condattr_init(&cond_attrs);
-    if (0 != err) {
-        LOGGER__CRITICAL("Failed init condition variable attr, aborting");
-        std::abort();
-    }
-
-    err = pthread_condattr_setpshared(&cond_attrs, PTHREAD_PROCESS_SHARED);
-    if (0 != err) {
-        LOGGER__CRITICAL("pthread_condattr_setpshared failed");
-        std::abort();
-    }
-
-    err = pthread_condattr_setclock(&cond_attrs, CLOCK_MONOTONIC);
-    if (0 != err) {
-        LOGGER__CRITICAL("pthread_condattr_setclock failed");
-        std::abort();
-    }
-
-    err = pthread_cond_init(&m_cond, &cond_attrs);
-    if (0 != pthread_condattr_destroy(&cond_attrs)) {
-        LOGGER__CRITICAL("Failed destroy condattr");
-        // continue
-    }
-    if (0 != err) {
-        LOGGER__CRITICAL("Failed init mutex, aborting");
-        std::abort();
-    }
-}
-
-SharedConditionVariable::~SharedConditionVariable()
-{
-    int err = pthread_cond_destroy(&m_cond);
-    if (0 != err) {
-        LOGGER__ERROR("Failed destory vdma channel condition varialbe, errno {}", err);
-    }
-}
-
-// Get the absolute time for the given timeout - calculate now() + timeout_ns
-// using system CLOCK_MONOTONIC (Used for pthread condition variable wait)
-static struct timespec get_absolute_time(std::chrono::nanoseconds timeout_ns)
-{
-    // Using chrono with timespec types to avoid casts
-    using ts_seconds = std::chrono::duration<decltype(timespec::tv_sec)>;
-    using ts_nanoseconds = std::chrono::duration<decltype(timespec::tv_nsec), std::nano>;
-
-    struct timespec current_ts{};
-    clock_gettime(CLOCK_MONOTONIC, &current_ts);
-
-    assert((current_ts.tv_sec + std::chrono::duration_cast<ts_seconds>(timeout_ns).count()) <
-        std::numeric_limits<decltype(timespec::tv_sec)>::max());
-    auto absolute_sec = ts_seconds(current_ts.tv_sec) + std::chrono::duration_cast<ts_seconds>(timeout_ns);
-    assert(current_ts.tv_nsec <= std::nano::den);
-    auto absolute_nsec = ts_nanoseconds(current_ts.tv_nsec) +
-        std::chrono::duration_cast<ts_nanoseconds>(timeout_ns % std::chrono::seconds(1));
-
-    // Nanos overflow
-    if (absolute_nsec.count() >= std::nano::den) {
-        absolute_sec += ts_seconds(1);
-        absolute_nsec = absolute_nsec % ts_seconds(1);
-    }
-
-    return timespec {
-        .tv_sec = absolute_sec.count(),
-        .tv_nsec = absolute_nsec.count()
-    };
-}
-
-bool SharedConditionVariable::wait_for(std::unique_lock<RecursiveSharedMutex> &lock, std::chrono::milliseconds timeout, std::function<bool()> condition)
-{
-    if (UINT32_MAX == timeout.count()) {
-        // Infinity wait
-        int err = 0;
-        while (!condition() && err == 0) {
-            err = pthread_cond_wait(&m_cond, lock.mutex()->native_handle());
-        }
-        if (err != 0) {
-            LOGGER__CRITICAL("Error waiting for shared condition variable: {}", err);
-            std::abort();
-        }
-        return true;
-    }
-    else if (0 == timeout.count()) {
-        // Special case for 0 timeout - we don't want to mess with absolute time
-        return condition();
-    } else {
-        // Timed wait
-        auto ts = get_absolute_time(timeout);
-
-        int err = 0;
-        while (!condition() && err == 0) {
-            err = pthread_cond_timedwait(&m_cond, lock.mutex()->native_handle(), &ts);
-        }
-        if ((err != 0) && (err != ETIMEDOUT)) {
-            LOGGER__CRITICAL("Error waiting for shared condition variable: {}", err);
-            std::abort();
-        }
-        return err == 0;
-    }
-}
-
-void SharedConditionVariable::notify_one()
-{
-    pthread_cond_signal(&m_cond);
-}
-
-void SharedConditionVariable::notify_all()
-{
-    pthread_cond_broadcast(&m_cond);
-}
-
-#endif /* _MSC_VER */
-
-Expected<std::unique_ptr<VdmaChannelState>> VdmaChannelState::create(uint32_t descs_count, bool measure_latency)
-{
-    // Note: we implement operator new so the state object will be shared with forked processes.
-    auto state = make_unique_nothrow<VdmaChannelState>(descs_count, measure_latency);
-    CHECK_NOT_NULL_AS_EXPECTED(state, HAILO_OUT_OF_HOST_MEMORY);
-    return state;
-}
-
-VdmaChannelState::VdmaChannelState(uint32_t descs_count, bool measure_latency) :
-    m_is_channel_activated(false),
-    // If we measuring latency, we may get 2 interrupts for each input channel (first descriptor and last descriptor).
-    // Hence we must limit the transfers count to half of the actual transfers count.
-    m_pending_buffers(measure_latency ? PENDING_BUFFERS_SIZE/2 : PENDING_BUFFERS_SIZE),
-    m_d2h_read_desc_index(0),
-    m_d2h_read_desc_index_abs(0),
-    m_is_aborted(false),
-    m_previous_tail(0),
-    m_desc_list_delta(0),
-    m_last_timestamp_num_processed(0),
-    m_accumulated_transfers(0)
-{
-    CB_INIT(m_descs, descs_count);
-}
-
-void VdmaChannelState::reset_counters()
-{
-    CB_RESET(m_descs);
-    m_pending_buffers.reset();
-    m_last_timestamp_num_processed = 0;
-    m_accumulated_transfers = 0;
-}
-
-void VdmaChannelState::reset_previous_state_counters()
-{
-    m_previous_tail = 0;
-    m_desc_list_delta = 0;
-    m_d2h_read_desc_index = 0;
-    m_d2h_read_desc_index_abs = 0;
-}
-
-void VdmaChannelState::add_pending_buffer(uint16_t first_desc, uint16_t last_desc, HailoRTDriver::DmaDirection direction,
-    const InternalTransferDoneCallback &on_transfer_done, MappedBufferPtr mapped_buffer)
-{
-    if (m_pending_buffers.full()) {
-        // TODO- HRT-8900 : Fix log and check if should return error
-        LOGGER__ERROR("no avail space");
-    }
-
-    PendingBuffer pending_buffer{};
-    pending_buffer.last_desc = last_desc;
-    pending_buffer.latency_measure_desc = (direction == HailoRTDriver::DmaDirection::H2D) ? first_desc : last_desc;
-    pending_buffer.on_transfer_done = on_transfer_done;
-    pending_buffer.mapped_buffer = mapped_buffer;
-    m_pending_buffers.push_back(std::move(pending_buffer));
-}
-
-} /* namespace vdma */
-} /* namespace hailort */
diff --git a/hailort/libhailort/src/vdma/channel/channel_state.hpp b/hailort/libhailort/src/vdma/channel/channel_state.hpp
deleted file mode 100644 (file)
index 5bc964e..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file channel_state.hpp
- * @brief Current state of Vdma Channel
- *
- * <doc>
- **/
-
-#ifndef _HAILO_VDMA_CHANNEL_STATE_HPP_
-#define _HAILO_VDMA_CHANNEL_STATE_HPP_
-
-#include "hailo/hailort.h"
-#include "os/hailort_driver.hpp"
-#include "common/circular_buffer.hpp"
-#include "vdma/memory/mapped_buffer.hpp"
-#include "stream_common/async_common.hpp"
-
-#include <array>
-#include <condition_variable>
-
-#ifndef _MSC_VER
-#include <sys/mman.h>
-#endif
-
-
-namespace hailort {
-namespace vdma {
-
-struct PendingBuffer {
-    uint16_t last_desc;
-    uint16_t latency_measure_desc;
-    InternalTransferDoneCallback on_transfer_done;
-    MappedBufferPtr mapped_buffer;
-};
-
-// We use std::array for PendingBuffersQueue to avoid dynamic allocations allocations. We are doing it for two reasons:
-//  1. It relies on memory shared between process (so we can't have dynamic allocation).
-//  2. We put it on interrupt handler stack - we want to avoid allocations.
-using PendingBuffersQueue = CircularArray<PendingBuffer, std::array<PendingBuffer, PENDING_BUFFERS_SIZE>>;
-
-class ChannelBase;
-class BoundaryChannel;
-class AsyncChannel;
-class BufferedChannel;
-
-
-#ifndef _MSC_VER
-// Special mutex and condition variable objects that can be shared between forked processes (Not needed on windows, 
-// because there is no fork).
-class RecursiveSharedMutex final {
-public:
-    RecursiveSharedMutex();
-    ~RecursiveSharedMutex();
-
-    RecursiveSharedMutex(const RecursiveSharedMutex &) = delete;
-    RecursiveSharedMutex &operator=(const RecursiveSharedMutex &) = delete;
-    RecursiveSharedMutex(RecursiveSharedMutex &&) = delete;
-    RecursiveSharedMutex &operator=(RecursiveSharedMutex &&) = delete;
-
-    void lock();
-    void unlock();
-
-    pthread_mutex_t *native_handle()
-    {
-        return &m_mutex;
-    }
-
-private:
-    pthread_mutex_t m_mutex;
-};
-
-class SharedConditionVariable final {
-public:
-
-    SharedConditionVariable();
-    ~SharedConditionVariable();
-
-    SharedConditionVariable(const SharedConditionVariable &) = delete;
-    SharedConditionVariable &operator=(const SharedConditionVariable &) = delete;
-    SharedConditionVariable(SharedConditionVariable &&) = delete;
-    SharedConditionVariable &operator=(SharedConditionVariable &&) = delete;
-
-    bool wait_for(std::unique_lock<RecursiveSharedMutex> &lock, std::chrono::milliseconds timeout, std::function<bool()> condition);
-    void notify_one();
-    void notify_all();
-
-private:
-    pthread_cond_t m_cond;
-};
-#else /* _MSC_VER */
-using RecursiveSharedMutex = std::recursive_mutex;
-using SharedConditionVariable = std::condition_variable_any;
-#endif
-
-
-class VdmaChannelState final
-{
-public:
-    static Expected<std::unique_ptr<VdmaChannelState>> create(uint32_t descs_count, bool measure_latency);
-
-    VdmaChannelState(uint32_t descs_count, bool measure_latency);
-    VdmaChannelState(const VdmaChannelState &other) = delete;
-    VdmaChannelState(VdmaChannelState &&other) = delete;
-    ~VdmaChannelState() = default;
-
-    static void empty_transfer_done_callback(hailo_status){}
-
-    void reset_counters();
-    void reset_previous_state_counters();
-    // Each transfer on the channel is logged by a PendingBuffer:
-    // - first_desc/last_desc - first and last descriptors of the transfer
-    // - direction - transfer's direction
-    // - on_transfer_done - callback to be called once the transfer is complete (i.e. when an interrupt is received on last_desc)
-    // - context - transfer context
-    // - mapped_buffer - buffer's dma mapping (may be null)
-    void add_pending_buffer(uint16_t first_desc, uint16_t last_desc, HailoRTDriver::DmaDirection direction,
-        const InternalTransferDoneCallback &on_transfer_done = empty_transfer_done_callback,
-        MappedBufferPtr mapped_buffer = nullptr);
-
-    RecursiveSharedMutex &mutex()
-    {
-        return m_state_lock;
-    }
-
-    SharedConditionVariable &transfer_buffer_cv()
-    {
-        return m_can_transfer_buffer_cv;
-    }
-
-#ifndef _MSC_VER
-    // The VdmaChannelState must remain in a shared memory scope, so we implement the new/delete operators (only on
-    // non-windows machines).
-    void* operator new(std::size_t size) = delete;
-    void* operator new(std::size_t size, const std::nothrow_t&) throw() {
-        // Map a shared memory region into the virtual memory of the process
-        void* ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
-        if (ptr == MAP_FAILED) {
-            return nullptr;
-        }
-        return ptr;
-    }
-
-    // Custom operator delete function that unmaps the shared memory region
-    void operator delete(void* ptr, std::size_t size) {
-        munmap(ptr, size);
-    }
-#endif /* _MSC_VER */
-
-    friend class ChannelBase;
-    friend class BoundaryChannel;
-    friend class AsyncChannel;
-    friend class BufferedChannel;
-
-private:
-    RecursiveSharedMutex m_state_lock;
-    SharedConditionVariable m_can_transfer_buffer_cv;
-
-    bool m_is_channel_activated;
-
-    PendingBuffersQueue m_pending_buffers;
-    // TODO: describe why we must have our own num_available and num_proc.
-    // it's not just for efficiency but its critical to avoid a potential bug - see Avigail email.
-    // TODO: Consider C11 stdatomic
-    circbuf_t m_descs;
-    // m_d2h_read_desc_index and m_d2h_read_desc_index_abs are the index of the first desc containing frames to be
-    // copied to the user ("ready" frames in a D2H buffered channel). m_d2h_read_desc_index is relative to the
-    // first desc in the desc list, whereas m_d2h_read_desc_index_abs is relative to the start of the vdma buffer.
-    int m_d2h_read_desc_index;
-    int m_d2h_read_desc_index_abs;
-    bool m_is_aborted;
-    // Points to the tail of the desc list when the channel is stopped (starts at zero)
-    int m_previous_tail;
-    int m_desc_list_delta;
-    // Contains the last num_processed of the last interrupt (only used on latency measurement)
-    uint16_t m_last_timestamp_num_processed;
-    size_t m_accumulated_transfers;
-};
-
-} /* namespace hailort */
-} /* namespace hailort */
-
-#endif /* _HAILO_VDMA_CHANNEL_STATE_HPP_ */
\ No newline at end of file
index afd95be1ab8b0f34d2104df9e028839191ff6907..2299048437d322cf302f730f6b297b6664dbdb53 100644 (file)
@@ -36,7 +36,7 @@ public:
         m_direction(direction)
     {}
 
-    Expected<uint16_t> get_num_available()
+    Expected<uint16_t> get_num_available() const
     {
         return read_integer<uint16_t>(VDMA_CHANNEL_NUM_AVAIL_OFFSET);
     }
@@ -46,13 +46,8 @@ public:
         return write_integer<uint16_t>(VDMA_CHANNEL_NUM_AVAIL_OFFSET, value);
     }
 
-    Expected<uint16_t> get_num_processed()
-    {
-        return read_integer<uint16_t>(VDMA_CHANNEL_NUM_PROC_OFFSET);
-    }
-
 #ifndef NDEBUG
-    Expected<bool> is_aborted()
+    Expected<bool> is_aborted() const
     {
         const auto control_reg = read_integer<uint8_t>(VDMA_CHANNEL_CONTROL_OFFSET);
         CHECK_EXPECTED(control_reg);
@@ -63,7 +58,7 @@ public:
 private:
 
     template<typename IntegerType>
-    Expected<IntegerType> read_integer(uint32_t offset)
+    Expected<IntegerType> read_integer(uint32_t offset) const
     {
         auto value = m_driver.read_vdma_channel_register(m_channel_id, m_direction, offset, sizeof(IntegerType));
         CHECK_EXPECTED(value);
diff --git a/hailort/libhailort/src/vdma/circular_stream_buffer_pool.cpp b/hailort/libhailort/src/vdma/circular_stream_buffer_pool.cpp
new file mode 100644 (file)
index 0000000..18234e5
--- /dev/null
@@ -0,0 +1,128 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file circular_stream_buffer_pool.cpp
+ **/
+
+#include "circular_stream_buffer_pool.hpp"
+#include "vdma/memory/descriptor_list.hpp"
+
+#include "utils.h"
+
+namespace hailort
+{
+
+Expected<std::unique_ptr<CircularStreamBufferPool>> CircularStreamBufferPool::create(HailoRTDriver &driver,
+    HailoRTDriver::DmaDirection direction, size_t desc_page_size, size_t descs_count, size_t transfer_size)
+{
+    // TODO: HRT-11220 calculate desc_count/desc_page_size base on transfer_size and queue_size
+    CHECK_AS_EXPECTED(is_powerof2(descs_count), HAILO_INTERNAL_FAILURE, "descs_count {} must be power of 2", descs_count);
+    CHECK_AS_EXPECTED(is_powerof2(desc_page_size), HAILO_INTERNAL_FAILURE, "desc_page_size {} must be power of 2",
+        desc_page_size);
+
+    const auto buffer_size = desc_page_size * descs_count;
+    CHECK_AS_EXPECTED(transfer_size < buffer_size, HAILO_INTERNAL_FAILURE, "Transfer size {} must be smaller than buffer size {}",
+        transfer_size, buffer_size);
+
+    auto mapped_buffer = allocate_buffer(driver, direction, buffer_size);
+    CHECK_EXPECTED(mapped_buffer);
+
+    auto circular_buffer_pool = make_unique_nothrow<CircularStreamBufferPool>(desc_page_size, descs_count,
+        transfer_size, mapped_buffer.release());
+    CHECK_NOT_NULL_AS_EXPECTED(circular_buffer_pool, HAILO_OUT_OF_HOST_MEMORY);
+
+    return circular_buffer_pool;
+}
+
+CircularStreamBufferPool::CircularStreamBufferPool(size_t desc_page_size, size_t descs_count, size_t transfer_size,
+    BufferPtr &&mapped_buffer) :
+        m_desc_page_size(desc_page_size),
+        m_transfer_size(transfer_size),
+        m_mapped_buffer(std::move(mapped_buffer)),
+        m_next_enqueue_desc_offset(0)
+{
+    assert(is_powerof2(descs_count) && (descs_count > 0));
+    assert(m_mapped_buffer->size() == (m_desc_page_size * descs_count));
+    CB_INIT(m_queue, descs_count);
+    m_queue.head = static_cast<int>(descs_count - 1);
+}
+
+size_t CircularStreamBufferPool::max_queue_size() const
+{
+    return (m_queue.size - 1) / DIV_ROUND_UP(m_transfer_size, m_desc_page_size);
+}
+
+size_t CircularStreamBufferPool::buffers_ready_to_dequeue() const
+{
+    const size_t descs_available = CB_PROG(m_queue, CB_HEAD(m_queue), CB_TAIL(m_queue));
+    return descs_available / descs_in_transfer();
+}
+
+Expected<TransferBuffer> CircularStreamBufferPool::dequeue()
+{
+    CHECK_AS_EXPECTED(buffers_ready_to_dequeue() > 0, HAILO_INTERNAL_FAILURE, "CircularStreamBufferPool is empty");
+
+    const size_t offset_in_buffer = CB_TAIL(m_queue) * m_desc_page_size;
+    CB_DEQUEUE(m_queue, descs_in_transfer());
+    return TransferBuffer {
+        m_mapped_buffer,
+        m_transfer_size,
+        offset_in_buffer
+    };
+}
+
+hailo_status CircularStreamBufferPool::enqueue(TransferBuffer &&buffer_info)
+{
+    const size_t descs_required = descs_in_transfer();
+    const size_t descs_available = CB_AVAIL(m_queue, CB_HEAD(m_queue), CB_TAIL(m_queue));
+    CHECK(descs_available >= descs_required, HAILO_INTERNAL_FAILURE, "Can enqueue without previous dequeue");
+    CHECK(buffer_info.base_buffer() == m_mapped_buffer, HAILO_INTERNAL_FAILURE, "Got the wrong buffer");
+    CHECK(buffer_info.size() == m_transfer_size, HAILO_INTERNAL_FAILURE, "Got invalid buffer size {}, expected {}",
+        buffer_info.size(), m_transfer_size);
+
+    const size_t expected_offset = m_next_enqueue_desc_offset * m_desc_page_size;
+    CHECK(buffer_info.offset() == expected_offset, HAILO_INTERNAL_FAILURE,
+        "Out of order enqueue is not supported in CircularStreamBufferPool. Got offset {}, expected {}",
+        buffer_info.offset(), expected_offset);
+
+    CB_ENQUEUE(m_queue, descs_required);
+    m_next_enqueue_desc_offset = (m_next_enqueue_desc_offset + descs_required) & m_queue.size_mask;
+    return HAILO_SUCCESS;
+}
+
+void CircularStreamBufferPool::reset_pointers()
+{
+    CB_RESET(m_queue);
+    m_queue.head = static_cast<int>(m_queue.size - 1);
+    m_next_enqueue_desc_offset = 0;
+}
+
+Expected<BufferPtr> CircularStreamBufferPool::allocate_buffer(HailoRTDriver &driver,
+    HailoRTDriver::DmaDirection direction, size_t size)
+{
+    auto dma_able_buffer = vdma::DmaAbleBuffer::create(driver, size);
+    CHECK_EXPECTED(dma_able_buffer);
+
+    auto dma_storage = make_shared_nothrow<DmaStorage>(dma_able_buffer.release());
+    CHECK_NOT_NULL_AS_EXPECTED(dma_storage, HAILO_OUT_OF_HOST_MEMORY);
+
+    // TODO HRT-11595: We map the buffer here to avoid mapping buffer during descriptors list creation (it cause
+    // deadlock on the linux driver). After HRT-11595, we won't need to call dma_map.
+    auto map_result = dma_storage->dma_map(driver, to_hailo_dma_direction(direction));
+    CHECK_EXPECTED(map_result);
+
+    auto mapped_buffer = make_shared_nothrow<Buffer>(std::move(dma_storage));
+    CHECK_NOT_NULL_AS_EXPECTED(mapped_buffer, HAILO_OUT_OF_HOST_MEMORY);
+
+    return mapped_buffer;
+}
+
+size_t CircularStreamBufferPool::descs_in_transfer() const
+{
+    assert(IS_FIT_IN_UINT16(m_desc_page_size));
+    return vdma::DescriptorList::descriptors_in_buffer(m_transfer_size, static_cast<uint16_t>(m_desc_page_size));
+}
+
+} /* namespace hailort */
diff --git a/hailort/libhailort/src/vdma/circular_stream_buffer_pool.hpp b/hailort/libhailort/src/vdma/circular_stream_buffer_pool.hpp
new file mode 100644 (file)
index 0000000..edb0caa
--- /dev/null
@@ -0,0 +1,77 @@
+/**
+ * Copyright (c) 2023 Hailo Technologies Ltd. All rights reserved.
+ * Distributed under the MIT license (https://opensource.org/licenses/MIT)
+**/
+/**
+ * @file circular_stream_buffer_pool.hpp
+ * @brief Single buffer used as a circular pool.
+ **/
+
+#ifndef _HAILO_CIRCULAR_STREAM_BUFFER_POOL_HPP_
+#define _HAILO_CIRCULAR_STREAM_BUFFER_POOL_HPP_
+
+#include "vdma/memory/mapped_buffer.hpp"
+#include "common/circular_buffer.hpp"
+#include "stream_common/stream_buffer_pool.hpp"
+
+#include <condition_variable>
+
+
+namespace hailort
+{
+
+// A buffer pool carved out of a single virtually contiguous buffer.
+// The buffers are dequeued in a circular order.
+// This class can be used from multiple threads without any lock if there is only one consumer (calls dequeue and
+// buffers_ready_to_dequeue)
+// and one producer (calls enqueue).
+class CircularStreamBufferPool final : public StreamBufferPool {
+public:
+    static Expected<std::unique_ptr<CircularStreamBufferPool>> create(HailoRTDriver &driver,
+        HailoRTDriver::DmaDirection direction, size_t desc_page_size, size_t descs_count, size_t transfer_size);
+
+    CircularStreamBufferPool(size_t desc_page_size, size_t descs_count, size_t transfer_size,
+        BufferPtr &&mapped_buffer);
+
+    virtual size_t max_queue_size() const override;
+    size_t buffers_ready_to_dequeue() const;
+
+    virtual Expected<TransferBuffer> dequeue() override;
+
+    virtual hailo_status enqueue(TransferBuffer &&buffer_info) override;
+
+    BufferPtr get_mapped_buffer() { return m_mapped_buffer; }
+
+    virtual void reset_pointers() override;
+
+private:
+    static Expected<BufferPtr> allocate_buffer(HailoRTDriver &driver,
+        HailoRTDriver::DmaDirection direction, size_t size);
+
+    size_t descs_in_transfer() const;
+
+    // We always work in desc_page_size granularity to avoid the need to reprogram descriptors.
+    const size_t m_desc_page_size;
+
+    const size_t m_transfer_size;
+
+    // m_mapped_buffer.size() must be CB_SIZE(m_queue) * m_desc_page_size
+    BufferPtr m_mapped_buffer;
+
+    // Head/tail based queue that manages the buffer pool.
+    // The head and tail are in m_desc_page_size granularity.
+    //
+    // If CB_HEAD(m_queue) == CB_TAIL(m_queue) the pool is empty.
+    // Otherwise, the buffers that can be in use starts from
+    //   CB_TAIL(m_queue) * m_desc_page_size (inclusive)
+    // until
+    //   CB_HEAD(m_queue) * m_desc_page_size (exclusive)
+    circbuf_t m_queue;
+
+    // Used to validate that the buffers are enqueued in order.
+    size_t m_next_enqueue_desc_offset;
+};
+
+} /* namespace hailort */
+
+#endif /* _HAILO_CIRCULAR_STREAM_BUFFER_POOL_HPP_ */
index c574d3d3e2c790d0270de6fc0bb5a97f691072d9..6b14d076f5c0f042b09d85ca5266daa211fda812 100644 (file)
@@ -39,7 +39,7 @@ Expected<std::unique_ptr<IntegratedDevice>> IntegratedDevice::create()
     return device;
 }
 
-IntegratedDevice::IntegratedDevice(HailoRTDriver &&driver, hailo_status &status) :
+IntegratedDevice::IntegratedDevice(std::unique_ptr<HailoRTDriver> &&driver, hailo_status &status) :
     VdmaDevice::VdmaDevice(std::move(driver), Device::Type::INTEGRATED)
 {
     status = update_fw_state();
@@ -54,7 +54,7 @@ IntegratedDevice::IntegratedDevice(HailoRTDriver &&driver, hailo_status &status)
 hailo_status IntegratedDevice::reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type)
 {
     if (CONTROL_PROTOCOL__RESET_TYPE__NN_CORE == reset_type) {
-        return m_driver.reset_nn_core();
+        return m_driver->reset_nn_core();
     }
 
     LOGGER__ERROR("Can't reset IntegratedDevice, please use linux reboot");
index 856994b809ec01a28cb7b46a31e467299c37d7b0..eacae835a8e8f54863c0aaa1b0d64ea860867f7a 100644 (file)
@@ -52,7 +52,7 @@ protected:
     virtual hailo_status reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type) override;
 
 private:
-    IntegratedDevice(HailoRTDriver &&driver, hailo_status &status);
+    IntegratedDevice(std::unique_ptr<HailoRTDriver> &&driver, hailo_status &status);
 };
 
 
index c48ecb3adf3df33316dbeee70f08090c376befe7..9f36b9969ce24859ffde2e1cf417cb4bdef6b0f7 100644 (file)
@@ -10,6 +10,8 @@
 #include "vdma/memory/descriptor_list.hpp"
 #include "utils.h"
 
+#include <numeric>
+
 namespace hailort {
 namespace vdma {
 
@@ -18,11 +20,11 @@ static constexpr uint32_t MIN_CCB_DESCRIPTORS_COUNT = 16;
 
 Expected<BufferSizesRequirements> BufferSizesRequirements::get_sg_buffer_requirements_single_transfer(
     uint16_t max_desc_page_size, uint16_t min_batch_size, uint16_t max_batch_size, uint32_t transfer_size,
-    bool is_circular, const bool force_default_page_size)
+    bool is_circular, const bool force_default_page_size, const bool force_batch_size)
 {
     // First, get the result for the min size
     auto results = get_sg_buffer_requirements_multiple_transfers(max_desc_page_size, min_batch_size,
-        {transfer_size}, is_circular, force_default_page_size);
+        {transfer_size}, is_circular, force_default_page_size, force_batch_size);
     CHECK_EXPECTED(results);
 
     // In order to fetch all descriptors, the amount of active descs is lower by one that the amount
@@ -39,7 +41,7 @@ Expected<BufferSizesRequirements> BufferSizesRequirements::get_sg_buffer_require
 
 Expected<BufferSizesRequirements> BufferSizesRequirements::get_sg_buffer_requirements_multiple_transfers(
     uint16_t max_desc_page_size, uint16_t batch_size, const std::vector<uint32_t> &transfer_sizes,
-    bool is_circular, const bool force_default_page_size)
+    bool is_circular, const bool force_default_page_size, const bool force_batch_size)
 {
     const uint16_t initial_desc_page_size = force_default_page_size ?
         DEFAULT_DESC_PAGE_SIZE : find_initial_desc_page_size(transfer_sizes);
@@ -58,6 +60,13 @@ Expected<BufferSizesRequirements> BufferSizesRequirements::get_sg_buffer_require
     CHECK_AS_EXPECTED(initial_desc_page_size >= min_desc_page_size, HAILO_INTERNAL_FAILURE,
         "Initial descriptor page size ({}) is smaller than minimum descriptor page size ({})",
         initial_desc_page_size, min_desc_page_size);
+    CHECK_AS_EXPECTED(MAX_DESCS_COUNT >= get_required_descriptor_count(transfer_sizes, max_desc_page_size),
+        HAILO_OUT_OF_DESCRIPTORS,
+        "Network shapes exceeds driver descriptors capabilities."
+        "Minimal descriptors count: {}, max allowed on the driver: {}."
+        "(A common cause for this error could be the large transfer size - which is {}).",
+        get_required_descriptor_count(transfer_sizes, max_desc_page_size), (MAX_DESCS_COUNT - 1),
+        std::accumulate(transfer_sizes.begin(), transfer_sizes.end(), 0));
 
     // Defined as uint32_t to prevent overflow (as we multiply it by two in each iteration of the while loop bellow)
     uint32_t local_desc_page_size = initial_desc_page_size;
@@ -69,11 +78,21 @@ Expected<BufferSizesRequirements> BufferSizesRequirements::get_sg_buffer_require
             "Descriptor page size needs to fit in 16B");
         local_desc_page_size = static_cast<uint16_t>(local_desc_page_size << 1);
 
-        CHECK_AS_EXPECTED(local_desc_page_size <= max_desc_page_size, HAILO_OUT_OF_DESCRIPTORS,
-            "Network shapes and batch size exceeds driver descriptors capabilities. "
-            "Required descriptors count: {}, max allowed on the driver: {}. "
-            "(A common cause for this error could be the batch size - which is {}).",
-            (batch_size * descs_count), (MAX_DESCS_COUNT - 1), batch_size);
+        if (local_desc_page_size > max_desc_page_size) {
+            if (force_batch_size) {
+                LOGGER__ERROR("Network shapes and batch size exceeds driver descriptors capabilities. "
+                "Required descriptors count: {}, max allowed on the driver: {}. "
+                "(A common cause for this error could be the batch size - which is {}).",
+                (batch_size * descs_count), (MAX_DESCS_COUNT - 1), batch_size);
+                return make_unexpected(HAILO_OUT_OF_DESCRIPTORS);
+            } else {
+                // If the batch size is not forced (it's acceptable to run inference with a lower batch instead of
+                // returning an error), stop once the max page size has been exceeded
+                local_desc_page_size = max_desc_page_size;
+                descs_count = get_required_descriptor_count(transfer_sizes, static_cast<uint16_t>(local_desc_page_size));
+                break;
+            }
+        }
 
         descs_count = get_required_descriptor_count(transfer_sizes, static_cast<uint16_t>(local_desc_page_size));
     }
index 03568f8d1706a9870d35cd18a3ed4946840c181d..2e713f234de9149533886d98803f8f4e44bf85e9 100644 (file)
@@ -37,10 +37,10 @@ public:
 
     static Expected<BufferSizesRequirements> get_sg_buffer_requirements_single_transfer(uint16_t max_desc_page_size,
         uint16_t min_batch_size, uint16_t max_batch_size, uint32_t transfer_size, bool is_circular,
-        const bool force_default_page_size);
+        const bool force_default_page_size, const bool force_batch_size);
     static Expected<BufferSizesRequirements> get_sg_buffer_requirements_multiple_transfers(uint16_t max_desc_page_size,
         uint16_t batch_size, const std::vector<uint32_t> &transfer_sizes, bool is_circular,
-        const bool force_default_page_size);
+        const bool force_default_page_size, const bool force_batch_size);
 
     static Expected<BufferSizesRequirements> get_ccb_buffer_requirements_single_transfer(uint16_t batch_size,
         uint32_t transfer_size, bool is_circular);
index beac64609b3c65d93a7f98861e62eece2300c759..1f0b63a9eed6133d607851bdfb6866566c579bfb 100644 (file)
@@ -9,11 +9,21 @@
 
 #include "continuous_buffer.hpp"
 
+/* TODO - Support non default CCB page sizes */
+#define CCB_PAGE_SIZE (512)
+#define MAX_PAGES_PER_INTERRUPT (0x0003FFFF)
+#define MAX_CCB_BUFFER_SIZE (CCB_PAGE_SIZE * MAX_PAGES_PER_INTERRUPT)
+
 namespace hailort {
 namespace vdma {
 
 Expected<ContinuousBuffer> ContinuousBuffer::create(size_t size, HailoRTDriver &driver)
 {
+    if (size > MAX_CCB_BUFFER_SIZE) {
+        LOGGER__INFO("continious memory size {} must be smaller/equal to {}.", size, MAX_CCB_BUFFER_SIZE);
+        return make_unexpected(HAILO_OUT_OF_HOST_CMA_MEMORY);
+    }
+
     auto result = driver.vdma_continuous_buffer_alloc(size);
     /* Don't print error here since this might be expected error that the libhailoRT can recover from
         (out of host memory). If it's not the case, there is a print in hailort_driver.cpp file */
@@ -105,17 +115,6 @@ Expected<uint32_t> ContinuousBuffer::program_descriptors(size_t transfer_size, I
     return descriptors_in_buffer(transfer_size);
 }
 
-hailo_status ContinuousBuffer::reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
-        InterruptsDomain new_interrupts_domain)
-{
-    (void)transfer_size;
-    (void)batch_size;
-    (void)new_interrupts_domain;
-
-    // The descriptors in continuous mode are programmed by the hw, nothing to do here.
-    return HAILO_SUCCESS;
-}
-
 ContinuousBuffer::ContinuousBuffer(size_t size, HailoRTDriver &driver, uintptr_t handle, uint64_t dma_address,
     MmapBuffer<void> &&mmap) :
     m_size(size),
index 57b3ed538e254e1067ab36f560501803e3b029eb..98e9303602f65fad75789625cdc70ff0fbf3a0ac 100644 (file)
@@ -52,8 +52,6 @@ public:
 
     virtual Expected<uint32_t> program_descriptors(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,
         size_t desc_offset) override;
-    virtual hailo_status reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
-        InterruptsDomain new_interrupts_domain) override;
 
 private:
     ContinuousBuffer(size_t size, HailoRTDriver &driver, uintptr_t handle, uint64_t dma_address,
index a26409830e0a85bda1e97aebe2b9835756069325..baf39dd3e21ec1b2b8e3bc72992d6d5f221d2d24 100644 (file)
@@ -128,17 +128,6 @@ Expected<uint16_t> DescriptorList::program_last_descriptor(size_t transfer_size,
     return std::move(static_cast<uint16_t>(required_descriptors));
 }
 
-hailo_status DescriptorList::reprogram_descriptor_interrupts_domain(size_t desc_index,
-    InterruptsDomain interrupts_domain)
-{
-    if (desc_index >= count()){
-        LOGGER__ERROR("Requested desc (index={}) exceeds the number of descriptors in the list ({})", desc_index, count());
-        return HAILO_OUT_OF_DESCRIPTORS;
-    }
-    reprogram_single_descriptor_interrupts_domain((*this)[desc_index], interrupts_domain);
-    return HAILO_SUCCESS;
-}
-
 uint32_t DescriptorList::descriptors_in_buffer(size_t buffer_size) const
 {
     return descriptors_in_buffer(buffer_size, m_desc_page_size);
@@ -214,25 +203,6 @@ void DescriptorList::program_single_descriptor(VdmaDescriptor &descriptor, uint1
 #endif
 }
 
-void DescriptorList::reprogram_single_descriptor_interrupts_domain(VdmaDescriptor &descriptor,
-    InterruptsDomain interrupts_domain)
-{
-    // Set the IRQ control bits to zero
-    // Make all edits to the local variable local_pagesize_desc_ctrl that is on the stack to save read/writes to DDR
-    auto local_pagesize_desc_ctrl = (descriptor.PageSize_DescControl & ~DESC_IRQ_MASK);
-
-    if (InterruptsDomain::NONE == interrupts_domain) {
-        // Nothing else to do
-        descriptor.PageSize_DescControl = local_pagesize_desc_ctrl;
-        return;
-    }
-
-    local_pagesize_desc_ctrl |= (DESC_REQUREST_IRQ_PROCESSED | DESC_REQUREST_IRQ_ERR |
-        get_interrupts_bitmask(interrupts_domain));
-
-    descriptor.PageSize_DescControl = local_pagesize_desc_ctrl;
-}
-
 void DescriptorList::clear_descriptor(const size_t desc_index)
 {
     // Clear previous descriptor properties
index 25c3c355aa11a4dc23a508f9e9bc6fbf599d5776..de3715efe38b51e341d83c16dcf748427ca3080a 100644 (file)
@@ -95,6 +95,17 @@ enum class InterruptsDomain
     BOTH    = DEVICE | HOST
 };
 
+inline InterruptsDomain operator|(InterruptsDomain a, InterruptsDomain b)
+{
+    return static_cast<InterruptsDomain>(static_cast<int>(a) | static_cast<int>(b));
+}
+
+inline InterruptsDomain& operator|=(InterruptsDomain &a, InterruptsDomain b)
+{
+    a = a | b;
+    return a;
+}
+
 inline bool host_interuptes_enabled(InterruptsDomain interrupts_domain)
 {
     return 0 != (static_cast<uint32_t>(interrupts_domain) & static_cast<uint32_t>(InterruptsDomain::HOST));
@@ -160,7 +171,6 @@ public:
     Expected<uint16_t> program_last_descriptor(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,
         size_t desc_offset);
     void program_single_descriptor(VdmaDescriptor &descriptor, uint16_t page_size, InterruptsDomain interrupts_domain);
-    hailo_status reprogram_descriptor_interrupts_domain(size_t desc_index, InterruptsDomain interrupts_domain);
     void clear_descriptor(const size_t desc_index);
 
     uint32_t descriptors_in_buffer(size_t buffer_size) const;
@@ -174,7 +184,6 @@ private:
     VdmaDescriptor *desc_list() { return reinterpret_cast<VdmaDescriptor*>(m_desc_list_info.user_address); }
 
     uint32_t get_interrupts_bitmask(InterruptsDomain interrupts_domain);
-    void reprogram_single_descriptor_interrupts_domain(VdmaDescriptor &descriptor, InterruptsDomain interrupts_domain);
 
 
     DescriptorsListInfo m_desc_list_info;
index b179fbb4ed63b003d29d14a9af77473cda5a90e1..2a0d8eb93723a12a7acee395882ca9827f45b2da 100644 (file)
@@ -121,5 +121,95 @@ hailo_status MappedBuffer::synchronize(size_t offset, size_t count, HailoRTDrive
     return m_driver.vdma_buffer_sync(m_mapping_handle, sync_direction, offset, count);
 }
 
+hailo_status MappedBuffer::write(const void *buf_src, size_t count, size_t offset, bool should_sync)
+{
+    if ((count + offset) > size()) {
+        LOGGER__ERROR("Requested size {} from offset {} is more than the buffer size {}", count, offset, size());
+        return HAILO_INSUFFICIENT_BUFFER;
+    }
+
+    if (count > 0) {
+        auto dst_addr = static_cast<uint8_t*>(user_address()) + offset;
+        memcpy(dst_addr, buf_src, count);
+
+        if (should_sync) {
+            auto status = synchronize(offset, count, HailoRTDriver::DmaSyncDirection::TO_DEVICE);
+            CHECK_SUCCESS(status, "Failed synching vdma buffer on write");
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status MappedBuffer::read(void *buf_dst, size_t count, size_t offset, bool should_sync)
+{
+    if ((count + offset) > size()) {
+        LOGGER__ERROR("Requested size {} from offset {} is more than the buffer size {}", count, offset, size());
+        return HAILO_INSUFFICIENT_BUFFER;
+    }
+
+    if (count > 0) {
+        const auto src_addr = static_cast<uint8_t*>(user_address()) + offset;
+        if (should_sync) {
+            const auto status = synchronize(offset, count, HailoRTDriver::DmaSyncDirection::TO_HOST);
+            CHECK_SUCCESS(status, "Failed synching vdma buffer on read");
+        }
+
+        memcpy(buf_dst, src_addr, count);
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status MappedBuffer::write_cyclic(const void *buf_src, size_t count, size_t offset, bool should_sync)
+{
+    if (count > size()) {
+        LOGGER__ERROR("Requested size({}) is more than the buffer size {}", count, size());
+        return HAILO_INSUFFICIENT_BUFFER;
+    }
+
+    auto size_to_end = size() - offset;
+    auto copy_size = std::min(size_to_end, count);
+    auto status = write(buf_src, copy_size, offset, should_sync);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    auto remaining_size = count - copy_size;
+    if (remaining_size > 0) {
+        status = write((uint8_t*)buf_src + copy_size, remaining_size, 0);
+        if (HAILO_SUCCESS != status) {
+            return status;
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
+hailo_status MappedBuffer::read_cyclic(void *buf_dst, size_t count, size_t offset, bool should_sync)
+{
+    if (count > size()) {
+        LOGGER__ERROR("Requested size({}) is more than the buffer size {}", count, size());
+        return HAILO_INSUFFICIENT_BUFFER;
+    }
+
+    auto size_to_end = size() - offset;
+    auto copy_size = std::min(size_to_end, count);
+    auto status = read(buf_dst, copy_size, offset, should_sync);
+    if (HAILO_SUCCESS != status) {
+        return status;
+    }
+
+    auto remaining_size = count - copy_size;
+    if (remaining_size > 0) {
+        status = read((uint8_t*)buf_dst + copy_size, remaining_size, 0, should_sync);
+        if (HAILO_SUCCESS != status) {
+            return status;
+        }
+    }
+
+    return HAILO_SUCCESS;
+}
+
 } /* namespace vdma */
 } /* namespace hailort */
index 2fc876b479183d64474722ad75a65af9ab7b27da..9c864aa767d1ffb57a359690cc31c4413c6a1c32 100644 (file)
@@ -65,6 +65,32 @@ public:
     // TODO: validate that offset is cache aligned (HRT-9811)
     hailo_status synchronize(size_t offset, size_t count, HailoRTDriver::DmaSyncDirection sync_direction);
 
+    /**
+     * Copy data from buf_src parameter to this buffer.
+     */
+    hailo_status write(const void *buf_src, size_t count, size_t offset, bool should_sync = true);
+
+    /**
+     * Copy data from this buffer to buf_dst.
+     */
+    hailo_status read(void *buf_dst, size_t count, size_t offset, bool should_sync = true);
+
+    /**
+     * Copy data from buf_src parameter to this buffer.
+     *
+     * Similar to 'write' but if (offset + count) is larger than the buffer size, the copy continues
+     * from the start of the buffer.
+     */
+    hailo_status write_cyclic(const void *buf_src, size_t count, size_t offset,  bool should_sync = true);
+
+    /**
+     * Copy data from this buffer to buf_dst.
+     *
+     * Similar to 'read' but if (offset + count) is larger than the buffer size, the copy continues
+     * from the start of the buffer.
+     */
+    hailo_status read_cyclic(void *buf_dst, size_t count, size_t offset, bool should_sync = true);
+
 private:
     MappedBuffer(HailoRTDriver &driver, std::shared_ptr<DmaAbleBuffer> buffer, HailoRTDriver::DmaDirection data_direction,
         hailo_status &status);
index 28a69730dc9fa9b0deb817aa3b9455567dafdc31..e85de25cf04e28cec6de305308b99a8bd649adce 100644 (file)
@@ -66,33 +66,11 @@ uint32_t SgBuffer::descs_count() const
 
 hailo_status SgBuffer::read(void *buf_dst, size_t count, size_t offset)
 {
-    CHECK(count + offset <= m_mapped_buffer->size(), HAILO_INSUFFICIENT_BUFFER);
-    if (count == 0) {
-        return HAILO_SUCCESS;
-    }
-
-    const auto status = m_mapped_buffer->synchronize(offset, count, HailoRTDriver::DmaSyncDirection::TO_HOST);
-    CHECK_SUCCESS(status, "Failed synching SgBuffer buffer on read");
-
-    const auto src_addr = static_cast<uint8_t*>(m_mapped_buffer->user_address()) + offset;
-    memcpy(buf_dst, src_addr, count);
-
-    return HAILO_SUCCESS;
+    return m_mapped_buffer->read(buf_dst, count, offset);
 }
 hailo_status SgBuffer::write(const void *buf_src, size_t count, size_t offset)
 {
-    CHECK(count + offset <= m_mapped_buffer->size(), HAILO_INSUFFICIENT_BUFFER);
-    if (count == 0) {
-        return HAILO_SUCCESS;
-    }
-
-    const auto dst_addr = static_cast<uint8_t*>(m_mapped_buffer->user_address()) + offset;
-    std::memcpy(dst_addr, buf_src, count);
-
-    const auto status = m_mapped_buffer->synchronize(offset, count, HailoRTDriver::DmaSyncDirection::TO_DEVICE);
-    CHECK_SUCCESS(status, "Failed synching SgBuffer buffer on write");
-
-    return HAILO_SUCCESS;
+    return m_mapped_buffer->write(buf_src, count, offset);
 }
 
 Expected<uint32_t> SgBuffer::program_descriptors(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,
@@ -101,14 +79,5 @@ Expected<uint32_t> SgBuffer::program_descriptors(size_t transfer_size, Interrupt
     return m_desc_list->program_last_descriptor(transfer_size, last_desc_interrupts_domain, desc_offset);
 }
 
-hailo_status SgBuffer::reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
-        InterruptsDomain new_interrupts_domain)
-{
-    const auto desc_per_transfer = m_desc_list->descriptors_in_buffer(transfer_size);
-    const auto num_desc_in_batch = desc_per_transfer * batch_size;
-    const auto last_desc_index_in_batch = num_desc_in_batch - 1;
-    return m_desc_list->reprogram_descriptor_interrupts_domain(last_desc_index_in_batch, new_interrupts_domain);
-}
-
 }
 }
\ No newline at end of file
index bb131575c466ccef74ec6a35ae8e8c70ee9cc8e3..38c6d45f8512986561ebc2822d664ae7f6ec7b1b 100644 (file)
@@ -51,8 +51,6 @@ public:
 
     virtual Expected<uint32_t> program_descriptors(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,
         size_t desc_offset) override;
-    virtual hailo_status reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,
-        InterruptsDomain new_interrupts_domain) override;
 
 private:
     SgBuffer(std::shared_ptr<MappedBuffer> mapped_buffer, std::shared_ptr<DescriptorList> desc_list);
index d11393f51368691f3ecf5ce4dd38c7e7dc8a1271..97e6e75d45f86e97a551fa50d2ae082933d1bb63 100644 (file)
@@ -53,9 +53,7 @@ public:
 \r
     virtual Expected<uint32_t> program_descriptors(size_t transfer_size, InterruptsDomain last_desc_interrupts_domain,\r
         size_t desc_offset) = 0;\r
-    virtual hailo_status reprogram_device_interrupts_for_end_of_batch(size_t transfer_size, uint16_t batch_size,\r
-        InterruptsDomain new_interrupts_domain) = 0;\r
-        \r
+\r
     CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info(uint32_t transfer_size);\r
     static CONTROL_PROTOCOL__host_buffer_info_t get_host_buffer_info(Type type, uint64_t dma_address,\r
         uint16_t desc_page_size, uint32_t total_desc_count, uint32_t transfer_size);\r
index 88fe6b2cebfc6b0ffe2ab4319a88f07d48b734f1..e8a7075fb4b894597e36505dab7baa0b0116de1d 100644 (file)
@@ -136,10 +136,10 @@ bool PcieDevice::pcie_device_infos_equal(const hailo_pcie_device_info_t &first,
     return bdf_equal && domain_equal;
 }
 
-PcieDevice::PcieDevice(HailoRTDriver &&driver, hailo_status &status) :
-    VdmaDevice::VdmaDevice(std::move(driver), Device::Type::PCIE)
+PcieDevice::PcieDevice(std::unique_ptr<HailoRTDriver> &&driver, hailo_status &status) :
+    VdmaDevice(std::move(driver), Device::Type::PCIE)
 {
-    if (driver.is_fw_loaded()) {
+    if (m_driver->is_fw_loaded()) {
         status = update_fw_state();
         if (HAILO_SUCCESS != status) {
             LOGGER__ERROR("update_fw_state() failed with status {}", status);
@@ -160,7 +160,7 @@ void PcieDevice::set_is_control_version_supported(bool value)
 
 Expected<hailo_device_architecture_t> PcieDevice::get_architecture() const
 {
-    if (!m_driver.is_fw_loaded()) {
+    if (!m_driver->is_fw_loaded()) {
         LOGGER__WARNING("FW is not loaded to the device. Please load FW before using the device.");
         return make_unexpected(HAILO_INVALID_OPERATION);
     }
@@ -170,12 +170,12 @@ Expected<hailo_device_architecture_t> PcieDevice::get_architecture() const
 
 hailo_status PcieDevice::direct_write_memory(uint32_t address, const void *buffer, uint32_t size)
 {
-    return m_driver.write_memory(HailoRTDriver::MemoryType::DIRECT_MEMORY, address, buffer, size);
+    return m_driver->write_memory(HailoRTDriver::MemoryType::DIRECT_MEMORY, address, buffer, size);
 }
 
 hailo_status PcieDevice::direct_read_memory(uint32_t address, void *buffer, uint32_t size)
 {
-    return m_driver.read_memory(HailoRTDriver::MemoryType::DIRECT_MEMORY, address, buffer, size);
+    return m_driver->read_memory(HailoRTDriver::MemoryType::DIRECT_MEMORY, address, buffer, size);
 }
 
 hailo_status PcieDevice::reset_impl(CONTROL_PROTOCOL__reset_type_t reset_type)
index 6bca1910ab60f1f97be2fded1a9b590026dd43b9..271de77fdcafd564e4e251d021730b324f15b9e5 100644 (file)
@@ -15,7 +15,6 @@
 #include "hailo/hailort.h"
 #include "hailo/expected.hpp"
 
-#include "vdma/channel/boundary_channel.hpp"
 #include "vdma/vdma_device.hpp"
 
 
@@ -57,7 +56,7 @@ public:
     virtual Expected<hailo_device_architecture_t> get_architecture() const override;
 
 private:
-    PcieDevice(HailoRTDriver &&driver, hailo_status &status);
+    PcieDevice(std::unique_ptr<HailoRTDriver> &&driver, hailo_status &status);
 
     static Expected<HailoRTDriver::DeviceInfo> find_device_info(const hailo_pcie_device_info_t &pcie_device_info);
 };
diff --git a/hailort/libhailort/src/vdma/vdma_async_stream.cpp b/hailort/libhailort/src/vdma/vdma_async_stream.cpp
deleted file mode 100644 (file)
index 22392f2..0000000
+++ /dev/null
@@ -1,330 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_async_stream.cpp
- * @brief Async vdma stream implementation
- **/
-
-#include "hailo/hailort_common.hpp"
-
-#include "vdma/vdma_async_stream.hpp"
-#include "common/os_utils.hpp"
-
-
-namespace hailort
-{
-
-VdmaAsyncInputStream::VdmaAsyncInputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel,
-                                           const LayerInfo &edge_layer, EventPtr core_op_activated_event,
-                                           uint16_t batch_size, std::chrono::milliseconds transfer_timeout,
-                                           hailo_stream_interface_t stream_interface, hailo_status &status) :
-    VdmaInputStreamBase(device, channel, edge_layer, core_op_activated_event, batch_size,
-                        transfer_timeout, stream_interface, status)
-{
-    // Checking status for base class c'tor
-    if (HAILO_SUCCESS != status) {
-        return;
-    }
-
-    if (channel->type() != vdma::BoundaryChannel::Type::ASYNC) {
-        LOGGER__ERROR("Can't create a async vdma stream with a non async channel. Received channel type {}", channel->type());
-        status = HAILO_INVALID_ARGUMENT;
-        return;
-    }
-
-    status = HAILO_SUCCESS;
-}
-
-hailo_status VdmaAsyncInputStream::wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout)
-{
-    const bool STOP_IF_DEACTIVATED = true;
-    return m_channel->wait(transfer_size, timeout, STOP_IF_DEACTIVATED);
-}
-
-Expected<size_t> VdmaAsyncInputStream::get_async_max_queue_size() const
-{
-    return get_buffer_frames_size();
-}
-
-hailo_status VdmaAsyncInputStream::write_buffer_only(const MemoryView &, const std::function<bool()> &)
-{
-    LOGGER__ERROR("The write_buffer_only function is not supported by async streams");
-    return HAILO_INVALID_OPERATION;
-}
-
-hailo_status VdmaAsyncInputStream::send_pending_buffer(const device_id_t &)
-{
-    LOGGER__ERROR("The send_pending_buffer function is not supported by async streams");
-    return HAILO_INVALID_OPERATION;
-}
-
-hailo_status VdmaAsyncInputStream::write_async(TransferRequest &&transfer_request)
-{
-    return m_channel->transfer_async(std::move(transfer_request));
-}
-
-hailo_status VdmaAsyncInputStream::write_impl(const MemoryView &)
-{
-    LOGGER__ERROR("Sync write is not supported by async streams");
-    return HAILO_INVALID_OPERATION;
-}
-
-/** Output stream **/
-
-VdmaAsyncOutputStream::VdmaAsyncOutputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-                                             EventPtr core_op_activated_event, uint16_t batch_size,
-                                             std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
-                                             hailo_status &status) :
-    VdmaOutputStreamBase(device, channel, edge_layer, core_op_activated_event, batch_size,
-                         transfer_timeout, interface, status)
-{
-    // Check status for base class c'tor
-    if (HAILO_SUCCESS != status) {
-        return;
-    }
-
-    if (channel->type() != vdma::BoundaryChannel::Type::ASYNC) {
-        LOGGER__ERROR("Can't create an async vdma stream with a non async channel. Received channel type {}", channel->type());
-        status = HAILO_INVALID_ARGUMENT;
-        return;
-    }
-
-    status = HAILO_SUCCESS;
-}
-
-hailo_status VdmaAsyncOutputStream::wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout)
-{
-    const bool STOP_IF_DEACTIVATED = true;
-    return m_channel->wait(transfer_size, timeout, STOP_IF_DEACTIVATED);
-}
-
-Expected<size_t> VdmaAsyncOutputStream::get_async_max_queue_size() const
-{
-    return get_buffer_frames_size();
-}
-
-hailo_status VdmaAsyncOutputStream::read_impl(MemoryView &)
-{
-    LOGGER__ERROR("Sync read is not supported by async streams");
-    return HAILO_INVALID_OPERATION;
-}
-
-hailo_status VdmaAsyncOutputStream::read_async(TransferRequest &&transfer_request)
-{
-    return m_channel->transfer_async(std::move(transfer_request));
-}
-
-/** Output nms stream **/
-VdmaAsyncOutputNmsStream::VdmaAsyncOutputNmsStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel,
-                                                   const LayerInfo &edge_layer, EventPtr core_op_activated_event,
-                                                   uint16_t batch_size, std::chrono::milliseconds transfer_timeout,
-                                                   hailo_stream_interface_t interface, hailo_status &status) :
-    VdmaOutputStreamBase(device, channel, edge_layer, core_op_activated_event, batch_size,
-                         transfer_timeout, interface, status),
-    m_queue_max_size(channel->get_transfers_count_in_buffer(get_info().hw_frame_size)),
-    m_queue_mutex(),
-    m_abort_mutex(),
-    m_queue_cond(),
-    m_queue(),
-    m_stream_aborted(false),
-    m_should_quit(false),
-    m_worker_thread([this] { process_transfer_requests(); })
-{
-    // Check status for base class c'tor
-    if (HAILO_SUCCESS != status) {
-        return;
-    }
-
-    if (edge_layer.format.order != HAILO_FORMAT_ORDER_HAILO_NMS) {
-        // This shouldn't happen
-        LOGGER__ERROR("Can't create NMS vdma async output stream if edge layer order isn't NMS. Order received {}",
-            edge_layer.format.order);
-        status = HAILO_INTERNAL_FAILURE;
-        return;
-    }
-
-    // TODO: after adding NMS single int, we can create an async channel for async nms output stream (HRT-10553)
-    if (channel->type() != vdma::BoundaryChannel::Type::BUFFERED) {
-        LOGGER__ERROR("Can't create an async nms vdma stream with a non buffered channel. Received channel type {}", channel->type());
-        status = HAILO_INVALID_ARGUMENT;
-        return;
-    }
-
-    status = HAILO_SUCCESS;
-}
-
-VdmaAsyncOutputNmsStream::~VdmaAsyncOutputNmsStream()
-{
-    // VdmaAsyncOutputNmsStream::deactivate_stream() calls VdmaOutputStreamBase::deactivate_stream().
-    // Because this dtor (i.e. ~VdmaAsyncOutputNmsStream()) is called before ~VdmaOutputStreamBase(), calling
-    // VdmaOutputStreamBase::deactivate_stream() inside VdmaAsyncOutputNmsStream::deactivate_stream() will work.
-    if (this->is_stream_activated) {
-        const auto status = deactivate_stream();
-        if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to deactivate stream with error status {}", status);
-        }
-    }
-
-    if (m_worker_thread.joinable()) {
-        signal_thread_quit();
-        m_worker_thread.join();
-    }
-}
-
-hailo_status VdmaAsyncOutputNmsStream::wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout)
-{
-    CHECK(transfer_size == get_info().hw_frame_size, HAILO_INSUFFICIENT_BUFFER,
-        "On nms stream transfer_size should be {} (given size {})", get_info().hw_frame_size, transfer_size);
-    std::unique_lock<std::mutex> lock(m_queue_mutex);
-    auto result = m_queue_cond.wait_for(lock, timeout,
-        [&]{ return m_should_quit || m_stream_aborted || (m_queue.size() < m_queue_max_size); });
-    if (result) {
-        if (m_should_quit) {
-            return HAILO_STREAM_NOT_ACTIVATED;
-        }
-        return m_stream_aborted ? HAILO_STREAM_ABORTED_BY_USER : HAILO_SUCCESS;
-    }
-    return HAILO_TIMEOUT;
-}
-
-Expected<size_t> VdmaAsyncOutputNmsStream::get_async_max_queue_size() const
-{
-    return Expected<size_t>(m_queue_max_size);
-}
-
-hailo_status VdmaAsyncOutputNmsStream::read_async(TransferRequest &&transfer_request)
-{
-    {
-        std::lock_guard<std::mutex> lock(m_queue_mutex);
-        CHECK(!m_stream_aborted, HAILO_STREAM_ABORTED_BY_USER);
-        CHECK(m_queue.size() < m_queue_max_size, HAILO_QUEUE_IS_FULL, "No space left in nms queue");
-
-        m_queue.emplace(std::move(transfer_request));
-    }
-    m_queue_cond.notify_one();
-    return HAILO_SUCCESS;
-}
-
-hailo_status VdmaAsyncOutputNmsStream::read(MemoryView /* buffer */)
-{
-    // We need to override read() since VdmaAsyncOutputNmsStream impl's read_impl. This will cause read() to succeed,
-    // however this isn't desired for async streams.
-    LOGGER__ERROR("The read function is not supported by async streams");
-    return HAILO_INVALID_OPERATION;
-}
-
-hailo_status VdmaAsyncOutputNmsStream::abort()
-{
-    std::unique_lock<std::mutex> lock(m_abort_mutex);
-    const auto status = VdmaOutputStreamBase::abort();
-    CHECK_SUCCESS(status);
-
-    m_stream_aborted = true;
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status VdmaAsyncOutputNmsStream::clear_abort()
-{
-    std::unique_lock<std::mutex> lock(m_abort_mutex);
-    const auto status = VdmaOutputStreamBase::clear_abort();
-    CHECK_SUCCESS(status);
-
-    m_stream_aborted = false;
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status VdmaAsyncOutputNmsStream::read_impl(MemoryView &buffer)
-{
-    CHECK((buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
-        "Size must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
-
-    return m_channel->transfer_sync(buffer.data(), buffer.size(), m_transfer_timeout);
-}
-
-hailo_status VdmaAsyncOutputNmsStream::deactivate_stream()
-{
-    std::unique_lock<std::mutex> lock(m_queue_mutex);
-
-    // abort is called because read_nms may block on a non-aborted channel
-    auto status = abort();
-    CHECK_SUCCESS(status);
-
-    // Now for every transfer processed in process_transfer_requests(), we'll pass HAILO_STREAM_ABORTED_BY_USER to the
-    // callback.
-    status = VdmaOutputStreamBase::deactivate_stream();
-    CHECK_SUCCESS(status);
-
-    // Block until all transfers have been emptied from the queue
-    auto result = m_queue_cond.wait_for(lock, m_transfer_timeout, [&]{ return m_queue.empty(); });
-    CHECK(result, HAILO_TIMEOUT, "Timeout while deactivating async nms output stream");
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status VdmaAsyncOutputNmsStream::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
-{
-    std::unique_lock<std::mutex> lock(m_queue_mutex);
-    auto status = VdmaOutputStreamBase::activate_stream(dynamic_batch_size, resume_pending_stream_transfers);
-    CHECK_SUCCESS(status);
-
-    status = clear_abort();
-    CHECK_SUCCESS(status);
-
-    return HAILO_SUCCESS;
-}
-
-Expected<size_t> VdmaAsyncOutputNmsStream::get_buffer_frames_size() const
-{
-    return Expected<size_t>(m_queue_max_size);
-}
-
-void VdmaAsyncOutputNmsStream::signal_thread_quit()
-{
-    {
-        std::unique_lock<std::mutex> lock(m_queue_mutex);
-        m_should_quit = true;
-    }
-    m_queue_cond.notify_all();
-}
-
-void VdmaAsyncOutputNmsStream::process_transfer_requests()
-{
-    static const size_t FROM_START_OF_BUFFER = 0;
-    OsUtils::set_current_thread_name("ASYNC_NMS");
-
-    while (true) {
-        std::unique_lock<std::mutex> lock(m_queue_mutex);
-        m_queue_cond.wait(lock, [&]{ return m_should_quit || !m_queue.empty(); });
-        if (m_should_quit) {
-            break;
-        }
-
-        auto transfer_request = m_queue.front();
-        m_queue.pop();
-
-        lock.unlock();
-        auto status = read_nms(transfer_request.buffer.data(), FROM_START_OF_BUFFER, transfer_request.buffer.size());
-        lock.lock();
-
-        if (!this->is_stream_activated) {
-            LOGGER__TRACE("Stream is not active (previous status {})", status);
-            transfer_request.callback(HAILO_STREAM_ABORTED_BY_USER);
-        } else if (status != HAILO_SUCCESS) {
-            // TODO: timeout? stream aborted? (HRT-10513)
-            transfer_request.callback(status);
-        } else {
-            transfer_request.callback(HAILO_SUCCESS);
-        }
-
-        lock.unlock();
-
-        // We notify after calling the callback, so that deactivate_stream() will block until the queue is empty + all callbacks have been called
-        m_queue_cond.notify_one();
-    }
-}
-
-} /* namespace hailort */
diff --git a/hailort/libhailort/src/vdma/vdma_async_stream.hpp b/hailort/libhailort/src/vdma/vdma_async_stream.hpp
deleted file mode 100644 (file)
index eb48c17..0000000
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_stream.hpp
- * @brief Async stream object over vDMA channel
- **/
-
-#ifndef _HAILO_VDMA_ASYNC_STREAM_HPP_
-#define _HAILO_VDMA_ASYNC_STREAM_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-#include "hailo/stream.hpp"
-
-#include "vdma/vdma_stream_base.hpp"
-#include "vdma/vdma_device.hpp"
-#include "vdma/channel/async_channel.hpp"
-#include "vdevice/scheduler/scheduled_core_op_state.hpp"
-
-#include <thread>
-#include <queue>
-#include <mutex>
-#include <condition_variable>
-
-
-namespace hailort
-{
-
-class VdmaAsyncInputStream : public VdmaInputStreamBase
-{
-public:
-    VdmaAsyncInputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-                         EventPtr core_op_activated_event, uint16_t batch_size,
-                         std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t stream_interface,
-                         hailo_status &status);
-    virtual ~VdmaAsyncInputStream() = default;
-
-    virtual hailo_status wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout) override;
-    virtual Expected<size_t> get_async_max_queue_size() const override;
-
-    virtual hailo_status write_buffer_only(const MemoryView &buffer, const std::function<bool()> &should_cancel) override;
-    virtual hailo_status send_pending_buffer(const device_id_t &device_id) override;
-
-    virtual hailo_status write_async(TransferRequest &&transfer_request) override;
-
-protected:
-    virtual hailo_status write_impl(const MemoryView &buffer) override;
-};
-
-class VdmaAsyncOutputStream : public VdmaOutputStreamBase
-{
-public:
-    VdmaAsyncOutputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-                          EventPtr core_op_activated_event, uint16_t batch_size,
-                          std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
-                          hailo_status &status);
-    virtual ~VdmaAsyncOutputStream()  = default;
-
-    virtual hailo_status wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout) override;
-    virtual Expected<size_t> get_async_max_queue_size() const override;
-
-protected:
-    virtual hailo_status read_impl(MemoryView &buffer) override;
-    virtual hailo_status read_async(TransferRequest &&transfer_request) override;
-};
-
-// NMS requires multiple reads from the device + parsing the output. Hence, a background thread is needed.
-// This class opens a worker thread that processes nms transfers, signalling the user's callback upon completion.
-// read_async adds transfer requests to a producer-consumer queue
-class VdmaAsyncOutputNmsStream : public VdmaOutputStreamBase
-{
-public:
-    VdmaAsyncOutputNmsStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-                             EventPtr core_op_activated_event, uint16_t batch_size,
-                             std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
-                             hailo_status &status);
-    virtual ~VdmaAsyncOutputNmsStream();
-
-    virtual hailo_status wait_for_async_ready(size_t transfer_size, std::chrono::milliseconds timeout) override;
-    virtual Expected<size_t> get_async_max_queue_size() const override;
-    virtual hailo_status read(MemoryView buffer) override;
-    virtual hailo_status abort() override;
-    virtual hailo_status clear_abort() override;
-
-private:
-    virtual hailo_status read_impl(MemoryView &buffer) override;
-    virtual hailo_status read_async(TransferRequest &&transfer_request) override;
-    virtual hailo_status deactivate_stream() override;
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
-    virtual Expected<size_t> get_buffer_frames_size() const override;
-
-    void signal_thread_quit();
-    void process_transfer_requests();
-
-    // TODO: use SpscQueue (HRT-10554)
-    const size_t m_queue_max_size;
-    std::mutex m_queue_mutex;
-    std::mutex m_abort_mutex;
-    std::condition_variable m_queue_cond;
-    std::queue<TransferRequest> m_queue;
-    std::atomic_bool m_stream_aborted;
-    // m_should_quit is used to quit the thread (called on destruction)
-    bool m_should_quit;
-    std::thread m_worker_thread;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_VDMA_ASYNC_STREAM_HPP_ */
index 7d7810cca85baf19152acbdbeadc06dabbb86687..90eea950d523a839983b737a9ea9f4fd7ae7b04f 100644 (file)
@@ -24,11 +24,10 @@ Expected<VdmaConfigActivatedCoreOp> VdmaConfigActivatedCoreOp::create(
     // it will require a check that these params will be relevant for this one core op only.
     const hailo_activate_network_group_params_t &network_group_params,
     uint16_t dynamic_batch_size,
-    std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-    std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,         
+    std::map<std::string, std::shared_ptr<InputStreamBase>> &input_streams,
+    std::map<std::string, std::shared_ptr<OutputStreamBase>> &output_streams,
     EventPtr core_op_activated_event,
     AccumulatorPtr deactivation_time_accumulator,
-    bool resume_pending_stream_transfers,
     CoreOp &core_op)
 {
     CHECK(!active_core_op_holder.is_any_active(), make_unexpected(HAILO_INVALID_OPERATION),
@@ -39,7 +38,10 @@ Expected<VdmaConfigActivatedCoreOp> VdmaConfigActivatedCoreOp::create(
     auto status = HAILO_UNINITIALIZED;
     VdmaConfigActivatedCoreOp object(core_op_name, network_group_params, dynamic_batch_size, input_streams, output_streams,
         std::move(resources_manager), active_core_op_holder, std::move(core_op_activated_event),
-        deactivation_time_accumulator, resume_pending_stream_transfers, core_op, status);
+        deactivation_time_accumulator, core_op, status);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        return make_unexpected(status);
+    }
     CHECK_SUCCESS_AS_EXPECTED(status);
 
     return object;
@@ -49,13 +51,12 @@ VdmaConfigActivatedCoreOp::VdmaConfigActivatedCoreOp(
         const std::string &core_op_name,
         const hailo_activate_network_group_params_t &network_group_params,
         uint16_t dynamic_batch_size,
-        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
+        std::map<std::string, std::shared_ptr<InputStreamBase>> &input_streams,
+        std::map<std::string, std::shared_ptr<OutputStreamBase>> &output_streams,
         std::shared_ptr<ResourcesManager> &&resources_manager,
         ActiveCoreOpHolder &active_core_op_holder,
         EventPtr &&core_op_activated_event,
         AccumulatorPtr deactivation_time_accumulator,
-        bool resume_pending_stream_transfers,
         CoreOp &core_op,
         hailo_status &status) :
     ActivatedCoreOp(network_group_params, input_streams, output_streams,
@@ -64,8 +65,7 @@ VdmaConfigActivatedCoreOp::VdmaConfigActivatedCoreOp(
     m_should_reset_core_op(true),
     m_active_core_op_holder(active_core_op_holder),
     m_resources_manager(std::move(resources_manager)),
-    m_deactivation_time_accumulator(deactivation_time_accumulator),
-    m_keep_nn_config_during_reset(false)
+    m_deactivation_time_accumulator(deactivation_time_accumulator)
 {
     // Validate ActivatedCoreOp status
     if (HAILO_SUCCESS != status) {
@@ -73,7 +73,11 @@ VdmaConfigActivatedCoreOp::VdmaConfigActivatedCoreOp(
     }
     
     // We know core_op is a VdmaConfigCoreOp
-    status = core_op.activate_impl(dynamic_batch_size, resume_pending_stream_transfers);
+    status = core_op.activate_impl(dynamic_batch_size);
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("Core-op activation failed because it was aborted by user");
+        return;
+    }
     if (HAILO_SUCCESS != status) {
         LOGGER__ERROR("Error activating core-op");
         return;
@@ -86,8 +90,7 @@ VdmaConfigActivatedCoreOp::VdmaConfigActivatedCoreOp(VdmaConfigActivatedCoreOp &
     m_should_reset_core_op(std::exchange(other.m_should_reset_core_op, false)),
     m_active_core_op_holder(other.m_active_core_op_holder),
     m_resources_manager(std::move(other.m_resources_manager)),
-    m_deactivation_time_accumulator(std::move(other.m_deactivation_time_accumulator)),
-    m_keep_nn_config_during_reset(std::move(other.m_keep_nn_config_during_reset))
+    m_deactivation_time_accumulator(std::move(other.m_deactivation_time_accumulator))
 {}
 
 VdmaConfigActivatedCoreOp::~VdmaConfigActivatedCoreOp()
@@ -107,7 +110,7 @@ VdmaConfigActivatedCoreOp::~VdmaConfigActivatedCoreOp()
 
     auto vdma_config_core_op = core_op_ref.value();
 
-    status = vdma_config_core_op.get().deactivate_impl(m_keep_nn_config_during_reset);
+    status = vdma_config_core_op.get().deactivate_impl();
     if (HAILO_SUCCESS != status) {
         LOGGER__ERROR("Failed deactivating core-op (status {})", status);
     }
@@ -131,10 +134,4 @@ Expected<Buffer> VdmaConfigActivatedCoreOp::get_intermediate_buffer(const Interm
     return m_resources_manager->read_intermediate_buffer(key);
 }
 
-hailo_status VdmaConfigActivatedCoreOp::set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset)
-{
-    m_keep_nn_config_during_reset = keep_nn_config_during_reset;
-    return HAILO_SUCCESS;
-}
-
 } /* namespace hailort */
diff --git a/hailort/libhailort/src/vdma/vdma_config_activated_core_op.hpp b/hailort/libhailort/src/vdma/vdma_config_activated_core_op.hpp
deleted file mode 100644 (file)
index 336e534..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_config_activated_core_op.hpp
- * @brief Represent activated core-op from HEF
- **/
-
-#ifndef _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_ACTIVATED_CORE_OP_HPP_
-#define _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_ACTIVATED_CORE_OP_HPP_
-
-#include "hailo/expected.hpp"
-
-#include "vdma/channel/boundary_channel.hpp"
-#include "core_op/active_core_op_holder.hpp"
-#include "core_op/resource_manager/resource_manager.hpp"
-
-#include <vector>
-#include <map>
-#include <functional>
-
-
-namespace hailort
-{
-
-class VdmaConfigActivatedCoreOp : public ActivatedCoreOp
-{
-public:
-
-    static Expected<VdmaConfigActivatedCoreOp> create(
-        ActiveCoreOpHolder &active_core_op_holder,
-        const std::string &core_op_name,
-        std::shared_ptr<ResourcesManager> resources_manager,
-        const hailo_activate_network_group_params_t &network_group_params,
-        uint16_t dynamic_batch_size,
-        std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-        std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
-        EventPtr core_op_activated_event,
-        AccumulatorPtr deactivation_time_accumulator,
-        bool resume_pending_stream_transfers,
-        CoreOp &core_op);
-
-    virtual ~VdmaConfigActivatedCoreOp();
-
-    VdmaConfigActivatedCoreOp(const VdmaConfigActivatedCoreOp &other) = delete;
-    VdmaConfigActivatedCoreOp &operator=(const VdmaConfigActivatedCoreOp &other) = delete;
-    VdmaConfigActivatedCoreOp &operator=(VdmaConfigActivatedCoreOp &&other) = delete;
-    VdmaConfigActivatedCoreOp(VdmaConfigActivatedCoreOp &&other) noexcept;
-
-    virtual const std::string &get_network_group_name() const override;
-    virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &key) override;
-    virtual hailo_status set_keep_nn_config_during_reset(const bool keep_nn_config_during_reset) override;
-
-private:
-    VdmaConfigActivatedCoreOp(
-      const std::string &core_op_name,
-      const hailo_activate_network_group_params_t &network_group_params,
-      uint16_t dynamic_batch_size,
-      std::map<std::string, std::shared_ptr<InputStream>> &input_streams,
-      std::map<std::string, std::shared_ptr<OutputStream>> &output_streams,
-      std::shared_ptr<ResourcesManager> &&resources_manager,
-      ActiveCoreOpHolder &active_core_op_holder,
-      EventPtr &&core_op_activated_event,
-      AccumulatorPtr deactivation_time_accumulator,
-      bool resume_pending_stream_transfers,
-      CoreOp &core_op,
-      hailo_status &status);
-
-  std::string m_core_op_name;
-  bool m_should_reset_core_op;
-  ActiveCoreOpHolder &m_active_core_op_holder;
-  std::shared_ptr<ResourcesManager> m_resources_manager;
-  AccumulatorPtr m_deactivation_time_accumulator;
-  bool m_keep_nn_config_during_reset;
-};
-
-} /* namespace hailort */
-
-#endif /* _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_ACTIVATED_CORE_OP_HPP_ */
index c0f80209bd5f975b7f106733c44d6dd72b725bff..e40074428ee2626ca444ffc5643673a4f85dbc08 100644 (file)
@@ -8,7 +8,7 @@ namespace hailort
 {
 
 Expected<VdmaConfigCoreOp> VdmaConfigCoreOp::create(ActiveCoreOpHolder &active_core_op_holder,
-        const ConfigureNetworkParams &config_params, 
+        const ConfigureNetworkParams &config_params,
         std::shared_ptr<ResourcesManager> resources_manager,
         std::shared_ptr<CoreOpMetadata> metadata)
 {
@@ -25,23 +25,19 @@ VdmaConfigCoreOp::VdmaConfigCoreOp(ActiveCoreOpHolder &active_core_op_holder,
     const ConfigureNetworkParams &config_params,
     std::shared_ptr<ResourcesManager> &&resources_manager,
     std::shared_ptr<CoreOpMetadata> metadata, hailo_status &status) :
-        CoreOp(config_params, metadata, status),
-        m_active_core_op_holder(active_core_op_holder),
+        CoreOp(config_params, metadata, active_core_op_holder, status),
         m_resources_manager(std::move(resources_manager))
 {}
 
-hailo_status VdmaConfigCoreOp::activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
+hailo_status VdmaConfigCoreOp::activate_impl(uint16_t dynamic_batch_size)
 {
     auto status = HAILO_UNINITIALIZED;
 
-    // Check that no network is currently activated
-    CHECK(!m_active_core_op_holder.is_any_active(), HAILO_INTERNAL_FAILURE,
-        "Cant activate network because a network is already activated");
-
-    m_active_core_op_holder.set(*this);
-
-    status = m_resources_manager->set_dynamic_batch_size(dynamic_batch_size);
-    CHECK_SUCCESS(status, "Failed to set inter-context channels dynamic batch size.");
+    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE != dynamic_batch_size) {
+        CHECK(dynamic_batch_size <= get_smallest_configured_batch_size(get_config_params()),
+            HAILO_INVALID_ARGUMENT, "Batch size given is {} although max is {}", dynamic_batch_size,
+            get_smallest_configured_batch_size(get_config_params()));
+    }
 
     status = m_resources_manager->enable_state_machine(dynamic_batch_size);
     CHECK_SUCCESS(status, "Failed to activate state-machine");
@@ -51,46 +47,36 @@ hailo_status VdmaConfigCoreOp::activate_impl(uint16_t dynamic_batch_size, bool r
 
     // Low-level streams assume that the vdma channels are enabled (happens in `enable_state_machine`), and that
     // the interrupt dispatcher is running (so they can wait for interrupts).
-    status = activate_low_level_streams(dynamic_batch_size, resume_pending_stream_transfers);
+    status = activate_low_level_streams();
+    if (HAILO_STREAM_ABORTED_BY_USER == status) {
+        LOGGER__INFO("Low level streams activation failed because some were aborted by user");
+        return status;
+    }
     CHECK_SUCCESS(status, "Failed to activate low level streams");
 
-    status = m_core_op_activated_event->signal();
-    CHECK_SUCCESS(status, "Failed to signal network activation event");
+    TRACE(SwitchCoreOpTrace, std::string(m_resources_manager->get_dev_id()), vdevice_core_op_handle());
 
     return HAILO_SUCCESS;
 }
 
-hailo_status VdmaConfigCoreOp::deactivate_impl(bool keep_nn_config_during_reset)
+hailo_status VdmaConfigCoreOp::deactivate_impl()
 {
     auto status = deactivate_host_resources();
     CHECK_SUCCESS(status);
 
-    status = m_resources_manager->reset_state_machine(keep_nn_config_during_reset);
+    status = m_resources_manager->reset_state_machine();
     CHECK_SUCCESS(status, "Failed to reset context switch state machine");
 
     // After the state machine has been reset the vdma channels are no longer active, so we
-    // can cancel pending async transfers, thus allowing vdma buffers linked to said transfers to be freed
-    status = m_resources_manager->cancel_pending_async_transfers();
-    CHECK_SUCCESS(status, "Failed to cancel pending async transfers");
+    // can cancel pending transfers, thus allowing vdma buffers linked to said transfers to be freed
+    status = m_resources_manager->cancel_pending_transfers();
+    CHECK_SUCCESS(status, "Failed to cancel pending transfers");
 
     return HAILO_SUCCESS;
 }
 
 hailo_status VdmaConfigCoreOp::deactivate_host_resources()
 {
-    // Check that network is currently activated
-    CHECK(m_active_core_op_holder.is_any_active(), HAILO_INTERNAL_FAILURE,
-        "Cant Deactivate network because no network is already activated");
-
-    // Make sure the core op we are deactivating is this object
-    auto active_core_op_ref = m_active_core_op_holder.get().value();
-    CHECK(this == std::addressof(active_core_op_ref.get()), HAILO_INTERNAL_FAILURE,
-        "Trying to deactivate different network goup");
-
-    m_active_core_op_holder.clear();
-
-    m_core_op_activated_event->reset();
-
     auto status = deactivate_low_level_streams();
     CHECK_SUCCESS(status, "Failed to deactivate low level streams");
 
@@ -101,30 +87,6 @@ hailo_status VdmaConfigCoreOp::deactivate_host_resources()
     return HAILO_SUCCESS;
 }
 
-Expected<std::unique_ptr<ActivatedNetworkGroup>> VdmaConfigCoreOp::create_activated_network_group(
-    const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size,
-    bool resume_pending_stream_transfers)
-{
-    auto start_time = std::chrono::steady_clock::now();
-    auto activated_net_group = VdmaConfigActivatedCoreOp::create(
-        m_active_core_op_holder, name(), m_resources_manager, network_group_params, dynamic_batch_size,
-        m_input_streams, m_output_streams, m_core_op_activated_event, m_deactivation_time_accumulator,
-        resume_pending_stream_transfers, *this);
-    const auto elapsed_time_ms = std::chrono::duration<double, std::milli>(
-        std::chrono::steady_clock::now() - start_time).count();
-    CHECK_EXPECTED(activated_net_group);
-
-    LOGGER__INFO("Activating {} took {} milliseconds. Note that the function is asynchronous and"
-                 " thus the network is not fully activated yet.", name(), elapsed_time_ms);
-    m_activation_time_accumulator->add_data_point(elapsed_time_ms);
-
-    std::unique_ptr<ActivatedNetworkGroup> activated_net_group_ptr =
-        make_unique_nothrow<VdmaConfigActivatedCoreOp>(activated_net_group.release());
-    CHECK_AS_EXPECTED(nullptr != activated_net_group_ptr, HAILO_OUT_OF_HOST_MEMORY);
-
-    return activated_net_group_ptr;
-}
-
 Expected<hailo_stream_interface_t> VdmaConfigCoreOp::get_default_streams_interface()
 {
     return m_resources_manager->get_default_streams_interface();
@@ -157,7 +119,10 @@ hailo_status VdmaConfigCoreOp::set_scheduler_priority(uint8_t /*priority*/, cons
 Expected<std::shared_ptr<LatencyMetersMap>> VdmaConfigCoreOp::get_latency_meters()
 {
     auto latency_meters = m_resources_manager->get_latency_meters();
-    return make_shared_nothrow<LatencyMetersMap>(latency_meters);
+    auto res = make_shared_nothrow<LatencyMetersMap>(latency_meters);
+    CHECK_NOT_NULL_AS_EXPECTED(res, HAILO_OUT_OF_HOST_MEMORY);
+
+    return res;
 }
 
 Expected<vdma::BoundaryChannelPtr> VdmaConfigCoreOp::get_boundary_vdma_channel_by_stream_name(const std::string &stream_name)
@@ -170,4 +135,9 @@ Expected<HwInferResults> VdmaConfigCoreOp::run_hw_infer_estimator()
     return m_resources_manager->run_hw_only_infer();
 }
 
+Expected<Buffer> VdmaConfigCoreOp::get_intermediate_buffer(const IntermediateBufferKey &key)
+{
+    return m_resources_manager->read_intermediate_buffer(key);
+}
+
 } /* namespace hailort */
index 821ff92c8dccf7a9d96387e3974dce785b792463..bb6aa1f360a3f263434c68bdb021bed29038cc88 100644 (file)
@@ -4,9 +4,7 @@
  **/
 /**
  * @file vdma_config_core_op.hpp
- * @brief Represent core-op from HEF file that can be activated 
- *
- * This core-op can be used for both single or multi context core-ops but for PCIE only
+ * @brief Represent core-op configured over vDMA for single physical device
   **/
 
 #ifndef _HAILO_CONTEXT_SWITCH_VDMA_CONFIG_CORE_OP_HPP_
@@ -20,7 +18,6 @@
 
 #include "vdma/channel/boundary_channel.hpp"
 #include "core_op/resource_manager/resource_manager.hpp"
-#include "vdma/vdma_config_activated_core_op.hpp"
 #include "core_op/active_core_op_holder.hpp"
 
 #include "control_protocol.h"
@@ -38,7 +35,7 @@ class VdmaConfigCoreOp : public CoreOp
 {
 public:
     static Expected<VdmaConfigCoreOp> create(ActiveCoreOpHolder &active_core_op_holder,
-        const ConfigureNetworkParams &config_params, 
+        const ConfigureNetworkParams &config_params,
         std::shared_ptr<ResourcesManager> resources_managers,
         std::shared_ptr<CoreOpMetadata> metadata);
 
@@ -48,21 +45,12 @@ public:
     }
 
     // Functions to activate and deactivate core ops for scheduler - dont create ActivatedNetworkGroup objects
-    // Note: Care should be taken when calling activate_impl with resume_pending_stream_transfers = true.
-    //       If an output stream has outstanding transfers, and the NG is deactivated (via deactivate_impl) before they
-    //       have been completed, then these pending transfers may be overwritten upon channel activation.
-    //       Hence, when setting resume_pending_stream_transfers = true, the caller must validate that all pending
-    //       reads have been received (i.e. an int has been raised for this transfer)
-    virtual hailo_status activate_impl(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
+    virtual hailo_status activate_impl(uint16_t dynamic_batch_size) override;
     // Will first deactivate host resources (via deactivate_host_resources) and then reset the core-op on the fw
-    virtual hailo_status deactivate_impl(bool keep_nn_config_during_reset) override;
+    virtual hailo_status deactivate_impl() override;
     // Deactivate all resources related to the core-op on the host, but without resetting the core-op on the fw
     hailo_status deactivate_host_resources();
 
-    virtual Expected<std::unique_ptr<ActivatedNetworkGroup>> create_activated_network_group(
-        const hailo_activate_network_group_params_t &network_group_params, uint16_t dynamic_batch_size,
-        bool resume_pending_stream_transfers) override;
-
     virtual Expected<hailo_stream_interface_t> get_default_streams_interface() override;
 
     virtual Expected<std::shared_ptr<LatencyMetersMap>> get_latency_meters() override;
@@ -74,23 +62,22 @@ public:
     virtual hailo_status set_scheduler_threshold(uint32_t threshold, const std::string &network_name) override;
     virtual hailo_status set_scheduler_priority(uint8_t priority, const std::string &network_name) override;
     virtual Expected<HwInferResults> run_hw_infer_estimator() override;
+    virtual Expected<Buffer> get_intermediate_buffer(const IntermediateBufferKey &) override;
 
     virtual ~VdmaConfigCoreOp() = default;
     VdmaConfigCoreOp(const VdmaConfigCoreOp &other) = delete;
     VdmaConfigCoreOp &operator=(const VdmaConfigCoreOp &other) = delete;
     VdmaConfigCoreOp &operator=(VdmaConfigCoreOp &&other) = delete;
     VdmaConfigCoreOp(VdmaConfigCoreOp &&other) noexcept : CoreOp(std::move(other)),
-        m_active_core_op_holder(other.m_active_core_op_holder),
         m_resources_manager(std::move(other.m_resources_manager))
         {}
 
 private:
-    VdmaConfigCoreOp(ActiveCoreOpHolder &active_core_op_holder,
+VdmaConfigCoreOp(ActiveCoreOpHolder &active_core_op_holder,
         const ConfigureNetworkParams &config_params,
         std::shared_ptr<ResourcesManager> &&resources_manager,
         std::shared_ptr<CoreOpMetadata> metadata, hailo_status &status);
 
-    ActiveCoreOpHolder &m_active_core_op_holder;
     std::shared_ptr<ResourcesManager> m_resources_manager;
 };
 
index 1d96c51a52aff41ae40d2202e4225ed9a541fa53..0f196963070891cfc001d61a5b916096cf71351f 100644 (file)
@@ -14,51 +14,52 @@ namespace hailort
 {
 
 hailo_status VdmaConfigManager::switch_core_op(std::shared_ptr<VdmaConfigCoreOp> current_active_core_op,
-    std::shared_ptr<VdmaConfigCoreOp> next_core_op, const uint16_t batch_size, const bool resume_pending_stream_transfers)
+    std::shared_ptr<VdmaConfigCoreOp> next_core_op, const uint16_t batch_size, const bool is_batch_switch)
 {
-    static const auto RESET_NN_CONFIG = false;
     CHECK((nullptr != current_active_core_op) || (nullptr != next_core_op), HAILO_INVALID_ARGUMENT);
 
     if (nullptr == current_active_core_op) {
         // Activate first core-op
-        return next_core_op->activate_impl(batch_size, resume_pending_stream_transfers);
+        return next_core_op->activate_impl(batch_size);
     } else if (nullptr == next_core_op) {
         // Deactivate last core-op
-        return current_active_core_op->deactivate_impl(RESET_NN_CONFIG);
-    }
-
-    // We're switching from current_active_core_op to next_core_op.
-    // Deactivate the current core-op on the host, meaning the fw state machine won't be reset.
-    // This will be handled by activating the next core-op.
-    auto status = current_active_core_op->deactivate_host_resources();
-    CHECK_SUCCESS(status, "Failed deactivating current core-op");
+        return current_active_core_op->deactivate_impl();
+    } else if (is_batch_switch) {
+        auto status = current_active_core_op->get_resources_manager()->enable_state_machine(batch_size);
+        CHECK_SUCCESS(status, "Failed to activate state-machine");
+    } else {
+        // We're switching from current_active_core_op to next_core_op.
+        // Deactivate the current core-op on the host, meaning the fw state machine won't be reset.
+        // This will be handled by activating the next core-op.
+        auto status = current_active_core_op->deactivate_host_resources();
+        CHECK_SUCCESS(status, "Failed deactivating current core-op");
 
-    // TODO HRT-10799 Fix when enabling batch switch flow for hailo15
-    // TODO: In mercury we need to reset after deactivate. This will be fixed in MSW-762 and the "if" will be removed
-    //       when we make the nn_manager responsible to reset the nn-core.
-    if (Device::Type::INTEGRATED == current_active_core_op->get_resources_manager()->get_device().get_type()) {
-        status = current_active_core_op->get_resources_manager()->reset_state_machine(RESET_NN_CONFIG);
-        CHECK_SUCCESS(status, "Failed to reset state machine in switch core-op");
-    }
+        // TODO: In mercury we need to reset after deactivate. This will be fixed in MSW-762 and the "if" will be removed
+        //       when we make the nn_manager responsible to reset the nn-core.
+        if (Device::Type::INTEGRATED == current_active_core_op->get_resources_manager()->get_device().get_type()) {
+            status = current_active_core_op->get_resources_manager()->reset_state_machine();
+            CHECK_SUCCESS(status, "Failed to reset state machine in switch core-op");
+        }
 
-    // Switch from the current core-op to the next core-op. I.e. current core-op will be deactivated and
-    // next core-op will be activated
-    status = next_core_op->activate_impl(batch_size, resume_pending_stream_transfers);
-    CHECK_SUCCESS(status, "Failed activating next core-op");
+        // Switch from the current core-op to the next core-op. I.e. current core-op will be deactivated and
+        // next core-op will be activated
+        status = next_core_op->activate_impl(batch_size);
+        CHECK_SUCCESS(status, "Failed activating next core-op");
 
-    // Current core-op is now deactivated, so we can cancel pending async transfers
-    status = current_active_core_op->get_resources_manager()->cancel_pending_async_transfers();
-    CHECK_SUCCESS(status, "Failed canceling pending async transfers from previous core-op");
+        // Current core-op is now deactivated (we are not on batch switch), so we can cancel pending transfers.
+        status = current_active_core_op->get_resources_manager()->cancel_pending_transfers();
+        CHECK_SUCCESS(status, "Failed canceling pending transfers from previous core-op");
+    }
 
     return HAILO_SUCCESS;
 }
 
 hailo_status VdmaConfigManager::deactivate_core_op(std::shared_ptr<VdmaConfigCoreOp> current_active_core_op)
 {
-    static const auto RESUME_PENDING_STREAM_TRANSFERS = true;
     static const uint16_t DEACTIVATE_BATCH_SIZE = 0;
     const std::shared_ptr<VdmaConfigCoreOp> DEACTIVATE_NEXT_CORE_OP = nullptr;
-    return switch_core_op(current_active_core_op, DEACTIVATE_NEXT_CORE_OP, DEACTIVATE_BATCH_SIZE, RESUME_PENDING_STREAM_TRANSFERS);
+    static const bool IS_NOT_BATCH_SWITCH = false;
+    return switch_core_op(current_active_core_op, DEACTIVATE_NEXT_CORE_OP, DEACTIVATE_BATCH_SIZE, IS_NOT_BATCH_SWITCH);
 }
 
 } /* namespace hailort */
index c20b1e01aabd44b78a9e1eefc05b276ee83fb4f2..fc13c368744fb73e057281fc8ea4308fbd6795b4 100644 (file)
@@ -27,7 +27,7 @@ public:
     VdmaConfigManager() = delete;
 
     static hailo_status switch_core_op(std::shared_ptr<VdmaConfigCoreOp> current_active_core_op,
-        std::shared_ptr<VdmaConfigCoreOp> next_core_op, const uint16_t batch_size, const bool resume_pending_stream_transfers);
+        std::shared_ptr<VdmaConfigCoreOp> next_core_op, const uint16_t batch_size, const bool is_batch_switch);
 
     static hailo_status deactivate_core_op(std::shared_ptr<VdmaConfigCoreOp> current_active_core_op);
 };
index a3791a5e16d38b11474c03e714d9b967a7f6118b..3eb774f85d93f97b5f60c458777dd614741c66a8 100644 (file)
@@ -31,7 +31,7 @@ static constexpr std::chrono::milliseconds DEFAULT_TIMEOUT(1000);
 static constexpr std::chrono::milliseconds DEFAULT_TIMEOUT(50000);
 #endif /* ifndef HAILO_EMULATOR */
 
-VdmaDevice::VdmaDevice(HailoRTDriver &&driver, Device::Type type) :
+VdmaDevice::VdmaDevice(std::unique_ptr<HailoRTDriver> &&driver, Device::Type type) :
     DeviceBase::DeviceBase(type),
     m_driver(std::move(driver)), m_is_configured(false)
 {
@@ -64,7 +64,7 @@ hailo_status VdmaDevice::wait_for_wakeup()
 
 Expected<D2H_EVENT_MESSAGE_t> VdmaDevice::read_notification()
 {
-    auto notification_buffer = m_driver.read_notification();
+    auto notification_buffer = m_driver->read_notification();
     if (!notification_buffer.has_value()) {
         return make_unexpected(notification_buffer.status());
     }
@@ -78,7 +78,7 @@ Expected<D2H_EVENT_MESSAGE_t> VdmaDevice::read_notification()
 
 hailo_status VdmaDevice::disable_notifications()
 {
-    return m_driver.disable_notifications();
+    return m_driver->disable_notifications();
 }
 
 hailo_status VdmaDevice::fw_interact_impl(uint8_t *request_buffer, size_t request_size,
@@ -94,7 +94,7 @@ hailo_status VdmaDevice::fw_interact_impl(uint8_t *request_buffer, size_t reques
     uint8_t response_md5[PCIE_EXPECTED_MD5_LENGTH];
     uint8_t expected_response_md5[PCIE_EXPECTED_MD5_LENGTH];
 
-    auto status = m_driver.fw_control(request_buffer, request_size, request_md5,
+    auto status = m_driver->fw_control(request_buffer, request_size, request_md5,
         response_buffer, response_size, response_md5,
         DEFAULT_TIMEOUT, cpu_id);
     CHECK_SUCCESS(status, "Failed to send fw control");
@@ -111,15 +111,14 @@ hailo_status VdmaDevice::fw_interact_impl(uint8_t *request_buffer, size_t reques
 
 hailo_status VdmaDevice::clear_configured_apps()
 {
-    static const auto DONT_KEEP_NN_CONFIG_DURING_RESET = false;
-    auto status = Control::reset_context_switch_state_machine(*this, DONT_KEEP_NN_CONFIG_DURING_RESET);
+    auto status = Control::reset_context_switch_state_machine(*this);
     CHECK_SUCCESS(status);
 
     // In case of mercury need to reset nn core before activating network group to clear prior nn core state
     if (Device::Type::INTEGRATED == get_type()) {
         // On core device, the nn_manager is not responsible to reset the nn-core so
         // we use the SCU control for that.
-        status = m_driver.reset_nn_core();
+        status = m_driver->reset_nn_core();
         CHECK_SUCCESS(status);
     }
 
@@ -142,7 +141,7 @@ Expected<ConfiguredNetworkGroupVector> VdmaDevice::add_hef(Hef &hef, const Netwo
         CHECK_SUCCESS_AS_EXPECTED(status);
 
         assert(nullptr == m_vdma_interrupts_dispatcher);
-        auto interrupts_dispatcher = vdma::InterruptsDispatcher::create(std::ref(m_driver));
+        auto interrupts_dispatcher = vdma::InterruptsDispatcher::create(std::ref(*m_driver));
         CHECK_EXPECTED(interrupts_dispatcher);
         m_vdma_interrupts_dispatcher = interrupts_dispatcher.release();
 
@@ -205,7 +204,7 @@ Expected<size_t> VdmaDevice::read_log(MemoryView &buffer, hailo_cpu_id_t cpu_id)
 {
     size_t read_bytes = 0;
     hailo_status status = HAILO_UNINITIALIZED;
-    status = m_driver.read_log(buffer.data(), buffer.size(), &read_bytes, cpu_id);
+    status = m_driver->read_log(buffer.data(), buffer.size(), &read_bytes, cpu_id);
     CHECK_SUCCESS_AS_EXPECTED(status);
     return read_bytes;
 }
@@ -225,7 +224,7 @@ hailo_reset_device_mode_t VdmaDevice::get_default_reset_mode()
 
 hailo_status VdmaDevice::mark_as_used()
 {
-    return m_driver.mark_as_used();
+    return m_driver->mark_as_used();
 }
 
 ExpectedRef<vdma::InterruptsDispatcher> VdmaDevice::get_vdma_interrupts_dispatcher()
index 6d5eea7f88c7c71d1bb2f29681d0fd3a0d4b9b6a..7a4baba7ceb22112de465d0df83a51bfdaeaa6f0 100644 (file)
@@ -37,19 +37,19 @@ public:
 
     HailoRTDriver &get_driver()
     {
-        return std::ref(m_driver);
+        return std::ref(*m_driver);
     };
 
     virtual const char* get_dev_id() const override final
     {
         // m_driver.device_id() is reference. Hence, returning c_str is safe.
-        return m_driver.device_id().c_str();
+        return m_driver->device_id().c_str();
     };
 
     ExpectedRef<vdma::InterruptsDispatcher> get_vdma_interrupts_dispatcher();
 
 protected:
-    VdmaDevice(HailoRTDriver &&driver, Type type);
+    VdmaDevice(std::unique_ptr<HailoRTDriver> &&driver, Type type);
 
     virtual Expected<D2H_EVENT_MESSAGE_t> read_notification() override;
     virtual hailo_status disable_notifications() override;
@@ -57,7 +57,7 @@ protected:
         uint8_t *response_buffer, size_t *response_size, hailo_cpu_id_t cpu_id) override;
     virtual Expected<ConfiguredNetworkGroupVector> add_hef(Hef &hef, const NetworkGroupsParamsMap &configure_params) override;
 
-    HailoRTDriver m_driver;
+    std::unique_ptr<HailoRTDriver> m_driver;
     std::vector<std::shared_ptr<CoreOp>> m_core_ops;
     std::vector<std::shared_ptr<ConfiguredNetworkGroup>> m_network_groups; // TODO: HRT-9547 - Remove when ConfiguredNetworkGroup will be kept in global context
 
index b3f387c2e6fda59bf027a1b58b9b4a50769fb8b3..6709ee2c2423ec2b3cb34214deb980d1b6cfeca2 100644 (file)
 #include "hailo/hailort_common.hpp"
 
 #include "vdma/vdma_stream.hpp"
+#include "vdma/circular_stream_buffer_pool.hpp"
+#include "utils/profiler/tracer_macros.hpp"
 
 
 namespace hailort
 {
 
+
+/** Input stream **/
+Expected<std::shared_ptr<VdmaInputStream>> VdmaInputStream::create(hailo_stream_interface_t interface,
+    VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer, EventPtr core_op_activated_event)
+{
+    assert((interface == HAILO_STREAM_INTERFACE_PCIE) || (interface == HAILO_STREAM_INTERFACE_INTEGRATED));
+
+    hailo_status status = HAILO_UNINITIALIZED;
+    auto result = make_shared_nothrow<VdmaInputStream>(device, channel, edge_layer,
+        core_op_activated_event, interface, status);
+    CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+    return result;
+}
+
 VdmaInputStream::VdmaInputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel,
                                  const LayerInfo &edge_layer, EventPtr core_op_activated_event,
-                                 uint16_t batch_size, std::chrono::milliseconds transfer_timeout,
                                  hailo_stream_interface_t stream_interface, hailo_status &status) :
-    VdmaInputStreamBase(device, channel, edge_layer, core_op_activated_event, batch_size, transfer_timeout, stream_interface, status),
-    m_write_only_mutex(),
-    m_send_pending_mutex()
+    AsyncInputStreamBase(edge_layer, stream_interface, std::move(core_op_activated_event), status),
+    m_device(device),
+    m_channel(std::move(channel)),
+    m_interface(stream_interface)
 {
     // Checking status for base class c'tor
     if (HAILO_SUCCESS != status) {
         return;
     }
 
-    if (channel->type() != vdma::BoundaryChannel::Type::BUFFERED) {
-        LOGGER__ERROR("Can't create a vdma stream with a non buffered channel. Received channel type {}", channel->type());
-        status = HAILO_INVALID_ARGUMENT;
-        return;
+    status = HAILO_SUCCESS;
+}
+
+VdmaInputStream::~VdmaInputStream()
+{
+    // We want to stop the vdma channel before closing the stream in the firmware
+    // because sending data to a closed stream may terminate the dma engine
+    const auto status = m_channel->deactivate();
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to deactivate stream with error status {}", status);
     }
+}
 
-    status = HAILO_SUCCESS;
+hailo_stream_interface_t VdmaInputStream::get_interface() const
+{
+    return m_interface;
 }
 
-hailo_status VdmaInputStream::write_impl(const MemoryView &buffer)
+vdevice_core_op_handle_t VdmaInputStream::get_vdevice_core_op_handle()
 {
-    return m_channel->transfer_sync((void*)buffer.data(), buffer.size(), m_channel_timeout);
+    return m_core_op_handle;
 }
 
-hailo_status VdmaInputStream::write_buffer_only(const MemoryView &buffer,
-    const std::function<bool()> &should_cancel)
+void VdmaInputStream::set_vdevice_core_op_handle(vdevice_core_op_handle_t core_op_handle)
 {
-    std::unique_lock<std::mutex> lock(m_write_only_mutex);
-    return m_channel->write_buffer(buffer, m_channel_timeout, should_cancel);
+    m_core_op_handle = core_op_handle;
 }
 
-hailo_status VdmaInputStream::send_pending_buffer(const device_id_t &device_id)
+Expected<std::unique_ptr<StreamBufferPool>> VdmaInputStream::allocate_buffer_pool()
 {
-    (void)device_id;
-    std::unique_lock<std::mutex> lock(m_send_pending_mutex);
-    hailo_status status = m_channel->wait(get_frame_size(), m_channel_timeout);
-    if ((HAILO_STREAM_ABORTED_BY_USER == status) || (HAILO_STREAM_NOT_ACTIVATED == status)) {
-        return status;
-    }
-    CHECK(HAILO_TIMEOUT != status, HAILO_TIMEOUT,
-        "{} (H2D) failed with status={} (timeout={}ms)", name(), HAILO_TIMEOUT, m_channel_timeout.count());
-    CHECK_SUCCESS(status);
+    auto circular_pool = CircularStreamBufferPool::create(m_device.get_driver(), HailoRTDriver::DmaDirection::H2D,
+        m_channel->get_desc_list()->desc_page_size(), m_channel->get_desc_list()->count(), get_frame_size());
+    CHECK_EXPECTED(circular_pool);
 
-    return m_channel->send_pending_buffer();
+    return std::unique_ptr<StreamBufferPool>(circular_pool.release());
+}
+
+size_t VdmaInputStream::get_max_ongoing_transfers() const
+{
+    return m_channel->get_max_ongoing_transfers(get_frame_size());
+}
+
+hailo_status VdmaInputStream::write_async_impl(TransferRequest &&transfer_request)
+{
+    TRACE(InputVdmaDequeueTrace, m_device.get_dev_id(), m_core_op_handle, name());
+    const auto user_owns_buffer = (buffer_mode() == StreamBufferMode::NOT_OWNING);
+    return m_channel->launch_transfer(std::move(transfer_request), user_owns_buffer);
+}
+
+hailo_status VdmaInputStream::activate_stream_impl()
+{
+    return m_channel->activate();
+}
+
+hailo_status VdmaInputStream::deactivate_stream_impl()
+{
+    return m_channel->deactivate();
 }
 
 /** Output stream **/
+Expected<std::shared_ptr<VdmaOutputStream>> VdmaOutputStream::create(hailo_stream_interface_t interface,
+    VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+    EventPtr core_op_activated_event)
+{
+    assert((interface == HAILO_STREAM_INTERFACE_PCIE) || (interface == HAILO_STREAM_INTERFACE_INTEGRATED));
+
+    hailo_status status = HAILO_UNINITIALIZED;
+    auto result = make_shared_nothrow<VdmaOutputStream>(device, channel, edge_layer,
+        core_op_activated_event, interface, status);
+    CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
+    CHECK_SUCCESS_AS_EXPECTED(status);
+
+    return result;
+}
 
 VdmaOutputStream::VdmaOutputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-                                   EventPtr core_op_activated_event, uint16_t batch_size,
-                                   std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
+                                   EventPtr core_op_activated_event,
+                                   hailo_stream_interface_t interface,
                                    hailo_status &status) :
-    VdmaOutputStreamBase(device, channel, edge_layer, core_op_activated_event, batch_size, transfer_timeout, interface, status)
+    AsyncOutputStreamBase(edge_layer, interface, std::move(core_op_activated_event), status),
+    m_device(device),
+    m_channel(std::move(channel)),
+    m_interface(interface),
+    m_transfer_size(get_transfer_size(m_stream_info, get_layer_info()))
 {
     // Check status for base class c'tor
     if (HAILO_SUCCESS != status) {
         return;
     }
 
-    if (channel->type() != vdma::BoundaryChannel::Type::BUFFERED) {
-        LOGGER__ERROR("Can't create a vdma stream with a non buffered channel. Received channel type {}", channel->type());
-        status = HAILO_INVALID_ARGUMENT;
-        return;
+    status = HAILO_SUCCESS;
+}
+
+VdmaOutputStream::~VdmaOutputStream()
+{
+    // We want to stop the vdma channel before closing the stream in the firmware
+    // because sending data to a closed stream may terminate the dma engine
+    const auto status = m_channel->deactivate();
+    if (HAILO_SUCCESS != status) {
+        LOGGER__ERROR("Failed to deactivate stream with error status {}", status);
     }
+}
 
-    status = HAILO_SUCCESS;
+hailo_stream_interface_t VdmaOutputStream::get_interface() const
+{
+    return m_interface;
 }
 
-hailo_status VdmaOutputStream::read_impl(MemoryView &buffer)
+Expected<std::unique_ptr<StreamBufferPool>> VdmaOutputStream::allocate_buffer_pool()
 {
-    CHECK((buffer.size() % HailoRTCommon::HW_DATA_ALIGNMENT) == 0, HAILO_INVALID_ARGUMENT,
-        "Size must be aligned to {} (got {})", HailoRTCommon::HW_DATA_ALIGNMENT, buffer.size());
+    auto circular_pool = CircularStreamBufferPool::create(m_device.get_driver(), HailoRTDriver::DmaDirection::D2H,
+        m_channel->get_desc_list()->desc_page_size(), m_channel->get_desc_list()->count(), m_transfer_size);
+    CHECK_EXPECTED(circular_pool);
+
+    return std::unique_ptr<StreamBufferPool>(circular_pool.release());
+}
 
-    return m_channel->transfer_sync(buffer.data(), buffer.size(), m_transfer_timeout);
+size_t VdmaOutputStream::get_max_ongoing_transfers() const
+{
+    return m_channel->get_max_ongoing_transfers(m_transfer_size);
+}
+
+hailo_status VdmaOutputStream::read_async_impl(TransferRequest &&transfer_request)
+{
+    const auto user_owns_buffer = (buffer_mode() == StreamBufferMode::NOT_OWNING);
+    return m_channel->launch_transfer(std::move(transfer_request), user_owns_buffer);
+}
+
+hailo_status VdmaOutputStream::activate_stream_impl()
+{
+    return m_channel->activate();
+}
+
+hailo_status VdmaOutputStream::deactivate_stream_impl()
+{
+    return m_channel->deactivate();
+}
+
+uint32_t VdmaOutputStream::get_transfer_size(const hailo_stream_info_t &stream_info, const LayerInfo &layer_info)
+{
+    return LayerInfoUtils::get_stream_transfer_size(stream_info, layer_info);
 }
 
 } /* namespace hailort */
index bd4329fcd62ed9efe3ae222d45380a508b95665b..814925a66f70a08745962a70650ec50507b46b53 100644 (file)
 #ifndef _HAILO_VDMA_STREAM_HPP_
 #define _HAILO_VDMA_STREAM_HPP_
 
-#include "hailo/hailort.h"
 #include "hailo/expected.hpp"
 
-#include "stream_common/stream_internal.hpp"
-#include "vdma/vdma_stream_base.hpp"
+#include "stream_common/async_stream_base.hpp"
 #include "vdma/vdma_device.hpp"
 #include "vdma/channel/boundary_channel.hpp"
-#include "vdevice/scheduler/scheduled_core_op_state.hpp"
 
 
 namespace hailort
 {
 
-class VdmaInputStream : public VdmaInputStreamBase
-{
+class VdmaInputStream : public AsyncInputStreamBase {
 public:
+
+    static Expected<std::shared_ptr<VdmaInputStream>> create(hailo_stream_interface_t interface,
+        VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+        EventPtr core_op_activated_event);
+
     VdmaInputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-                    EventPtr core_op_activated_event, uint16_t batch_size,
-                    std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t stream_interface,
-                    hailo_status &status);
-    virtual ~VdmaInputStream() = default;
+                    EventPtr core_op_activated_event, hailo_stream_interface_t stream_interface, hailo_status &status);
+    virtual ~VdmaInputStream();
+
+    virtual hailo_stream_interface_t get_interface() const override;
 
-    virtual hailo_status write_buffer_only(const MemoryView &buffer, const std::function<bool()> &should_cancel = []() { return false; }) override;
-    virtual hailo_status send_pending_buffer(const device_id_t &device_id) override;
+    virtual vdevice_core_op_handle_t get_vdevice_core_op_handle() override;
+
+    virtual void set_vdevice_core_op_handle(vdevice_core_op_handle_t core_op_handle) override;
 
 private:
-    virtual hailo_status write_impl(const MemoryView &buffer) override;
+    Expected<std::unique_ptr<StreamBufferPool>> allocate_buffer_pool() override;
+    virtual size_t get_max_ongoing_transfers() const override;
+    virtual hailo_status write_async_impl(TransferRequest &&transfer_request) override;
+    virtual hailo_status activate_stream_impl() override;
+    virtual hailo_status deactivate_stream_impl() override;
 
-    std::mutex m_write_only_mutex;
-    std::mutex m_send_pending_mutex;
+    VdmaDevice &m_device;
+    vdma::BoundaryChannelPtr m_channel;
+    const hailo_stream_interface_t m_interface;
+    vdevice_core_op_handle_t m_core_op_handle;
 };
 
-class VdmaOutputStream : public VdmaOutputStreamBase
+class VdmaOutputStream : public AsyncOutputStreamBase
 {
 public:
+    static Expected<std::shared_ptr<VdmaOutputStream>> create(hailo_stream_interface_t interface,
+        VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
+        EventPtr core_op_activated_event);
+
     VdmaOutputStream(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-                     EventPtr core_op_activated_event, uint16_t batch_size,
-                     std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
+                     EventPtr core_op_activated_event, hailo_stream_interface_t interface,
                      hailo_status &status);
-    virtual ~VdmaOutputStream() = default;
+    virtual ~VdmaOutputStream();
 
+    virtual hailo_stream_interface_t get_interface() const override;
+
+    virtual Expected<std::unique_ptr<StreamBufferPool>> allocate_buffer_pool() override;
+    virtual size_t get_max_ongoing_transfers() const override;
+    virtual hailo_status read_async_impl(TransferRequest &&transfer_request) override;
+    virtual hailo_status activate_stream_impl() override;
+    virtual hailo_status deactivate_stream_impl() override;
 private:
-    virtual hailo_status read_impl(MemoryView &buffer) override;
+    static uint32_t get_transfer_size(const hailo_stream_info_t &stream_info, const LayerInfo &layer_info);
+
+    VdmaDevice &m_device;
+    vdma::BoundaryChannelPtr m_channel;
+    const hailo_stream_interface_t m_interface;
+    const uint32_t m_transfer_size;
 };
 
 
diff --git a/hailort/libhailort/src/vdma/vdma_stream_base.cpp b/hailort/libhailort/src/vdma/vdma_stream_base.cpp
deleted file mode 100644 (file)
index 0f5189d..0000000
+++ /dev/null
@@ -1,417 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_stream_base.cpp
- **/
-
-#include "hailo/hailort_common.hpp"
-
-#include "vdma/vdma_stream_base.hpp"
-#include "vdma/vdma_stream.hpp"
-#include "vdma/vdma_async_stream.hpp"
-
-
-namespace hailort
-{
-
-static bool validate_device_interface_compatibility(hailo_stream_interface_t interface, Device::Type type)
-{
-    bool interface_valid = false;
-    switch (type)
-    {
-    case Device::Type::PCIE:
-        interface_valid = (HAILO_STREAM_INTERFACE_PCIE == interface);
-        break;
-
-    case Device::Type::INTEGRATED:
-        interface_valid = (HAILO_STREAM_INTERFACE_INTEGRATED == interface);
-        break;
-
-    default:
-        LOGGER__ERROR("Invalid device type {}", type);
-        return false;
-    }
-
-    if (interface_valid) {
-        return true;
-    }
-
-    LOGGER__ERROR("Invalid interface {} for device of type {}", interface, type);
-    return false;
-}
-
-Expected<std::shared_ptr<VdmaInputStreamBase>> VdmaInputStreamBase::create(hailo_stream_interface_t interface,
-    VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-    const hailo_stream_parameters_t &stream_params, uint16_t batch_size, EventPtr core_op_activated_event)
-{
-    CHECK_AS_EXPECTED(validate_device_interface_compatibility(interface, device.get_type()), HAILO_INTERNAL_FAILURE);
-
-    hailo_status status = HAILO_UNINITIALIZED;
-    std::shared_ptr<VdmaInputStreamBase> result = nullptr;
-    if ((stream_params.flags & HAILO_STREAM_FLAGS_ASYNC) != 0) {
-        result = make_shared_nothrow<VdmaAsyncInputStream>(device, channel, edge_layer, core_op_activated_event,
-            batch_size, DEFAULT_TRANSFER_TIMEOUT, interface, status);
-    } else {
-        result = make_shared_nothrow<VdmaInputStream>(device, channel, edge_layer, core_op_activated_event,
-            batch_size, DEFAULT_TRANSFER_TIMEOUT, interface, status);
-    }
-
-    // Check that the creation of the various subclasses succeeded
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
-    return result;
-}
-
-VdmaInputStreamBase::VdmaInputStreamBase(VdmaDevice &device, vdma::BoundaryChannelPtr channel,
-                                         const LayerInfo &edge_layer, EventPtr core_op_activated_event,
-                                         uint16_t batch_size, std::chrono::milliseconds transfer_timeout,
-                                         hailo_stream_interface_t stream_interface, hailo_status &status) :
-    InputStreamBase(edge_layer, stream_interface, std::move(core_op_activated_event), status),
-    m_device(&device),
-    m_channel(std::move(channel)),
-    m_interface(stream_interface),
-    is_stream_activated(false),
-    m_channel_timeout(transfer_timeout),
-    m_max_batch_size(batch_size),
-    m_dynamic_batch_size(batch_size)
-{
-    // Checking status for base class c'tor
-    if (HAILO_SUCCESS != status) {
-        return;
-    }
-
-    status = HAILO_SUCCESS;
-}
-
-VdmaInputStreamBase::~VdmaInputStreamBase()
-{
-    // We want to stop the vdma channel before closing the stream in the firmware
-    // because sending data to a closed stream may terminate the dma engine
-    if (this->is_stream_activated) {
-        const auto status = VdmaInputStreamBase::deactivate_stream();
-        if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to deactivate stream with error status {}", status);
-        }
-    }
-}
-
-hailo_stream_interface_t VdmaInputStreamBase::get_interface() const
-{
-    return m_interface;
-}
-
-std::chrono::milliseconds VdmaInputStreamBase::get_timeout() const
-{
-    return this->m_channel_timeout;
-}
-
-hailo_status VdmaInputStreamBase::set_timeout(std::chrono::milliseconds timeout)
-{
-    this->m_channel_timeout = timeout;
-    return HAILO_SUCCESS;
-}
-
-hailo_status VdmaInputStreamBase::abort()
-{
-    return m_channel->abort();
-}
-
-hailo_status VdmaInputStreamBase::clear_abort()
-{
-    return m_channel->clear_abort();
-}
-
-hailo_status VdmaInputStreamBase::flush()
-{
-    const auto dynamic_batch_size = (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_dynamic_batch_size) ?
-        1 : m_dynamic_batch_size;
-    return m_channel->flush(m_channel_timeout * dynamic_batch_size);
-}
-
-hailo_status VdmaInputStreamBase::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
-{
-    auto status = set_dynamic_batch_size(dynamic_batch_size);
-    CHECK_SUCCESS(status);
-
-    status = m_channel->activate(0, resume_pending_stream_transfers);
-    CHECK_SUCCESS(status);
-
-    this->is_stream_activated = true;
-
-    return HAILO_SUCCESS;
-}
-
-hailo_status VdmaInputStreamBase::deactivate_stream()
-{
-    if (!is_stream_activated) {
-        return HAILO_SUCCESS;
-    }
-
-
-    auto status = m_channel->deactivate();
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Failed to stop channel with status {}", status);
-    }
-
-    this->is_stream_activated = false;
-    return status;
-}
-
-uint16_t VdmaInputStreamBase::get_dynamic_batch_size() const
-{
-    return std::max(m_dynamic_batch_size, static_cast<uint16_t>(1));
-}
-
-const char* VdmaInputStreamBase::get_dev_id() const
-{
-    return m_device->get_dev_id();
-}
-
-Expected<vdma::BoundaryChannel::BufferState> VdmaInputStreamBase::get_buffer_state()
-{
-    return m_channel->get_buffer_state();
-}
-
-Expected<size_t> VdmaInputStreamBase::get_buffer_frames_size() const
-{
-    return m_channel->get_transfers_count_in_buffer(m_stream_info.hw_frame_size);
-}
-
-Expected<size_t> VdmaInputStreamBase::get_pending_frames_count() const
-{
-    return m_channel->get_h2d_pending_frames_count();
-}
-
-hailo_status VdmaInputStreamBase::set_dynamic_batch_size(uint16_t dynamic_batch_size)
-{
-    // TODO: use std::max in the configure stage
-    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_max_batch_size) {
-        LOGGER__TRACE("max_batch_size is CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE; "
-                      "Ignoring value of dynamic_batch_size {}", m_dynamic_batch_size);
-        return HAILO_SUCCESS;
-    }
-
-    CHECK(dynamic_batch_size <= m_max_batch_size, HAILO_INVALID_ARGUMENT,
-        "Dynamic batch size ({}) must be <= than the configured batch size ({})",
-        dynamic_batch_size, m_max_batch_size);
-
-    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size) {
-        LOGGER__TRACE("Received CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size; "
-                      "Leaving previously set value of {}", m_dynamic_batch_size);
-    } else {
-        LOGGER__TRACE("Setting stream's dynamic_batch_size to {}", dynamic_batch_size);
-        m_dynamic_batch_size = dynamic_batch_size;
-
-        const auto status = m_channel->set_transfers_per_axi_intr(m_dynamic_batch_size);
-        CHECK_SUCCESS(status);
-    }
-
-    return HAILO_SUCCESS;
-}
-
-/** Output stream **/
-Expected<std::shared_ptr<VdmaOutputStreamBase>> VdmaOutputStreamBase::create(hailo_stream_interface_t interface,
-    VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer, uint16_t batch_size,
-    const hailo_stream_parameters_t &stream_params, EventPtr core_op_activated_event)
-{
-    CHECK_AS_EXPECTED(validate_device_interface_compatibility(interface, device.get_type()), HAILO_INTERNAL_FAILURE);
-
-    hailo_status status = HAILO_UNINITIALIZED;
-    std::shared_ptr<VdmaOutputStreamBase> result = nullptr;
-    // TODO: after adding NMS single int, we can create an async channel for async nms output stream (HRT-10553)
-    if ((stream_params.flags & HAILO_STREAM_FLAGS_ASYNC) != 0) {
-        if (edge_layer.format.order == HAILO_FORMAT_ORDER_HAILO_NMS) {
-            result = make_shared_nothrow<VdmaAsyncOutputNmsStream>(device, channel, edge_layer, core_op_activated_event,
-                batch_size, DEFAULT_TRANSFER_TIMEOUT, interface, status);
-        } else {
-            result = make_shared_nothrow<VdmaAsyncOutputStream>(device, channel, edge_layer, core_op_activated_event,
-                batch_size, DEFAULT_TRANSFER_TIMEOUT, interface, status);
-        }
-    } else {
-        result = make_shared_nothrow<VdmaOutputStream>(device, channel, edge_layer, core_op_activated_event,
-                batch_size, DEFAULT_TRANSFER_TIMEOUT, interface, status);
-    }
-
-    // Check that the creation of the various subclasses succeeded
-    CHECK_SUCCESS_AS_EXPECTED(status);
-    CHECK_NOT_NULL_AS_EXPECTED(result, HAILO_OUT_OF_HOST_MEMORY);
-
-    return result;
-}
-
-VdmaOutputStreamBase::VdmaOutputStreamBase(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-                                           EventPtr core_op_activated_event, uint16_t batch_size,
-                                           std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
-                                           hailo_status &status) :
-    OutputStreamBase(edge_layer, interface, std::move(core_op_activated_event), status),
-    m_device(&device),
-    m_channel(std::move(channel)),
-    m_interface(interface),
-    is_stream_activated(false),
-    m_transfer_timeout(transfer_timeout),
-    m_max_batch_size(batch_size),
-    m_dynamic_batch_size(batch_size),
-    m_transfer_size(get_transfer_size(m_stream_info, get_layer_info()))
-{
-    // Check status for base class c'tor
-    if (HAILO_SUCCESS != status) {
-        return;
-    }
-
-    status = HAILO_SUCCESS;
-}
-
-VdmaOutputStreamBase::~VdmaOutputStreamBase()
-{
-    // We want to stop the vdma channel before closing the stream in the firmware
-    // because sending data to a closed stream may terminate the dma engine
-    if (this->is_stream_activated) {
-        const auto status = VdmaOutputStreamBase::deactivate_stream();
-        if (HAILO_SUCCESS != status) {
-            LOGGER__ERROR("Failed to deactivate stream with error status {}", status);
-        }
-    }
-}
-
-hailo_stream_interface_t VdmaOutputStreamBase::get_interface() const
-{
-    return m_interface;
-}
-
-hailo_status VdmaOutputStreamBase::set_timeout(std::chrono::milliseconds timeout)
-{
-    this->m_transfer_timeout = timeout;
-    return HAILO_SUCCESS;
-}
-
-std::chrono::milliseconds VdmaOutputStreamBase::get_timeout() const
-{
-    return this->m_transfer_timeout;
-}
-
-hailo_status VdmaOutputStreamBase::abort()
-{
-    return m_channel->abort();
-}
-
-hailo_status VdmaOutputStreamBase::clear_abort()
-{
-    return m_channel->clear_abort();
-}
-
-uint16_t VdmaOutputStreamBase::get_dynamic_batch_size() const
-{
-    return std::max(m_dynamic_batch_size, static_cast<uint16_t>(1));
-}
-
-const char* VdmaOutputStreamBase::get_dev_id() const
-{
-    return m_device->get_dev_id();
-}
-
-Expected<vdma::BoundaryChannel::BufferState> VdmaOutputStreamBase::get_buffer_state()
-{
-    return m_channel->get_buffer_state();
-}
-
-hailo_status VdmaOutputStreamBase::activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers)
-{
-    auto status = set_dynamic_batch_size(dynamic_batch_size);
-    CHECK_SUCCESS(status);
-
-    status = m_channel->activate(m_transfer_size, resume_pending_stream_transfers);
-    CHECK_SUCCESS(status);
-
-    this->is_stream_activated = true;
-
-    return HAILO_SUCCESS;
-}
-
-void VdmaOutputStreamBase::register_interrupt_callback(const vdma::ProcessingCompleteCallback &callback)
-{
-    m_channel->register_interrupt_callback(callback);
-}
-
-hailo_status VdmaOutputStreamBase::deactivate_stream()
-{
-    if (!is_stream_activated) {
-        return HAILO_SUCCESS;
-    }
-
-    auto status = m_channel->deactivate();
-    if (HAILO_SUCCESS != status) {
-        LOGGER__ERROR("Failed to stop channel with status {}", status);
-    }
-
-    this->is_stream_activated = false;
-    return HAILO_SUCCESS;
-}
-
-uint32_t VdmaOutputStreamBase::get_transfer_size(const hailo_stream_info_t &stream_info, const LayerInfo &layer_info)
-{
-    return LayerInfoUtils::get_stream_transfer_size(stream_info, layer_info);
-}
-
-hailo_status VdmaOutputStreamBase::set_dynamic_batch_size(uint16_t dynamic_batch_size)
-{
-    // TODO: use std::max in the configure stage
-    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == m_max_batch_size) {
-        LOGGER__TRACE("max_batch_size is CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE; "
-                      "Ignoring value of dynamic_batch_size {}", m_dynamic_batch_size);
-        return HAILO_SUCCESS;
-    }
-
-    CHECK(dynamic_batch_size <= m_max_batch_size, HAILO_INVALID_ARGUMENT,
-        "Dynamic batch size ({}) must be <= than the configured batch size ({})",
-        dynamic_batch_size, m_max_batch_size);
-
-    if (CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size) {
-        LOGGER__TRACE("Received CONTROL_PROTOCOL__IGNORE_DYNAMIC_BATCH_SIZE == dynamic_batch_size; "
-                      "Leaving previously set value of {}", m_dynamic_batch_size);
-    } else {
-        LOGGER__TRACE("Setting stream's dynamic_batch_size to {}", dynamic_batch_size);
-        m_dynamic_batch_size = dynamic_batch_size;
-
-        const auto status = m_channel->set_transfers_per_axi_intr(m_dynamic_batch_size);
-        CHECK_SUCCESS(status);
-    }
-
-    return HAILO_SUCCESS;
-}
-
-Expected<size_t> VdmaOutputStreamBase::get_buffer_frames_size() const
-{
-    if (HAILO_FORMAT_ORDER_HAILO_NMS == m_stream_info.format.order) {
-        // In NMS, each output frame has different size depending on the number of bboxes found for each class
-        // and m_stream_info.hw_frame_size is the max frame size. To know the actual frame size and
-        // calculate the number of frames we need to read the content of the buffer (and finding the delimiter for each class in each frame).
-        LOGGER__INFO("NMS is not supported in function get_buffer_frames_size()");
-        return make_unexpected(HAILO_NOT_AVAILABLE);
-    }
-
-    return m_channel->get_transfers_count_in_buffer(m_stream_info.hw_frame_size);
-}
-
-Expected<size_t> VdmaOutputStreamBase::get_pending_frames_count() const
-{
-    if (HAILO_FORMAT_ORDER_HAILO_NMS == m_stream_info.format.order) {
-        // In NMS, each output frame has different size depending on the number of bboxes found for each class
-        // and m_stream_info.hw_frame_size is the max frame size. To know the actual frame size and
-        // calculate the number of frames we need to read the content of the buffer (and finding the delimiter for each class in each frame).
-        LOGGER__INFO("NMS is not supported in function get_pending_frames_count()");
-        return make_unexpected(HAILO_NOT_AVAILABLE);
-    }
-
-    auto pending_descs_count = m_channel->get_d2h_pending_descs_count();
-    CHECK_EXPECTED(pending_descs_count);
-
-    auto channel_page_size = m_channel->get_page_size();
-    uint32_t descs_per_frame = (0 == (m_stream_info.hw_frame_size % channel_page_size)) ? (m_stream_info.hw_frame_size / channel_page_size) :
-        ((m_stream_info.hw_frame_size / channel_page_size) + 1);
-
-    return static_cast<size_t>(std::floor(pending_descs_count.value() / descs_per_frame));
-}
-
-} /* namespace hailort */
diff --git a/hailort/libhailort/src/vdma/vdma_stream_base.hpp b/hailort/libhailort/src/vdma/vdma_stream_base.hpp
deleted file mode 100644 (file)
index 9569f24..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Copyright (c) 2020-2022 Hailo Technologies Ltd. All rights reserved.
- * Distributed under the MIT license (https://opensource.org/licenses/MIT)
- **/
-/**
- * @file vdma_stream_base.hpp
- * @brief Base class for stream objects over vDMA channel
- **/
-
-#ifndef _HAILO_VDMA_STREAM_BASE_HPP_
-#define _HAILO_VDMA_STREAM_BASE_HPP_
-
-#include "hailo/hailort.h"
-#include "hailo/expected.hpp"
-
-#include "stream_common/stream_internal.hpp"
-#include "vdma/vdma_device.hpp"
-#include "vdma/channel/boundary_channel.hpp"
-
-
-namespace hailort
-{
-
-class VdmaInputStreamBase : public InputStreamBase {
-public:
-    static Expected<std::shared_ptr<VdmaInputStreamBase>> create(hailo_stream_interface_t interface,
-        VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-        const hailo_stream_parameters_t &stream_params, uint16_t batch_size, EventPtr core_op_activated_event);
-
-    virtual ~VdmaInputStreamBase();
-
-    virtual hailo_stream_interface_t get_interface() const override;
-    virtual std::chrono::milliseconds get_timeout() const override;
-    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
-    virtual hailo_status abort() override;
-    virtual hailo_status clear_abort() override;
-    virtual hailo_status flush() override;
-    uint16_t get_dynamic_batch_size() const;
-    const char* get_dev_id() const;
-    Expected<vdma::BoundaryChannel::BufferState> get_buffer_state();
-    virtual Expected<size_t> get_buffer_frames_size() const override;
-    virtual Expected<size_t> get_pending_frames_count() const override;
-
-    virtual hailo_status write_buffer_only(const MemoryView &buffer, const std::function<bool()> &should_cancel = []() { return false; }) = 0;
-    virtual hailo_status send_pending_buffer(const device_id_t &device_id) = 0;
-
-    void notify_all()
-    {
-        m_channel->notify_all();
-    }
-
-protected:
-    VdmaInputStreamBase(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-                    EventPtr core_op_activated_event, uint16_t batch_size,
-                    std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t stream_interface,
-                    hailo_status &status);
-
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
-    virtual hailo_status deactivate_stream() override;
-    hailo_status set_dynamic_batch_size(uint16_t dynamic_batch_size);
-
-    friend class VDeviceInputStreamBase;
-    friend class VDeviceNativeInputStream;
-
-    VdmaDevice *m_device;
-    vdma::BoundaryChannelPtr m_channel;
-    const hailo_stream_interface_t m_interface;
-    bool is_stream_activated;
-    std::chrono::milliseconds m_channel_timeout;
-    const uint16_t m_max_batch_size;
-    uint16_t m_dynamic_batch_size;
-};
-
-class VdmaOutputStreamBase : public OutputStreamBase {
-public:
-    static Expected<std::shared_ptr<VdmaOutputStreamBase>> create(hailo_stream_interface_t interface,
-        VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer, uint16_t batch_size, 
-        const hailo_stream_parameters_t &stream_params, EventPtr core_op_activated_event);
-
-    virtual ~VdmaOutputStreamBase();
-
-    virtual hailo_stream_interface_t get_interface() const override;
-    virtual std::chrono::milliseconds get_timeout() const override;
-    virtual hailo_status set_timeout(std::chrono::milliseconds timeout) override;
-    virtual hailo_status abort() override;
-    virtual hailo_status clear_abort() override;
-    uint16_t get_dynamic_batch_size() const;
-    const char* get_dev_id() const;
-    Expected<vdma::BoundaryChannel::BufferState> get_buffer_state();
-    virtual Expected<size_t> get_buffer_frames_size() const override;
-    virtual Expected<size_t> get_pending_frames_count() const override;
-
-    void register_interrupt_callback(const vdma::ProcessingCompleteCallback &callback);
-
-protected:
-    VdmaOutputStreamBase(VdmaDevice &device, vdma::BoundaryChannelPtr channel, const LayerInfo &edge_layer,
-                         EventPtr core_op_activated_event, uint16_t batch_size,
-                         std::chrono::milliseconds transfer_timeout, hailo_stream_interface_t interface,
-                         hailo_status &status);
-
-    virtual hailo_status activate_stream(uint16_t dynamic_batch_size, bool resume_pending_stream_transfers) override;
-    virtual hailo_status deactivate_stream() override;
-    static uint32_t get_transfer_size(const hailo_stream_info_t &stream_info, const LayerInfo &layer_info);
-    hailo_status set_dynamic_batch_size(uint16_t dynamic_batch_size);
-
-    friend class VDeviceOutputStreamBase;
-
-    VdmaDevice *m_device;
-    vdma::BoundaryChannelPtr m_channel;
-    const hailo_stream_interface_t m_interface;
-    bool is_stream_activated;
-    std::chrono::milliseconds m_transfer_timeout;
-    const uint16_t m_max_batch_size;
-    uint16_t m_dynamic_batch_size;
-    const uint32_t m_transfer_size;
-};
-
-
-} /* namespace hailort */
-
-#endif /* _HAILO_VDMA_STREAM_BASE_HPP_ */
diff --git a/hailort/libhailort/tracer_profiler.proto b/hailort/libhailort/tracer_profiler.proto
new file mode 100644 (file)
index 0000000..7c5330b
--- /dev/null
@@ -0,0 +1,122 @@
+syntax = "proto3";
+
+option optimize_for = LITE_RUNTIME;
+
+message ProtoProfiler {
+    ProtoProfilerTopHeader top_header = 1;
+    // Each event is under this added trace
+    repeated ProtoTraceMessage added_trace = 2;
+}
+
+message ProtoProfilerTopHeader {
+    // Relative time, nanosec
+    uint64 time_stamp = 1;
+    ProtoDateTime time = 2;
+    // Hailort version
+    string hailort_ver = 3;
+    string os_name = 4;
+    string os_ver = 5;
+    string cpu_arch = 6;
+    uint64 sys_ram_size = 7; //bytes
+    uint64 time_stamp_since_epoch =8; //nanosec
+}
+
+message ProtoTraceMessage {
+    oneof trace {
+        ProtoProfilerSetSchedulerParam core_op_set_value = 1;
+        ProtoProfilerAddStreamTrace added_stream = 2;
+        ProtoProfilerAddCoreOpTrace added_core_op = 3;
+        ProtoProfilerAddDeviceTrace added_device = 4;
+        ProtoProfilerSwitchedCoreOpTrace switched_core_op = 5;
+        ProtoProfilerFrameEnqueueTrace frame_enqueue = 6;
+        ProtoProfilerFrameDequeueTrace frame_dequeue = 7;
+        ProtoProfilerCoreOpSwitchDecision switch_core_op_decision = 8;
+    }
+}
+
+message ProtoDateTime {
+    uint32 year = 1;
+    uint32 month = 2;
+    uint32 day = 3;
+    uint32 hour = 4;
+    uint32 min = 5;
+}
+
+message ProtoProfilerDeviceInfo {
+    string device_id = 1;
+    string device_arch = 2;
+}
+
+// The direction of the event, Host-To-Device/Device-To-Host
+enum ProtoProfilerStreamDirection {
+    PROTO__STREAM_DIRECTION__H2D = 0;
+    PROTO__STREAM_DIRECTION__D2H = 1;
+}
+
+message ProtoProfilerAddCoreOpTrace {
+    uint64 time_stamp = 1; // nanosec
+    int32 core_op_handle = 2;
+    string core_op_name = 3;
+}
+
+// Frame dequeue means transferring a frame from the buffer to device
+// or from the buffer to host
+message ProtoProfilerFrameDequeueTrace {
+    uint64 time_stamp = 1; // nanosec
+    int32 core_op_handle = 2;
+    string device_id = 3;
+    string stream_name = 4;
+    ProtoProfilerStreamDirection direction = 5;
+}
+
+// Frame enqueue means transferring a frame from the user to buffer
+// or from the device to buffer
+message ProtoProfilerFrameEnqueueTrace {
+    uint64 time_stamp = 1; // nanosec
+    int32 core_op_handle = 2;
+    string device_id = 3;
+    string stream_name = 4;
+    ProtoProfilerStreamDirection direction = 5;
+}
+
+// Relevant when using scheduler
+message ProtoProfilerCoreOpSwitchDecision {
+    uint64 time_stamp = 1; // nanosec
+    int32 core_op_handle = 2;
+    bool over_threshold = 3;
+    bool over_timeout = 4;
+    bool switch_because_idle = 5;
+}
+
+message ProtoProfilerSwitchedCoreOpTrace {
+    uint64 time_stamp = 1; // nanosec
+    int32 new_core_op_handle = 2;
+    string core_op_name = 3;
+    string device_id = 4;
+}
+
+// Low level streams adding
+message ProtoProfilerAddStreamTrace {
+    uint64 time_stamp = 1; // nanosec
+    int32 core_op_handle = 2;
+    string device_id = 3;
+    string stream_name = 4;
+    int32 queue_size = 5;
+    ProtoProfilerStreamDirection direction = 6;
+}
+
+// Setting a new params relevant a specific core_op_handle
+message ProtoProfilerSetSchedulerParam {
+    uint64 time_stamp = 1; // nanosec
+    int32 core_op_handle = 2;
+    oneof value {
+        int64 timeout = 3; // millisec
+        int32 threshold = 4;
+        int32 priority = 5;
+    }
+}
+
+message ProtoProfilerAddDeviceTrace {
+    uint64 time_stamp = 1; // nanosec
+    ProtoProfilerDeviceInfo  device_info = 2;
+}
index 1c7438e95fed0b33b9aadf9021e12612e8d864d2..66c5f52face9a199f0640e8b93f7269859c2fa36 100644 (file)
@@ -16,16 +16,19 @@ function(git_clone proj repo tag)
     )
 endfunction()
 
-include(${CMAKE_CURRENT_LIST_DIR}/../../libhailort/bindings/python/externals/pybind11.cmake)
-git_clone(Catch2            https://github.com/catchorg/Catch2.git                                  c4e3767e265808590986d5db6ca1b5532a7f3d13)
+include(${CMAKE_CURRENT_LIST_DIR}/../../cmake/external/pybind11.cmake)
+include(${CMAKE_CURRENT_LIST_DIR}/../../cmake/external/catch2.cmake)
+include(${CMAKE_CURRENT_LIST_DIR}/../../cmake/external/spdlog.cmake)
+include(${CMAKE_CURRENT_LIST_DIR}/../../cmake/external/json.cmake)
+include(${CMAKE_CURRENT_LIST_DIR}/../../cmake/external/dotwriter.cmake)
+include(${CMAKE_CURRENT_LIST_DIR}/../../cmake/external/benchmark.cmake)
+include(${CMAKE_CURRENT_LIST_DIR}/../../cmake/external/readerwriterqueue.cmake)
+if(CMAKE_SYSTEM_NAME STREQUAL QNX)
+    include(${CMAKE_CURRENT_LIST_DIR}/../../cmake/external/pevents.cmake)
+endif()
+
 git_clone(CLI11             https://github.com/hailo-ai/CLI11.git                                   f1644f15f219303b7ad670732c21018a1e6f0e11)
-git_clone(spdlog            https://github.com/gabime/spdlog.git                                    e2789531912a5c6ab28a90387f97c52963eec08a)
 git_clone(protobuf          https://github.com/protocolbuffers/protobuf.git                         22d0e265de7d2b3d2e9a00d071313502e7d4cccf)
-git_clone(readerwriterqueue https://github.com/cameron314/readerwriterqueue.git                     435e36540e306cac40fcfeab8cc0a22d48464509)
-git_clone(json              https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent.git  391786c6c3abdd3eeb993a3154f1f2a4cfe137a0)
-git_clone(DotWriter         https://github.com/hailo-ai/DotWriter.git                               e5fa8f281adca10dd342b1d32e981499b8681daf)
-git_clone(benchmark         https://github.com/google/benchmark.git                                 f91b6b42b1b9854772a90ae9501464a161707d1e)
-git_clone(pevents           https://github.com/neosmart/pevents.git                                 1209b1fd1bd2e75daab4380cf43d280b90b45366)
 
 if(HAILO_BUILD_SERVICE)
     git_clone(grpc              https://github.com/grpc/grpc                                            53d69cc581c5b7305708587f4f1939278477c28a)
index 873c9ba922bde591c418321f9ad093b2d36d109b..1ebdd8b8f9ec8a33c1c02a2dc8851ceb52a11134 100644 (file)
@@ -7,13 +7,12 @@ service ProtoHailoRtRpc {
     rpc get_service_version (get_service_version_Request) returns (get_service_version_Reply) {}
 
     rpc VDevice_create (VDevice_create_Request) returns (VDevice_create_Reply) {}
-    rpc VDevice_dup_handle (dup_handle_Request) returns (dup_handle_Reply) {}
     rpc VDevice_release (Release_Request) returns (Release_Reply) {}
     rpc VDevice_configure (VDevice_configure_Request) returns (VDevice_configure_Reply) {}
     rpc VDevice_get_physical_devices_ids (VDevice_get_physical_devices_ids_Request) returns (VDevice_get_physical_devices_ids_Reply) {}
     rpc VDevice_get_default_streams_interface (VDevice_get_default_streams_interface_Request) returns (VDevice_get_default_streams_interface_Reply) {}
 
-    rpc ConfiguredNetworkGroup_dup_handle (dup_handle_Request) returns (dup_handle_Reply) {}
+    rpc ConfiguredNetworkGroup_dup_handle (ConfiguredNetworkGroup_dup_handle_Request) returns (ConfiguredNetworkGroup_dup_handle_Reply) {}
     rpc ConfiguredNetworkGroup_release (Release_Request) returns (Release_Reply) {}
     rpc ConfiguredNetworkGroup_make_input_vstream_params (ConfiguredNetworkGroup_make_input_vstream_params_Request) returns (ConfiguredNetworkGroup_make_input_vstream_params_Reply) {}
     rpc ConfiguredNetworkGroup_make_output_vstream_params (ConfiguredNetworkGroup_make_output_vstream_params_Request) returns (ConfiguredNetworkGroup_make_output_vstream_params_Reply) {}
@@ -38,12 +37,12 @@ service ProtoHailoRtRpc {
     rpc ConfiguredNetworkGroup_get_vstream_names_from_stream_name(ConfiguredNetworkGroup_get_vstream_names_from_stream_name_Request) returns (ConfiguredNetworkGroup_get_vstream_names_from_stream_name_Reply) {}
 
     rpc InputVStreams_create (VStream_create_Request) returns (VStreams_create_Reply) {}
-    rpc InputVStream_dup_handle (dup_handle_Request) returns (dup_handle_Reply) {}
-    rpc OutputVStream_dup_handle (dup_handle_Request) returns (dup_handle_Reply) {}
     rpc InputVStream_release (Release_Request) returns (Release_Reply) {}
     rpc OutputVStreams_create (VStream_create_Request) returns (VStreams_create_Reply) {}
     rpc OutputVStream_release (Release_Request) returns (Release_Reply) {}
+    rpc InputVStream_is_multi_planar (InputVStream_is_multi_planar_Request) returns (InputVStream_is_multi_planar_Reply) {}
     rpc InputVStream_write (InputVStream_write_Request) returns (InputVStream_write_Reply) {}
+    rpc InputVStream_write_pix (InputVStream_write_pix_Request) returns (InputVStream_write_pix_Reply) {}
     rpc OutputVStream_read (OutputVStream_read_Request) returns (OutputVStream_read_Reply) {}
     rpc InputVStream_get_frame_size (VStream_get_frame_size_Request) returns (VStream_get_frame_size_Reply) {}
     rpc OutputVStream_get_frame_size (VStream_get_frame_size_Request) returns (VStream_get_frame_size_Reply) {}
@@ -66,6 +65,9 @@ service ProtoHailoRtRpc {
     rpc OutputVStream_get_info (VStream_get_info_Request) returns (VStream_get_info_Reply) {}
     rpc InputVStream_is_aborted (VStream_is_aborted_Request) returns (VStream_is_aborted_Reply) {}
     rpc OutputVStream_is_aborted (VStream_is_aborted_Request) returns (VStream_is_aborted_Reply) {}
+    rpc OutputVStream_set_nms_score_threshold (VStream_set_nms_score_threshold_Request) returns (VStream_set_nms_score_threshold_Reply) {}
+    rpc OutputVStream_set_nms_iou_threshold (VStream_set_nms_iou_threshold_Request) returns (VStream_set_nms_iou_threshold_Reply) {}
+    rpc OutputVStream_set_nms_max_proposals_per_class (VStream_set_nms_max_proposals_per_class_Request) returns (VStream_set_nms_max_proposals_per_class_Reply) {}
 }
 
 message empty {}
@@ -74,6 +76,21 @@ message keepalive_Request {
     uint32 pid = 1;
 }
 
+message ProtoVDeviceIdentifier {
+    uint32 vdevice_handle = 1;
+}
+
+message ProtoConfiguredNetworkGroupIdentifier {
+    uint32 vdevice_handle = 1;
+    uint32 network_group_handle = 2;
+}
+
+message ProtoVStreamIdentifier {
+    uint32 vdevice_handle = 1;
+    uint32 network_group_handle = 2;
+    uint32 vstream_handle = 3;
+}
+
 message ProtoVDeviceParams {
     uint32 device_count = 1;
     repeated string device_ids = 2;
@@ -95,12 +112,12 @@ message get_service_version_Reply {
     ProtoHailoVersion hailo_version = 2;
 }
 
-message dup_handle_Request {
+message ConfiguredNetworkGroup_dup_handle_Request {
     uint32 pid = 1;
-    uint32 handle = 2;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 2;
 }
 
-message dup_handle_Reply {
+message ConfiguredNetworkGroup_dup_handle_Reply {
     uint32 handle = 1;
 }
 
@@ -114,11 +131,20 @@ message VDevice_create_Reply {
     uint32 handle = 2;
 }
 
-message Release_Request {
-    uint32 handle = 1;
+message Release_ConfiguredNetworkGroup_Request {
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     uint32 pid = 2;
 }
 
+message Release_Request {
+    uint32 pid = 1;
+    oneof identifier {
+        ProtoVDeviceIdentifier vdevice_identifier = 2;
+        ProtoConfiguredNetworkGroupIdentifier network_group_identifier = 3;
+        ProtoVStreamIdentifier vstream_identifier = 4;
+    }
+}
+
 message Release_Reply {
     uint32 status = 1;
 }
@@ -129,7 +155,7 @@ message VStreams_create_Reply {
 }
 
 message VStream_create_Request {
-    uint32 net_group = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     repeated ProtoNamedVStreamParams vstreams_params = 2;
     uint32 pid = 3;
 }
@@ -234,6 +260,7 @@ message ProtoNamedNetworkParams {
 message ProtoNmsShape {
     uint32 number_of_classes = 1;
     uint32 max_bbox_per_class = 2;
+    uint32 max_mask_size = 3;
 }
 
 message ProtoVStreamInfo {
@@ -264,7 +291,7 @@ message ProtoVStreamGroup {
 }
 
 message VDevice_configure_Request {
-    uint32 handle = 1;
+    ProtoVDeviceIdentifier identifier = 1;
     bytes hef = 2;
     repeated ProtoNamedConfigureNetworkParams configure_params_map = 3;
     uint32 pid = 4;
@@ -276,7 +303,7 @@ message VDevice_configure_Reply {
 }
 
 message VDevice_get_physical_devices_ids_Request {
-    uint32 handle = 1;
+    ProtoVDeviceIdentifier identifier = 1;
 }
 
 message VDevice_get_physical_devices_ids_Reply {
@@ -285,7 +312,7 @@ message VDevice_get_physical_devices_ids_Reply {
 }
 
 message VDevice_get_default_streams_interface_Request {
-    uint32 handle = 1;
+    ProtoVDeviceIdentifier identifier = 1;
 }
 
 message VDevice_get_default_streams_interface_Reply {
@@ -294,7 +321,7 @@ message VDevice_get_default_streams_interface_Reply {
 }
 
 message ConfiguredNetworkGroup_make_input_vstream_params_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     bool quantized = 2;
     uint32 format_type = 3;
     uint32 timeout_ms = 4;
@@ -311,8 +338,9 @@ message ConfiguredNetworkGroup_make_input_vstream_params_Reply {
     ProtoNamedVStreamParamsMap vstream_params_map = 2;
 }    
 
+// TODO: Why do we have this struct for both input and output instead of 1 struct for both
 message ConfiguredNetworkGroup_make_output_vstream_params_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     bool quantized = 2;
     uint32 format_type = 3;
     uint32 timeout_ms = 4;
@@ -326,7 +354,7 @@ message ConfiguredNetworkGroup_make_output_vstream_params_Reply {
 }
 
 message ConfiguredNetworkGroup_make_output_vstream_params_groups_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     bool quantized = 2;
     uint32 format_type = 3;
     uint32 timeout_ms = 4;
@@ -339,7 +367,7 @@ message ConfiguredNetworkGroup_make_output_vstream_params_groups_Reply {
 }
 
 message ConfiguredNetworkGroup_name_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
 }
 
 message ConfiguredNetworkGroup_name_Reply {
@@ -348,7 +376,7 @@ message ConfiguredNetworkGroup_name_Reply {
 }
 
 message ConfiguredNetworkGroup_get_network_infos_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
 }
 
 message ConfiguredNetworkGroup_get_network_infos_Reply {
@@ -357,7 +385,7 @@ message ConfiguredNetworkGroup_get_network_infos_Reply {
 }
 
 message ConfiguredNetworkGroup_get_all_stream_infos_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     string network_name = 2;
 }
 
@@ -367,7 +395,7 @@ message ConfiguredNetworkGroup_get_all_stream_infos_Reply {
 }
 
 message ConfiguredNetworkGroup_get_default_stream_interface_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
 }
 
 message ConfiguredNetworkGroup_get_default_stream_interface_Reply {
@@ -376,7 +404,7 @@ message ConfiguredNetworkGroup_get_default_stream_interface_Reply {
 }
 
 message ConfiguredNetworkGroup_get_output_vstream_groups_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
 }
 
 message ConfiguredNetworkGroup_get_output_vstream_groups_Reply {
@@ -385,12 +413,12 @@ message ConfiguredNetworkGroup_get_output_vstream_groups_Reply {
 }
 
 message ConfiguredNetworkGroup_get_vstream_infos_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     string network_name = 2;
 }
 
 message ConfiguredNetworkGroup_get_latency_measurement_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     string network_name = 2;
 }
 
@@ -400,7 +428,7 @@ message ConfiguredNetworkGroup_get_vstream_infos_Reply {
 }
 
 message ConfiguredNetworkGroup_is_scheduled_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
 }
 
 message ConfiguredNetworkGroup_is_scheduled_Reply {
@@ -409,7 +437,7 @@ message ConfiguredNetworkGroup_is_scheduled_Reply {
 }
 
 message ConfiguredNetworkGroup_set_scheduler_timeout_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     uint32 timeout_ms = 2;
     string network_name = 3;
 }
@@ -419,7 +447,7 @@ message ConfiguredNetworkGroup_set_scheduler_timeout_Reply {
 }
 
 message ConfiguredNetworkGroup_set_scheduler_threshold_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     uint32 threshold = 2;
     string network_name = 3;
 }
@@ -429,7 +457,7 @@ message ConfiguredNetworkGroup_set_scheduler_threshold_Reply {
 }
 
 message ConfiguredNetworkGroup_set_scheduler_priority_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     uint32 priority = 2;
     string network_name = 3;
 }
@@ -444,7 +472,7 @@ message ConfiguredNetworkGroup_get_latency_measurement_Reply {
 }
 
 message ConfiguredNetworkGroup_is_multi_context_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
 }
 
 message ConfiguredNetworkGroup_is_multi_context_Reply {
@@ -453,7 +481,7 @@ message ConfiguredNetworkGroup_is_multi_context_Reply {
 }
 
 message ConfiguredNetworkGroup_get_config_params_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
 }
 
 message ConfiguredNetworkGroup_get_config_params_Reply {
@@ -462,7 +490,7 @@ message ConfiguredNetworkGroup_get_config_params_Reply {
 }
 
 message ConfiguredNetworkGroup_get_sorted_output_names_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
 }
 
 message ConfiguredNetworkGroup_get_sorted_output_names_Reply {
@@ -471,7 +499,7 @@ message ConfiguredNetworkGroup_get_sorted_output_names_Reply {
 }
 
 message ConfiguredNetworkGroup_get_stream_names_from_vstream_name_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     string vstream_name = 2;
 }
 
@@ -481,7 +509,7 @@ message ConfiguredNetworkGroup_get_stream_names_from_vstream_name_Reply {
 }
 
 message ConfiguredNetworkGroup_get_vstream_names_from_stream_name_Request {
-    uint32 handle = 1;
+    ProtoConfiguredNetworkGroupIdentifier identifier = 1;
     string stream_name = 2;
 }
 
@@ -491,7 +519,7 @@ message ConfiguredNetworkGroup_get_vstream_names_from_stream_name_Reply {
 }
 
 message InputVStream_write_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
     bytes data = 2;
 }
 
@@ -499,8 +527,19 @@ message InputVStream_write_Reply {
     uint32 status = 1;
 }
 
+message InputVStream_write_pix_Request {
+    ProtoVStreamIdentifier identifier = 1;
+    uint32 index = 2;
+    uint32 number_of_planes = 3;
+    repeated bytes planes_data = 4;
+}
+
+message InputVStream_write_pix_Reply {
+    uint32 status = 1;
+}
+
 message OutputVStream_read_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
     uint32 size = 2;
 }
 
@@ -510,7 +549,7 @@ message OutputVStream_read_Reply {
 }
 
 message VStream_get_frame_size_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message VStream_get_frame_size_Reply {
@@ -519,7 +558,7 @@ message VStream_get_frame_size_Reply {
 }
 
 message InputVStream_flush_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message InputVStream_flush_Reply {
@@ -527,7 +566,7 @@ message InputVStream_flush_Reply {
 }
 
 message VStream_name_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message VStream_name_Reply {
@@ -536,7 +575,7 @@ message VStream_name_Reply {
 }
 
 message VStream_network_name_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message VStream_network_name_Reply {
@@ -545,7 +584,7 @@ message VStream_network_name_Reply {
 }
 
 message VStream_abort_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message VStream_abort_Reply {
@@ -553,7 +592,7 @@ message VStream_abort_Reply {
 }
 
 message VStream_stop_and_clear_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message VStream_stop_and_clear_Reply {
@@ -561,7 +600,7 @@ message VStream_stop_and_clear_Reply {
 }
 
 message VStream_start_vstream_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message VStream_start_vstream_Reply {
@@ -569,7 +608,7 @@ message VStream_start_vstream_Reply {
 }
 
 message VStream_resume_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message VStream_resume_Reply {
@@ -577,7 +616,7 @@ message VStream_resume_Reply {
 }
 
 message VStream_get_user_buffer_format_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message VStream_get_user_buffer_format_Reply {
@@ -586,7 +625,7 @@ message VStream_get_user_buffer_format_Reply {
 }
 
 message VStream_get_info_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message VStream_get_info_Reply {
@@ -594,11 +633,47 @@ message VStream_get_info_Reply {
     ProtoVStreamInfo vstream_info = 2;
 }
 
+message InputVStream_is_multi_planar_Request {
+    ProtoVStreamIdentifier identifier = 1;
+}
+
+message InputVStream_is_multi_planar_Reply {
+    uint32 status = 1;
+    bool is_multi_planar = 2;
+}
+
 message VStream_is_aborted_Request {
-    uint32 handle = 1;
+    ProtoVStreamIdentifier identifier = 1;
 }
 
 message VStream_is_aborted_Reply {
     uint32 status = 1;
     bool is_aborted = 2;
+}
+
+message VStream_set_nms_score_threshold_Request {
+    ProtoVStreamIdentifier identifier = 1;
+    float threshold = 2;
+}
+
+message VStream_set_nms_score_threshold_Reply {
+    uint32 status = 1;
+}
+
+message VStream_set_nms_iou_threshold_Request {
+    ProtoVStreamIdentifier identifier = 1;
+    float threshold = 2;
+}
+
+message VStream_set_nms_iou_threshold_Reply {
+    uint32 status = 1;
+}
+
+message VStream_set_nms_max_proposals_per_class_Request {
+    ProtoVStreamIdentifier identifier = 1;
+    uint32 max_proposals_per_class = 2;
+}
+
+message VStream_set_nms_max_proposals_per_class_Reply {
+    uint32 status = 1;
 }
\ No newline at end of file
index 0000c4c7233536ed322f0c2002e3fdddc958549a..7e999b14c25d37a7226025282ae8f1d915005ed9 100644 (file)
@@ -22,6 +22,63 @@ static const std::string HAILORT_SERVICE_DEFAULT_ADDR = HAILO_UDS_PREFIX + HAILO
 #endif
 static const std::chrono::seconds HAILO_KEEPALIVE_INTERVAL(2);
 
+#define HAILORT_SERVICE_ADDRESS_ENV_VAR ("HAILORT_SERVICE_ADDRESS")
+static const std::string HAILORT_SERVICE_ADDRESS = []() {
+    const char* env_var = std::getenv(HAILORT_SERVICE_ADDRESS_ENV_VAR);
+    if (env_var) {
+        return std::string(env_var);
+    } else {
+        return HAILORT_SERVICE_DEFAULT_ADDR; // Default value if environment variable is not set
+    }
+}();
+
+class VDeviceIdentifier {
+public:
+    VDeviceIdentifier(uint32_t vdevice_handle) : m_vdevice_handle(vdevice_handle)
+    {}
+
+    bool equals(const VDeviceIdentifier &other)
+    {
+        return (this->m_vdevice_handle == other.m_vdevice_handle);
+    }
+
+    uint32_t m_vdevice_handle;
+};
+
+class NetworkGroupIdentifier {
+public:
+    NetworkGroupIdentifier(VDeviceIdentifier vdevice_identifier, uint32_t network_group_handle) :
+        m_vdevice_identifier(vdevice_identifier),
+        m_network_group_handle(network_group_handle)
+    {}
+
+    bool equals(const NetworkGroupIdentifier &other)
+    {
+        return ((this->m_vdevice_identifier.equals(other.m_vdevice_identifier)) &&
+            (this->m_network_group_handle == other.m_network_group_handle));
+    }
+
+    VDeviceIdentifier m_vdevice_identifier;
+    uint32_t m_network_group_handle;
+};
+
+class VStreamIdentifier {
+public:
+    VStreamIdentifier(NetworkGroupIdentifier network_group_identifier, uint32_t vstream_handle) :
+        m_network_group_identifier(network_group_identifier),
+        m_vstream_handle(vstream_handle)
+    {}
+
+    bool equals(const VStreamIdentifier &other)
+    {
+        return ((this->m_network_group_identifier.equals(other.m_network_group_identifier)) &&
+            (this->m_vstream_handle == other.m_vstream_handle));
+    }
+
+    NetworkGroupIdentifier m_network_group_identifier;
+    uint32_t m_vstream_handle;
+};
+
 }
 
 #endif
\ No newline at end of file
index db9c3ff4031923600811e46e58f3087706e13e03..df3e2db42e84975dd970763879eff5e3bc9feaa4 100644 (file)
@@ -2,7 +2,7 @@
 @ECHO OFF
 
 set BASE_URI=https://hailo-hailort.s3.eu-west-2.amazonaws.com
-set HRT_VERSION=4.14.0
+set HRT_VERSION=4.15.0
 set FW_DIR=Hailo8/%HRT_VERSION%/FW
 set FW=hailo8_fw.%HRT_VERSION%_eth.bin
 
index d02e0c5c400ff63bab5f3e6a0638800efb1be367..1c5904e7b93d7a5ff330febb8b751bc3bb3cc7cf 100755 (executable)
@@ -2,7 +2,7 @@
 set -e
 
 readonly BASE_URI="https://hailo-hailort.s3.eu-west-2.amazonaws.com"
-readonly HRT_VERSION=4.14.0
+readonly HRT_VERSION=4.15.0
 readonly FW_AWS_DIR="Hailo8/${HRT_VERSION}/FW"
 readonly FW="hailo8_fw.${HRT_VERSION}_eth.bin"
 
index 944367e96c2592df91a826278522acef1b00322b..d5a894386754d77335a9cf52ef426871b403aa69 100644 (file)
@@ -1,7 +1,7 @@
 :: cmd
 @ECHO OFF
 set BASE_URI=https://hailo-hailort.s3.eu-west-2.amazonaws.com
-set HRT_VERSION=4.14.0
+set HRT_VERSION=4.15.0
 set REMOTE_HEF_DIR=Hailo8/%HRT_VERSION%/HEFS
 set LOCAL_EXAMPLES_HEF_DIR=..\libhailort\examples\hefs
 set LOCAL_TUTORIALS_HEF_DIR=..\libhailort\bindings\python\platform\hailo_tutorials\hefs
index ff56bd04ed3ce9f6da326a0a29c57da6146e8eed..2073ded97245f9ae87314961d56fde7b158ae8a2 100755 (executable)
@@ -2,7 +2,7 @@
 set -e
 
 readonly BASE_URI="https://hailo-hailort.s3.eu-west-2.amazonaws.com"
-readonly HRT_VERSION=4.14.0
+readonly HRT_VERSION=4.15.0
 readonly REMOTE_HEF_DIR="Hailo8/${HRT_VERSION}/HEFS"
 readonly LOCAL_EXAMPLES_HEF_DIR="../libhailort/examples/hefs"
 readonly LOCAL_TUTORIALS_HEF_DIR="../libhailort/bindings/python/platform/hailo_tutorials/hefs"
index 0e4e414c0fbc85673e2f91ff3ce91f421761bc12..83fa10126ced6a605372748c3331afd072a4e2fb 100755 (executable)
@@ -6,7 +6,7 @@ script_directory=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
 source "$script_directory"/hailo15_env_vars.sh
 
 cd $local_platform_sw_path
-./install.sh comp build_integrated_nnc_driver --image-path /local/bkc/v0.29-build-2023-05-07
+./install.sh comp build_integrated_nnc_driver
 path="$local_platform_sw_path"/hailort/drivers/linux/integrated_nnc/hailo_integrated_nnc.ko
 scp $path root@$h15:/lib/modules/5.15.32-yocto-standard/kernel/drivers/misc/hailo_integrated_nnc.ko
 
index 5c05a0f820aba47ccd4bec4c8bd34f20db50b21b..398e00e9c11e544bc9b293f4412a7549afc7c8e5 100755 (executable)
@@ -11,5 +11,5 @@ ssh root@$h15 "hailortcli fw-logger /tmp/fw_log.dat"
 scp root@$h15:/tmp/fw_log.dat /tmp
 ssh root@$h15 "rm /tmp/fw_log.dat"
 
-python ./platform_internals/hailo_platform_internals/tools/firmware/parse_tracelog.py --fw vpu --core-log-entries firmware/vpu_firmware/build/hailo15_nnc_fw_*_log_entries.csv --core-only --raw-input-file /tmp/fw_log.dat
+python ./platform_internals/hailo_platform_internals/tools/firmware/tracelog_parser_tool/tracelog_parser_tool/parse_tracelog.py --fw vpu --core-log-entries firmware/vpu_firmware/build/hailo15_nnc_fw_*_log_entries.csv --core-only --raw-input-file /tmp/fw_log.dat
 
index eeea604f7ac6078bbb4e40968fbf9941c0e9aa4a..a38c3a116ced5d2b9ab55a74f9839df63f62b578 100644 (file)
@@ -1,5 +1,7 @@
 cmake_minimum_required(VERSION 3.0.0)
 
+include(${HAILO_EXTERNALS_CMAKE_SCRIPTS}/spdlog.cmake)
+
 set(FILES
     main.cpp
     shell.cpp
index 0d0bfaca62f2ed9381cc39f04600fa4ccf431f6d..dc9ecbeac9698f52c0d2f26f54ff6bdb6843fe6f 100644 (file)
@@ -117,7 +117,7 @@ std::shared_ptr<HailoRTDriver> create_driver_object(const std::string &device_id
     if (!hailort_driver) {
         throw std::runtime_error("Failed create hailort driver object");
     }
-    return std::make_shared<HailoRTDriver>(hailort_driver.release());
+    return hailort_driver.release();
 }
 
 int main(int argc, char **argv)